-rw-r--r--  arch/mips/net/ebpf_jit.c | 1950
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 3
-rw-r--r--  drivers/isdn/hysdn/hysdn_proclog.c | 28
-rw-r--r--  drivers/net/dsa/mt7530.c | 38
-rw-r--r--  drivers/net/dsa/mt7530.h | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 37
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 111
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 2
-rw-r--r--  drivers/net/gtp.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r--  drivers/net/hyperv/netvsc.c | 3
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 14
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 18
-rw-r--r--  drivers/net/usb/asix.h | 1
-rw-r--r--  drivers/net/usb/asix_common.c | 53
-rw-r--r--  drivers/net/usb/asix_devices.c | 1
-rw-r--r--  drivers/net/usb/lan78xx.c | 18
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 7
-rw-r--r--  drivers/net/vxlan.c | 1
-rw-r--r--  drivers/ptp/ptp_clock.c | 42
-rw-r--r--  drivers/ptp/ptp_private.h | 3
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 4
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/ptp_clock_kernel.h | 20
-rw-r--r--  include/net/tcp.h | 10
-rw-r--r--  net/batman-adv/translation-table.c | 60
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/ipv4/cipso_ipv4.c | 12
-rw-r--r--  net/ipv4/fou.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 34
-rw-r--r--  net/ipv4/tcp_output.c | 27
-rw-r--r--  net/ipv4/tcp_timer.c | 3
-rw-r--r--  net/ipv4/udp_offload.c | 2
-rw-r--r--  net/ipv6/route.c | 11
-rw-r--r--  net/ipv6/udp_offload.c | 2
-rw-r--r--  net/rds/ib_recv.c | 5
-rw-r--r--  net/sched/act_ipt.c | 20
-rw-r--r--  tools/build/feature/test-bpf.c | 2
-rw-r--r--  tools/lib/bpf/bpf.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_pkt_md_access.c | 11
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 19
55 files changed, 2490 insertions(+), 186 deletions(-)
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 000000000000..3f87b96da5c4
--- /dev/null
+++ b/arch/mips/net/ebpf_jit.c
@@ -0,0 +1,1950 @@
+/*
+ * Just-In-Time compiler for eBPF filters on MIPS
+ *
+ * Copyright (c) 2017 Cavium, Inc.
+ *
+ * Based on code from:
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO 0
+#define MIPS_R_AT 1
+#define MIPS_R_V0 2 /* BPF_R0 */
+#define MIPS_R_V1 3
+#define MIPS_R_A0 4 /* BPF_R1 */
+#define MIPS_R_A1 5 /* BPF_R2 */
+#define MIPS_R_A2 6 /* BPF_R3 */
+#define MIPS_R_A3 7 /* BPF_R4 */
+#define MIPS_R_A4 8 /* BPF_R5 */
+#define MIPS_R_T4 12 /* BPF_AX */
+#define MIPS_R_T5 13
+#define MIPS_R_T6 14
+#define MIPS_R_T7 15
+#define MIPS_R_S0 16 /* BPF_R6 */
+#define MIPS_R_S1 17 /* BPF_R7 */
+#define MIPS_R_S2 18 /* BPF_R8 */
+#define MIPS_R_S3 19 /* BPF_R9 */
+#define MIPS_R_S4 20 /* BPF_TCC */
+#define MIPS_R_S5 21
+#define MIPS_R_S6 22
+#define MIPS_R_S7 23
+#define MIPS_R_T8 24
+#define MIPS_R_T9 25
+#define MIPS_R_SP 29
+#define MIPS_R_RA 31
+
+/* eBPF flags */
+#define EBPF_SAVE_S0 BIT(0)
+#define EBPF_SAVE_S1 BIT(1)
+#define EBPF_SAVE_S2 BIT(2)
+#define EBPF_SAVE_S3 BIT(3)
+#define EBPF_SAVE_S4 BIT(4)
+#define EBPF_SAVE_RA BIT(5)
+#define EBPF_SEEN_FP BIT(6)
+#define EBPF_SEEN_TC BIT(7)
+#define EBPF_TCC_IN_V1 BIT(8)
+
+/*
+ * For the mips64 ISA, we need to track the value range or type for
+ * each JIT register. The BPF machine requires zero extended 32-bit
+ * values, but the mips64 ISA requires sign extended 32-bit values.
+ * At each point in the BPF program we track the state of every
+ * register so that we can zero extend or sign extend as the BPF
+ * semantics require.
+ */
+enum reg_val_type {
+ /* uninitialized */
+ REG_UNKNOWN,
+ /* not known to be 32-bit compatible. */
+ REG_64BIT,
+ /* 32-bit compatible, no truncation needed for 64-bit ops. */
+ REG_64BIT_32BIT,
+ /* 32-bit compatible, need truncation for 64-bit ops. */
+ REG_32BIT,
+ /* 32-bit zero extended. */
+ REG_32BIT_ZERO_EX,
+ /* 32-bit no sign/zero extension needed. */
+ REG_32BIT_POS
+};
+
+/*
+ * high bit of offsets indicates if long branch conversion done at
+ * this insn.
+ */
+#define OFFSETS_B_CONV BIT(31)
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf: The sk_filter
+ * @stack_size: eBPF stack size
+ * @tmp_offset: eBPF $sp offset to 8-byte temporary memory
+ * @idx: Instruction index
+ * @flags: JIT flags
+ * @offsets: Instruction offsets
+ * @target: Memory location for the compiled filter
+ * @reg_val_types: Packed enum reg_val_type for each register.
+ * @long_b_conversion: Set if a long branch conversion occurred on this pass.
+ * @gen_b_offsets: Set once real branch offsets should be computed.
+ */
+struct jit_ctx {
+ const struct bpf_prog *skf;
+ int stack_size;
+ int tmp_offset;
+ u32 idx;
+ u32 flags;
+ u32 *offsets;
+ u32 *target;
+ u64 *reg_val_types;
+ unsigned int long_b_conversion:1;
+ unsigned int gen_b_offsets:1;
+};
+
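+/*
+ * Each eBPF register gets a 3-bit reg_val_type slot in a u64, so
+ * register N occupies bits [3N+2:3N].  The unused top bits of the
+ * per-instruction words stay free for the visit-tracking RVT_* flags
+ * used during value range propagation.
+ */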
+static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
+{
+ *rvt &= ~(7ull << (reg * 3));
+ *rvt |= ((u64)type << (reg * 3));
+}
+
+static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
+ int index, int reg)
+{
+ return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
+}
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
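+/* ctx->idx advances either way, so sizing passes still count insns. */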
+#define emit_instr(ctx, func, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ uasm_i_##func(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
+
+static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
+{
+ unsigned long target_va, base_va;
+ unsigned int r;
+
+ if (!ctx->target)
+ return 0;
+
+ base_va = (unsigned long)ctx->target;
+ target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
+
+ if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
+ return (unsigned int)-1;
+ r = target_va & 0x0ffffffful;
+ return r;
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+ if (!ctx->gen_b_offsets)
+ return 0;
+
+ /*
+ * We want a pc-relative branch. tgt is the instruction offset
+ * we want to jump to.
+ *
+ * Branch on MIPS:
+ * I: target_offset <- sign_extend(offset)
+ * I+1: PC += target_offset (delay slot)
+ *
+ * ctx->idx currently points to the branch instruction
+ * but the offset is added to the delay slot so we need
+ * to subtract 4.
+ */
+ return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
+ (ctx->idx * 4) - 4;
+}
+
+int bpf_jit_enable __read_mostly;
+
+enum which_ebpf_reg {
+ src_reg,
+ src_reg_no_fp,
+ dst_reg,
+ dst_reg_fp_ok
+};
+
+/*
+ * For eBPF, the register mapping naturally falls out of the
+ * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
+ * separate frame pointer, so BPF_REG_10 relative accesses are
+ * adjusted to be $sp relative.
+ */
+int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
+ enum which_ebpf_reg w)
+{
+ int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
+ insn->src_reg : insn->dst_reg;
+
+ switch (ebpf_reg) {
+ case BPF_REG_0:
+ return MIPS_R_V0;
+ case BPF_REG_1:
+ return MIPS_R_A0;
+ case BPF_REG_2:
+ return MIPS_R_A1;
+ case BPF_REG_3:
+ return MIPS_R_A2;
+ case BPF_REG_4:
+ return MIPS_R_A3;
+ case BPF_REG_5:
+ return MIPS_R_A4;
+ case BPF_REG_6:
+ ctx->flags |= EBPF_SAVE_S0;
+ return MIPS_R_S0;
+ case BPF_REG_7:
+ ctx->flags |= EBPF_SAVE_S1;
+ return MIPS_R_S1;
+ case BPF_REG_8:
+ ctx->flags |= EBPF_SAVE_S2;
+ return MIPS_R_S2;
+ case BPF_REG_9:
+ ctx->flags |= EBPF_SAVE_S3;
+ return MIPS_R_S3;
+ case BPF_REG_10:
+ if (w == dst_reg || w == src_reg_no_fp)
+ goto bad_reg;
+ ctx->flags |= EBPF_SEEN_FP;
+ /*
+ * Needs special handling, return something that
+ * cannot be clobbered just in case.
+ */
+ return MIPS_R_ZERO;
+ case BPF_REG_AX:
+ return MIPS_R_T4;
+ default:
+bad_reg:
+ WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
+ return -EINVAL;
+ }
+}
+
+/*
+ * eBPF stack frame will be something like:
+ *
+ * Entry $sp ------> +--------------------------------+
+ * | $ra (optional) |
+ * +--------------------------------+
+ * | $s0 (optional) |
+ * +--------------------------------+
+ * | $s1 (optional) |
+ * +--------------------------------+
+ * | $s2 (optional) |
+ * +--------------------------------+
+ * | $s3 (optional) |
+ * +--------------------------------+
+ * | $s4 (optional) |
+ * +--------------------------------+
+ * | tmp-storage (if $ra saved) |
+ * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
+ * | BPF_REG_10 relative storage |
+ * | MAX_BPF_STACK (optional) |
+ * | . |
+ * | . |
+ * | . |
+ * $sp --------> +--------------------------------+
+ *
+ * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
+ * area is not allocated.
+ */
+static int gen_int_prologue(struct jit_ctx *ctx)
+{
+ int stack_adjust = 0;
+ int store_offset;
+ int locals_size;
+
+ if (ctx->flags & EBPF_SAVE_RA)
+ /*
+		 * If $ra is saved, we are making function calls and
+		 * may need an extra 8-byte tmp area.
+ */
+ stack_adjust += 16;
+ if (ctx->flags & EBPF_SAVE_S0)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S1)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S2)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S3)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S4)
+ stack_adjust += 8;
+
+ BUILD_BUG_ON(MAX_BPF_STACK & 7);
+ locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
+
+ stack_adjust += locals_size;
+ ctx->tmp_offset = locals_size;
+
+ ctx->stack_size = stack_adjust;
+
+ /*
+ * First instruction initializes the tail call count (TCC).
+ * On tail call we skip this instruction, and the TCC is
+ * passed in $v1 from the caller.
+ */
+ emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+ if (stack_adjust)
+ emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+ else
+ return 0;
+
+ store_offset = stack_adjust - 8;
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+
+ if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
+ emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+
+ return 0;
+}
+
+static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ int stack_adjust = ctx->stack_size;
+ int store_offset = stack_adjust - 8;
+ int r0 = MIPS_R_V0;
+
+ if (dest_reg == MIPS_R_RA &&
+ get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+ /* Don't let zero extended value escape. */
+ emit_instr(ctx, sll, r0, r0, 0);
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ emit_instr(ctx, jr, dest_reg);
+
+ if (stack_adjust)
+ emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
+ else
+ emit_instr(ctx, nop);
+
+ return 0;
+}
+
+static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
+ struct jit_ctx *ctx)
+{
+ if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+ emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
+ } else {
+ int lower = (s16)(insn->imm & 0xffff);
+ int upper = insn->imm - lower;
+
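+		/*
+		 * addiu sign-extends its 16-bit immediate, so the upper
+		 * part is computed against the sign-extended lower half;
+		 * lui followed by addiu then reconstructs the exact
+		 * 32-bit value.
+		 */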
+ emit_instr(ctx, lui, reg, upper >> 16);
+ emit_instr(ctx, addiu, reg, reg, lower);
+ }
+}
+
+static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int idx)
+{
+ int upper_bound, lower_bound;
+ int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+
+ if (dst < 0)
+ return dst;
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ case BPF_ADD:
+ upper_bound = S16_MAX;
+ lower_bound = S16_MIN;
+ break;
+ case BPF_SUB:
+ upper_bound = -(int)S16_MIN;
+ lower_bound = -(int)S16_MAX;
+ break;
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+ upper_bound = 0xffff;
+ lower_bound = 0;
+ break;
+ case BPF_RSH:
+ case BPF_LSH:
+ case BPF_ARSH:
+ /* Shift amounts are truncated, no need for bounds */
+ upper_bound = S32_MAX;
+ lower_bound = S32_MIN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Immediate move clobbers the register, so no sign/zero
+ * extension needed.
+ */
+ if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
+ if (BPF_CLASS(insn->code) == BPF_ALU &&
+ BPF_OP(insn->code) != BPF_LSH &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
+ emit_instr(ctx, sll, dst, dst, 0);
+
+ if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
+ /* single insn immediate case */
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_MOV:
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, andi, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, ori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, daddiu, dst, dst, -insn->imm);
+ break;
+ case BPF_ALU64 | BPF_RSH:
+ emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_RSH:
+ emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_LSH:
+ emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_LSH:
+ emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_ARSH:
+ emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_ARSH:
+ emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU | BPF_MOV:
+ emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, addiu, dst, dst, -insn->imm);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /* multi insn immediate case */
+ if (BPF_OP(insn->code) == BPF_MOV) {
+ gen_imm_to_reg(insn, dst, ctx);
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, and, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, or, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
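+/*
+ * skb_header_pointer() is an inline function, so this out-of-line
+ * wrapper gives the JITed code a real address to call.
+ */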
+static void * __must_check
+ool_skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ return skb_header_pointer(skb, offset, len, buffer);
+}
+
+static int size_to_len(const struct bpf_insn *insn)
+{
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ return 1;
+ case BPF_H:
+ return 2;
+ case BPF_W:
+ return 4;
+ case BPF_DW:
+ return 8;
+ }
+ return 0;
+}
+
+static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
+{
+ if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
+ } else if (value >= 0xffffffff80000000ull ||
+ (value < 0x80000000 && value > 0xffff)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
+ emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
+ } else {
+ int i;
+ bool seen_part = false;
+ int needed_shift = 0;
+
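+		/*
+		 * Build the constant 16 bits at a time, most significant
+		 * part first, skipping all-zero parts and merging the
+		 * shifts for skipped parts into a single dsll.
+		 */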
+ for (i = 0; i < 4; i++) {
+ u64 part = (value >> (16 * (3 - i))) & 0xffff;
+
+ if (seen_part && needed_shift > 0 && (part || i == 3)) {
+ emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
+ needed_shift = 0;
+ }
+ if (part) {
+ if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)part);
+ needed_shift = -16;
+ } else {
+ emit_instr(ctx, ori, dst,
+ seen_part ? dst : MIPS_R_ZERO,
+ (unsigned int)part);
+ }
+ seen_part = true;
+ }
+ if (seen_part)
+ needed_shift += 16;
+ }
+ }
+}
+
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
+{
+ int off, b_off;
+
+ ctx->flags |= EBPF_SEEN_TC;
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, map.max_entries);
+ emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
+ emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /*
+ * if (--TCC < 0)
+ * goto out;
+ */
+ /* Delay slot */
+ emit_instr(ctx, daddiu, MIPS_R_T5,
+ (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+ /*
+ * prog = array->ptrs[index];
+ * if (prog == NULL)
+ * goto out;
+ */
+ /* Delay slot */
+ emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
+ off = offsetof(struct bpf_array, ptrs);
+ emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /* Delay slot */
+ emit_instr(ctx, nop);
+
+ /* goto *(prog->bpf_func + 4); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
+ /* All systems are go... propagate TCC */
+ emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
+ /* Skip first instruction (TCC initialization) */
+ emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
+ return build_int_epilogue(ctx, MIPS_R_T9);
+}
+
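+/*
+ * The bbit0/bbit1 branch-on-bit instructions are a Cavium Octeon
+ * extension; where available, they let a BPF_JSET test against a
+ * single-bit immediate compile to one branch with no scratch register.
+ */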
+static bool use_bbit_insns(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ return true;
+ default:
+ return false;
+ }
+}
+
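+/* MIPS conditional branches reach +/-128 KiB (18-bit signed offset). */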
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
+/* Returns the number of insn slots consumed. */
+static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int this_idx, int exit_idx)
+{
+ int src, dst, r, td, ts, mem_off, b_off;
+ bool need_swap, did_move, cmp_eq;
+ unsigned int target;
+ u64 t64;
+ s64 t64s;
+
+ switch (insn->code) {
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */
+ r = gen_imm_insn(insn, ctx, this_idx);
+ if (r < 0)
+ return r;
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, dmultu, MIPS_R_AT, dst);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, multu, dst, MIPS_R_AT);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
+ case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) { /* Div by zero */
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+ }
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (BPF_OP(insn->code) == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, divu, dst, MIPS_R_AT);
+ if (BPF_OP(insn->code) == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) { /* Div by zero */
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+ }
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (BPF_OP(insn->code) == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, ddivu, dst, MIPS_R_AT);
+ if (BPF_OP(insn->code) == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ did_move = false;
+ if (insn->src_reg == BPF_REG_10) {
+ if (BPF_OP(insn->code) == BPF_MOV) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
+ did_move = true;
+ } else {
+ emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
+ src = MIPS_R_AT;
+ }
+ } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (BPF_OP(insn->code) == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ emit_instr(ctx, dmultu, dst, src);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+ emit_instr(ctx, ddivu, dst, src);
+ if (BPF_OP(insn->code) == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, dsllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, dsrlv, dst, dst, src);
+ break;
+ case BPF_ARSH:
+ emit_instr(ctx, dsrav, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU64_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ did_move = false;
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (BPF_OP(insn->code) == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ /* sign extend */
+ emit_instr(ctx, sll, tmp_reg, src, 0);
+ src = MIPS_R_AT;
+ }
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ emit_instr(ctx, mul, dst, dst, src);
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+ emit_instr(ctx, divu, dst, src);
+ if (BPF_OP(insn->code) == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, sllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, srlv, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_JMP | BPF_EXIT:
+ if (this_idx + 1 < exit_idx) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
+ cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) {
+ src = MIPS_R_ZERO;
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ src = MIPS_R_AT;
+ }
+ goto jeq_common;
+ case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (td == REG_32BIT && ts != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ } else if (ts == REG_32BIT && td != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
+ dst = MIPS_R_AT;
+ }
+ if (BPF_OP(insn->code) == BPF_JSET) {
+ emit_instr(ctx, and, MIPS_R_AT, dst, src);
+ cmp_eq = false;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (BPF_OP(insn->code) == BPF_JSGT) {
+ emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ break;
+ } else if (BPF_OP(insn->code) == BPF_JSGE) {
+ emit_instr(ctx, slt, MIPS_R_AT, dst, src);
+ cmp_eq = true;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (BPF_OP(insn->code) == BPF_JGT) {
+ /* dst or src could be AT */
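+			/*
+			 * Compute AT = (dst <= src) unsigned: movz/movn
+			 * turn T8 (dst - src) into a boolean "dst == src"
+			 * in T9, sltu gives "dst < src", and their OR is
+			 * the <= test.  The equality compare of AT
+			 * against $zero below then branches exactly when
+			 * dst > src.
+			 */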
+ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ /* SP known to be non-zero, movz becomes boolean not */
+ emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
+ emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
+ emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
+ cmp_eq = true;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (BPF_OP(insn->code) == BPF_JGE) {
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ cmp_eq = true;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else { /* JNE/JEQ case */
+ cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+ }
+jeq_common:
+ /*
+	 * If the next insn is EXIT and we are jumping around
+ * only it, invert the sense of the compare and
+ * conditionally jump to the exit. Poor man's branch
+ * chaining.
+ */
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, exit_idx);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
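+			/*
+			 * Out of short-branch range: invert the branch
+			 * sense and hop over the absolute jump emitted
+			 * below (b_off skips 3 insns), letting j reach
+			 * the distant target.
+			 */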
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, bne, dst, src, b_off);
+ else
+ emit_instr(ctx, beq, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, beq, dst, src, b_off);
+ else
+ emit_instr(ctx, bne, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
+ cmp_eq = (BPF_OP(insn->code) == BPF_JSGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (insn->imm == 0) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (cmp_eq)
+ emit_instr(ctx, bltz, dst, b_off);
+ else
+ emit_instr(ctx, blez, dst, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (cmp_eq)
+ emit_instr(ctx, bgez, dst, b_off);
+ else
+ emit_instr(ctx, bgtz, dst, b_off);
+ emit_instr(ctx, nop);
+ break;
+ }
+ /*
+ * only "LT" compare available, so we must use imm + 1
+ * to generate "GT"
+ */
+ t64s = insn->imm + (cmp_eq ? 0 : 1);
+ if (t64s >= S16_MIN && t64s <= S16_MAX) {
+ emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = true;
+ goto jeq_common;
+ }
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = true;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ cmp_eq = (BPF_OP(insn->code) == BPF_JGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+ /*
+ * only "LT" compare available, so we must use imm + 1
+ * to generate "GT"
+ */
+ t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1);
+ if (t64s >= 0 && t64s <= S16_MAX) {
+ emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = true;
+ goto jeq_common;
+ }
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = true;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ break;
+ }
+ t64 = (u32)insn->imm;
+ emit_const_to_reg(ctx, MIPS_R_AT, t64);
+ emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = false;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JA:
+ /*
+ * Prefer relative branch for easier debugging, but
+ * fall back if needed.
+ */
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ emit_instr(ctx, j, target);
+ } else {
+ emit_instr(ctx, b, b_off);
+ }
+ emit_instr(ctx, nop);
+ break;
+ case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->src_reg != 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
+ emit_const_to_reg(ctx, dst, t64);
+ return 2; /* Double slot insn */
+
+ case BPF_JMP | BPF_CALL:
+ ctx->flags |= EBPF_SAVE_RA;
+ t64s = (s64)insn->imm + (s64)__bpf_call_base;
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
+ emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+ /* delay slot */
+ emit_instr(ctx, nop);
+ break;
+
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(ctx, this_idx))
+ return -EINVAL;
+ break;
+
+ case BPF_LD | BPF_B | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_DW | BPF_ABS:
+ ctx->flags |= EBPF_SAVE_RA;
+
+ gen_imm_to_reg(insn, MIPS_R_A1, ctx);
+ emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+
+ if (insn->imm < 0) {
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
+ } else {
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+ emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+ }
+ goto ld_skb_common;
+
+ case BPF_LD | BPF_B | BPF_IND:
+ case BPF_LD | BPF_H | BPF_IND:
+ case BPF_LD | BPF_W | BPF_IND:
+ case BPF_LD | BPF_DW | BPF_IND:
+ ctx->flags |= EBPF_SAVE_RA;
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (ts == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, MIPS_R_A1, src, 0);
+ src = MIPS_R_A1;
+ }
+ if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+ emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
+ }
+ /* truncate to 32-bit int */
+ emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
+ emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+ emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
+
+ emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+ emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+ emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);
+
+ld_skb_common:
+ emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+ /* delay slot move */
+ emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);
+
+ /* Check the error value */
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, exit_idx);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
+ emit_instr(ctx, nop);
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ } else {
+ emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, nop);
+ }
+
+#ifdef __BIG_ENDIAN
+ need_swap = false;
+#else
+ need_swap = true;
+#endif
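+	/* Packet loads return network (big-endian) data; LE hosts swap. */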
+ dst = MIPS_R_V0;
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
+ break;
+ case BPF_H:
+ emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
+ if (need_swap)
+ emit_instr(ctx, wsbh, dst, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
+ if (need_swap) {
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, rotr, dst, dst, 16);
+ }
+ break;
+ case BPF_DW:
+ emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
+ if (need_swap) {
+ emit_instr(ctx, dsbh, dst, dst);
+ emit_instr(ctx, dshd, dst, dst);
+ }
+ break;
+ }
+
+ break;
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (insn->imm == 64 && td == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+ if (insn->imm != 64 &&
+ (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+
+#ifdef __BIG_ENDIAN
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
+#else
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
+#endif
+ if (insn->imm == 16) {
+ if (need_swap)
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, andi, dst, dst, 0xffff);
+ } else if (insn->imm == 32) {
+ if (need_swap) {
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, rotr, dst, dst, 16);
+ }
+ } else { /* 64-bit*/
+ if (need_swap) {
+ emit_instr(ctx, dsbh, dst, dst);
+ emit_instr(ctx, dshd, dst, dst);
+ }
+ }
+ break;
+
+ case BPF_ST | BPF_B | BPF_MEM:
+ case BPF_ST | BPF_H | BPF_MEM:
+ case BPF_ST | BPF_W | BPF_MEM:
+ case BPF_ST | BPF_DW | BPF_MEM:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
+ break;
+ }
+ break;
+
+ case BPF_LDX | BPF_B | BPF_MEM:
+ case BPF_LDX | BPF_H | BPF_MEM:
+ case BPF_LDX | BPF_W | BPF_MEM:
+ case BPF_LDX | BPF_DW | BPF_MEM:
+ if (insn->src_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ src = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ mem_off = insn->off;
+ }
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, lbu, dst, mem_off, src);
+ break;
+ case BPF_H:
+ emit_instr(ctx, lhu, dst, mem_off, src);
+ break;
+ case BPF_W:
+ emit_instr(ctx, lw, dst, mem_off, src);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, ld, dst, mem_off, src);
+ break;
+ }
+ break;
+
+ case BPF_STX | BPF_B | BPF_MEM:
+ case BPF_STX | BPF_H | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_MEM:
+ case BPF_STX | BPF_DW | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_XADD:
+ case BPF_STX | BPF_DW | BPF_XADD:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+			return src;
+ if (BPF_MODE(insn->code) == BPF_XADD) {
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_W:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
+ /*
+				 * On failure, back up to the ll
+				 * (-4 instructions of 4 bytes each).
+ */
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ }
+ } else { /* BPF_MEM */
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, src, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, src, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, src, mem_off, dst);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, sd, src, mem_off, dst);
+ break;
+ }
+ }
+ break;
+
+ default:
+ pr_err("NOT HANDLED %d - (%02x)\n",
+ this_idx, (unsigned int)insn->code);
+ return -EINVAL;
+ }
+ return 1;
+}
+
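+/*
+ * The top two bits of each per-insn reg_val_types word track which
+ * control flow edges out of that insn have already been followed.
+ */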
+#define RVT_VISITED_MASK 0xc000000000000000ull
+#define RVT_FALL_THROUGH 0x4000000000000000ull
+#define RVT_BRANCH_TAKEN 0x8000000000000000ull
+#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
+
+static int build_int_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ int i, r;
+
+ for (i = 0; i < prog->len; ) {
+ insn = prog->insnsi + i;
+ if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
+ /* dead instruction, don't emit it. */
+ i++;
+ continue;
+ }
+
+ if (ctx->target == NULL)
+ ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
+
+ r = build_one_insn(insn, ctx, i, prog->len);
+ if (r < 0)
+ return r;
+ i += r;
+ }
+ /* epilogue offset */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ /*
+	 * All exits have an offset of the epilogue; some offsets may
+	 * not have been set due to branch-around threading, so set
+ * them now.
+ */
+ if (ctx->target == NULL)
+ for (i = 0; i < prog->len; i++) {
+ insn = prog->insnsi + i;
+ if (insn->code == (BPF_JMP | BPF_EXIT))
+ ctx->offsets[i] = ctx->idx * 4;
+ }
+ return 0;
+}
+
+/* return the last idx processed, or negative for error */
+static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
+ int start_idx, bool follow_taken)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ u64 exit_rvt = initial_rvt;
+ u64 *rvt = ctx->reg_val_types;
+ int idx;
+ int reg;
+
+ for (idx = start_idx; idx < prog->len; idx++) {
+ rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
+ insn = prog->insnsi + idx;
+ switch (BPF_CLASS(insn->code)) {
+ case BPF_ALU:
+ switch (BPF_OP(insn->code)) {
+ case BPF_ADD:
+ case BPF_SUB:
+ case BPF_MUL:
+ case BPF_DIV:
+ case BPF_OR:
+ case BPF_AND:
+ case BPF_LSH:
+ case BPF_RSH:
+ case BPF_NEG:
+ case BPF_MOD:
+ case BPF_XOR:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ } else {
+ /* IMM to REG move*/
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ }
+ break;
+ case BPF_END:
+ if (insn->imm == 64)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ else if (insn->imm == 32)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ else /* insn->imm == 16 */
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_ALU64:
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+ /* REG to REG move*/
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ } else {
+ /* IMM to REG move*/
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ }
+ break;
+ default:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LD:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_IMM) {
+ s64 val;
+
+ val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
+ if (val > 0 && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else if (val >= S32_MIN && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ rvt[idx] |= RVT_DONE;
+ idx++;
+ } else {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ if (BPF_MODE(insn->code) == BPF_IMM)
+ set_reg_val_type(&exit_rvt, insn->dst_reg,
+ insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LDX:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_JMP:
+ switch (BPF_OP(insn->code)) {
+ case BPF_EXIT:
+ rvt[idx] = RVT_DONE | exit_rvt;
+ rvt[prog->len] = exit_rvt;
+ return idx;
+ case BPF_JA:
+ rvt[idx] |= RVT_DONE;
+ idx += insn->off;
+ break;
+ case BPF_JEQ:
+ case BPF_JGT:
+ case BPF_JGE:
+ case BPF_JSET:
+ case BPF_JNE:
+ case BPF_JSGT:
+ case BPF_JSGE:
+ if (follow_taken) {
+ rvt[idx] |= RVT_BRANCH_TAKEN;
+ idx += insn->off;
+ follow_taken = false;
+ } else {
+ rvt[idx] |= RVT_FALL_THROUGH;
+ }
+ break;
+ case BPF_CALL:
+ set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
+ /* Upon call return, argument registers are clobbered. */
+ for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ rvt[idx] |= RVT_DONE;
+ break;
+ default:
+ WARN(1, "Unhandled BPF_JMP case.\n");
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ break;
+ default:
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ }
+ return idx;
+}
+
+/*
+ * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
+ * each eBPF insn. This allows unneeded sign and zero extension
+ * operations to be omitted.
+ *
+ * Doesn't yet handle confluence of control paths with conflicting
+ * ranges, but it is good enough for most sane code.
+ */
+static int reg_val_propagate(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ u64 exit_rvt;
+ int reg;
+ int i;
+
+ /*
+ * 11 registers * 3 bits/reg leaves top bits free for other
+	 * uses.  Bits 62..63 are used to see if we have visited an insn.
+ */
+ exit_rvt = 0;
+
+ /* Upon entry, argument registers are 64-bit. */
+ for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ /*
+ * First follow all conditional branches on the fall-through
+ * edge of control flow..
+ */
+ reg_val_propagate_range(ctx, exit_rvt, 0, false);
+restart_search:
+ /*
+ * Then repeatedly find the first conditional branch where
+ * both edges of control flow have not been taken, and follow
+ * the branch taken edge. We will end up restarting the
+ * search once per conditional branch insn.
+ */
+ for (i = 0; i < prog->len; i++) {
+ u64 rvt = ctx->reg_val_types[i];
+
+ if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
+ (rvt & RVT_VISITED_MASK) == 0)
+ continue;
+ if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
+ } else { /* RVT_BRANCH_TAKEN */
+ WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
+ }
+ goto restart_search;
+ }
+ /*
+ * Eventually all conditional branches have been followed on
+ * both branches and we are done. Any insn that has not been
+ * visited at this point is dead.
+ */
+
+ return 0;
+}
+
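+/* Pad unused JIT memory with break instructions so stray jumps trap. */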
+static void jit_fill_hole(void *area, unsigned int size)
+{
+ u32 *p;
+
+ /* We are guaranteed to have aligned memory. */
+ for (p = area; size >= sizeof(u32); size -= sizeof(u32))
+ uasm_i_break(&p, BRK_BUG); /* Increments p */
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ struct bpf_prog *orig_prog = prog;
+ bool tmp_blinded = false;
+ struct bpf_prog *tmp;
+ struct bpf_binary_header *header = NULL;
+ struct jit_ctx ctx;
+ unsigned int image_size;
+ u8 *image_ptr;
+
+ if (!bpf_jit_enable || !cpu_has_mips64r2)
+ return prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ /* If blinding was requested and we failed during blinding,
+ * we must fall back to the interpreter.
+ */
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ goto out_err;
+
+ ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
+ if (ctx.reg_val_types == NULL)
+ goto out_err;
+
+ ctx.skf = prog;
+
+ if (reg_val_propagate(&ctx))
+ goto out_err;
+
+ /*
+ * First pass discovers used resources and instruction offsets
+ * assuming short branches are used.
+ */
+ if (build_int_body(&ctx))
+ goto out_err;
+
+ /*
+	 * If no calls are made (EBPF_SAVE_RA not set), the tail call
+	 * count can stay in $v1; otherwise we must save it in $s4.
+ */
+ if (ctx.flags & EBPF_SEEN_TC) {
+ if (ctx.flags & EBPF_SAVE_RA)
+ ctx.flags |= EBPF_SAVE_S4;
+ else
+ ctx.flags |= EBPF_TCC_IN_V1;
+ }
+
+ /*
+	 * Second pass generates offsets.  If any branches are out of
+	 * range, a jump-around long sequence is generated, and we have
+ * to try again from the beginning to generate the new
+ * offsets. This is done until no additional conversions are
+ * necessary.
+ */
+ do {
+ ctx.idx = 0;
+ ctx.gen_b_offsets = 1;
+ ctx.long_b_conversion = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+ } while (ctx.long_b_conversion);
+
+ image_size = 4 * ctx.idx;
+
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ if (header == NULL)
+ goto out_err;
+
+ ctx.target = (u32 *)image_ptr;
+
+ /* Third pass generates the code */
+ ctx.idx = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+
+ /* Update the icache */
+ flush_icache_range((unsigned long)ctx.target,
+ (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+
+ if (bpf_jit_enable > 1)
+ /* Dump JIT code */
+ bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+
+ bpf_jit_binary_lock_ro(header);
+ prog->bpf_func = (void *)ctx.target;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+out_normal:
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ kfree(ctx.offsets);
+ kfree(ctx.reg_val_types);
+
+ return prog;
+
+out_err:
+ prog = orig_prog;
+ if (header)
+ bpf_jit_binary_free(header);
+ goto out_normal;
+}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 01c6fbc3e85b..1803797fc885 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
insn_count = bpf_jit_insn(jit, fp, i);
if (insn_count < 0)
return -1;
- jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+ /* Next instruction address */
+ jit->addrs[i + insn_count] = jit->prg;
}
bpf_jit_epilogue(jit);
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 7b5fd8fb1761..aaca0b3d662e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -44,7 +44,6 @@ struct procdata {
char log_name[15]; /* log filename */
struct log_data *log_head, *log_tail; /* head and tail for queue */
int if_used; /* open count for interface */
- int volatile del_lock; /* lock for delete operations */
unsigned char logtmp[LOG_MAX_LINELEN];
wait_queue_head_t rd_queue;
};
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
{
struct log_data *ib;
struct procdata *pd = card->proclog;
- int i;
unsigned long flags;
if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
else
pd->log_tail->next = ib; /* follows existing messages */
pd->log_tail = ib; /* new tail */
- i = pd->del_lock++; /* get lock state */
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
	/* delete old entries */
- if (!i)
- while (pd->log_head->next) {
- if ((pd->log_head->usage_cnt <= 0) &&
- (pd->log_head->next->usage_cnt <= 0)) {
- ib = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(ib);
- } else
- break;
- } /* pd->log_head->next */
- pd->del_lock--; /* release lock level */
+ while (pd->log_head->next) {
+ if ((pd->log_head->usage_cnt <= 0) &&
+ (pd->log_head->next->usage_cnt <= 0)) {
+ ib = pd->log_head;
+ pd->log_head = pd->log_head->next;
+ kfree(ib);
+ } else {
+ break;
+ }
+ } /* pd->log_head->next */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
} /* put_log_buffer */
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1e46418a3b74..264b281eb86b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
* all finished.
*/
mt7623_pad_clk_setup(ds);
+ } else {
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
+ u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
+ break;
+	}
+
+ if (phydev->link)
+ mcr |= PMCR_FORCE_LNK;
+
+ if (phydev->duplex) {
+ mcr |= PMCR_FORCE_FDX;
+
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= PMCR_TX_FC_EN;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= PMCR_RX_FC_EN;
+ }
+ mt7530_write(priv, MT7530_PMCR_P(port), mcr);
}
}
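In the forced (fixed-link) path added above, pause resolution follows the IEEE 802.3 rules implemented by mii_resolve_flowctrl_fdx(): symmetric pause when both sides advertise it, otherwise asymmetric pause decided by which side advertises what. A standalone sketch of that resolution; the helper body is paraphrased and the constants mirror the kernel's mii.h values:

#include <stdio.h>

#define ADVERTISE_PAUSE_CAP  0x0400	/* same value as LPA_PAUSE_CAP  */
#define ADVERTISE_PAUSE_ASYM 0x0800	/* same value as LPA_PAUSE_ASYM */
#define FLOW_CTRL_TX 0x01
#define FLOW_CTRL_RX 0x02

static unsigned char resolve_fdx(unsigned short lcl, unsigned short rmt)
{
	unsigned char cap = 0;

	if (lcl & rmt & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric pause */
	} else if (lcl & rmt & ADVERTISE_PAUSE_ASYM) {
		if (lcl & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;	/* we honour pause frames */
		else if (rmt & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;	/* we send pause frames */
	}
	return cap;
}

int main(void)
{
	/* we advertise asym only, the partner advertises both:
	 * PMCR_TX_FC_EN would be set, PMCR_RX_FC_EN would not */
	printf("flowctrl = %#x\n",
	       resolve_fdx(ADVERTISE_PAUSE_ASYM,
			   ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM));
	return 0;
}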
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index b83d76b99802..74db9822eb40 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
#define PMCR_TX_FC_EN BIT(5)
#define PMCR_RX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
+#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 86058a9f3417..1d307f2def2d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_enet_gpiod_get(pdata);
- if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
- pdata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->clk)) {
+ pdata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
/* Abort if the clock is defined but couldn't be
		 * retrieved. Always abort if the clock is missing on a
* DT system as the driver can't cope with this case.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f411936b744c..a1125d10c825 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
spin_lock_init(&bp->lock);
+ u64_stats_init(&bp->hw_stats.syncp);
bp->rx_pending = B44_DEF_RX_RING_PENDING;
bp->tx_pending = B44_DEF_TX_RING_PENDING;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a3e694679635..c45e8e3b82d3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
struct device *dev = &adapter->vdev->dev;
+ int rc;
do {
if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
dev_err(dev, "Capabilities query timeout\n");
return -1;
}
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev,
+ "Initialization of SCRQ's failed\n");
+ return -1;
+ }
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev,
+ "Initialization of SCRQ's irqs failed\n");
+ return -1;
+ }
}
reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
*req_value = be64_to_cpu(crq->request_capability_rsp.number);
ibmvnic_send_req_caps(adapter, 1);
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b936febc315a..2194960d5855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
/* add u32 for head writeback, align after this takes care of
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 084c53582793..032f8ac06357 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
if (!tx_ring->tx_buffer_info)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
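The u64_stats_init() calls added here (and in the b44, i40e, nfp and netvsc patches in this series) matter on 32-bit SMP builds, where struct u64_stats_sync is a seqcount: a reader of a 64-bit counter must retry if a writer updated the two 32-bit halves underneath it, and the seqcount has to be initialised before first use or lockdep complains. A hedged sketch of the usual pattern the init enables; the struct and field names are illustrative:

#include <linux/u64_stats_sync.h>

struct ring_stats {			/* illustrative, not a driver's */
	u64 packets;
	struct u64_stats_sync syncp;
};

static void ring_stats_init(struct ring_stats *s)
{
	s->packets = 0;
	u64_stats_init(&s->syncp);	/* what the patches above add */
}

static void ring_stats_add(struct ring_stats *s, u64 n)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += n;
	u64_stats_update_end(&s->syncp);
}

static u64 ring_stats_read(struct ring_stats *s)
{
	unsigned int start;
	u64 packets;

	do {				/* retry if a writer raced us */
		start = u64_stats_fetch_begin_irq(&s->syncp);
		packets = s->packets;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	return packets;
}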
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c751a1d434ad..3d4e4a5d00d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_caps *caps = &priv->mdev->dev->caps;
int err = 0;
u64 config = 0;
u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
MLX4_DEV_CAP_FLAG_WOL_PORT2;
- if (!(priv->mdev->dev->caps.flags & mask)) {
+ if (!(caps->flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
}
+ if (caps->wol_port[priv->port])
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+
err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
if (err) {
en_err(priv, "Failed to get WoL information\n");
return;
}
- if (config & MLX4_EN_WOL_MAGIC)
- wol->supported = WAKE_MAGIC;
- else
- wol->supported = 0;
-
- if (config & MLX4_EN_WOL_ENABLED)
+ if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
wol->wolopts = WAKE_MAGIC;
else
wol->wolopts = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 436f7689a032..bf1638044a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
* header, the HW adds it. To address that, we are subtracting the pseudo
* header checksum from the checksum value provided by the HW.
*/
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
- struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct iphdr *iph)
{
__u16 length_for_csum = 0;
__wsum csum_pseudo_header = 0;
+ __u8 ipproto = iph->protocol;
+
+ if (unlikely(ipproto == IPPROTO_SCTP))
+ return -1;
length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
- length_for_csum, iph->protocol, 0);
+ length_for_csum, ipproto, 0);
skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+ return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h)
{
+ __u8 nexthdr = ipv6h->nexthdr;
__wsum csum_pseudo_hdr = 0;
- if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
- ipv6h->nexthdr == IPPROTO_HOPOPTS))
+ if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+ nexthdr == IPPROTO_HOPOPTS ||
+ nexthdr == IPPROTO_SCTP))
return -1;
- hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+ hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
- csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+ (__force __wsum)htons(nexthdr));
skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
}
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
- get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
- else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
- if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
- return -1;
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+ return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
return 0;
}
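The checksum fixups above are plain one's-complement arithmetic: the hardware sum includes a pseudo-header contribution the stack does not expect, so the driver subtracts it with csum_sub(). A compilable sketch of the csum_add()/csum_sub() semantics, using made-up values:

#include <stdint.h>
#include <stdio.h>

/* one's-complement add with end-around carry, as csum_add() does */
static uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	return res + (res < addend);	/* fold the carry back in */
}

/* csum_sub(a, b) is csum_add(a, ~b) */
static uint32_t csum_sub32(uint32_t csum, uint32_t sub)
{
	return csum_add32(csum, ~sub);
}

int main(void)
{
	uint32_t hw_checksum = 0x0001a2b3;	/* made-up HW value   */
	uint32_t pseudo_hdr  = 0x00000f0e;	/* made-up pseudo sum */

	printf("skb->csum = %#x\n", csum_sub32(hw_checksum, pseudo_hdr));
	return 0;
}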
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 37e84a59e751..041c0ed65929 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
[34] = "DMFS Sniffer support (UC & MC)",
- [35] = "QinQ VST mode support",
- [36] = "sl to vl mapping table change event support"
+ [35] = "Diag counters per port",
+ [36] = "QinQ VST mode support",
+ [37] = "sl to vl mapping table change event support",
};
int i;
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
+#define QUERY_DEV_CAP_WOL_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
dev_cap->flags = flags | (u64)ext_flags << 32;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
+ dev_cap->wol_port[1] = !!(field & 0x20);
+ dev_cap->wol_port[2] = !!(field & 0x40);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 5343a0599253..b52ba01aa486 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+ bool wol_port[MLX4_MAX_PORTS + 1];
};
struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a27c9c13a36e..09b9bc17bce9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ dev->caps.wol_port[1] = dev_cap->wol_port[1];
+ dev->caps.wol_port[2] = dev_cap->wol_port[2];
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 656b2d3f1bee..5eb1606765c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
if (!bridge_port->bridge_device->multicast_enabled)
return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
+ if (!mlxsw_sp_port_vlan)
+ return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
+ if (!mlxsw_sp_port_vlan)
+ return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
}
+static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_mid *mid, *tmp;
+
+ list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
+ list_del(&mid->list);
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+ kfree(mid);
+ }
+}
+
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+ mlxsw_sp_mids_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
kfree(mlxsw_sp->bridge);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18750ff0ede6..4631ca8b8eb2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
tx_ring->idx = idx;
tx_ring->r_vec = r_vec;
tx_ring->is_xdp = is_xdp;
+ u64_stats_init(&tx_ring->r_vec->tx_sync);
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->idx = idx;
rx_ring->r_vec = r_vec;
+ u64_stats_init(&rx_ring->r_vec->rx_sync);
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9da91045d167..3eb241657368 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
- if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
goto err;
return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 32279d21c836..c2121d214f08 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,9 +31,18 @@
#include "cpts.h"
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+ unsigned long tmo;
+};
+
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+ u16 ts_seqid, u8 ts_msgtype);
+
static int event_expired(struct cpts_event *event)
{
return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+ struct sk_buff *skb, *tmp;
+ u16 seqid;
+ u8 mtype;
+ bool found = false;
+
+ mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+ seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+ /* no need to grab txq.lock as access is always done under cpts->lock */
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ unsigned int class = ptp_classify_raw(skb);
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+
+ if (cpts_match(skb, class, seqid, mtype)) {
+ u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+ mtype, seqid);
+ } else if (time_after(jiffies, skb_cb->tmo)) {
+			/* time out any skbs whose deadline has expired */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype %u seqid %04x\n",
+ mtype, seqid);
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ return found;
+}
+
/*
* Returns zero if matching event type was found.
*/
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
event->low = lo;
type = event_type(event);
switch (type) {
+ case CPTS_EV_TX:
+ if (cpts_match_tx_ts(cpts, event)) {
+ /* if the new event matches an existing skb,
+ * then don't queue it
+ */
+ break;
+ }
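+			/* otherwise fall through and queue the event */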
case CPTS_EV_PUSH:
case CPTS_EV_RX:
- case CPTS_EV_TX:
list_del_init(&event->list);
list_add_tail(&event->list, &cpts->events);
break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+ unsigned long delay = cpts->ov_check_period;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ return (long)delay;
+}
+
static struct ptp_clock_info cpts_info = {
.owner = THIS_MODULE,
.name = "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
.gettime64 = cpts_ptp_gettime,
.settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable,
+ .do_aux_work = cpts_overflow_check,
};
-static void cpts_overflow_check(struct work_struct *work)
-{
- struct timespec64 ts;
- struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
-
- cpts_ptp_gettime(&cpts->info, &ts);
- pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
-}
-
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
u16 ts_seqid, u8 ts_msgtype)
{
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
return 0;
spin_lock_irqsave(&cpts->lock, flags);
- cpts_fifo_read(cpts, CPTS_EV_PUSH);
+ cpts_fifo_read(cpts, -1);
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct cpts_event, list);
if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
break;
}
}
+
+ if (ev_type == CPTS_EV_TX && !ns) {
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+ /* Not found, add frame to queue for processing later.
+ * The periodic FIFO check will handle this.
+ */
+ skb_get(skb);
+		/* record the deadline used to time out this skb */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ __skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->clock, 0);
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
{
int err, i;
+ skb_queue_head_init(&cpts->txq);
INIT_LIST_HEAD(&cpts->events);
INIT_LIST_HEAD(&cpts->pool);
for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
}
cpts->phc_index = ptp_clock_index(cpts->clock);
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
+ ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
return 0;
err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
if (WARN_ON(!cpts->clock))
return;
- cancel_delayed_work_sync(&cpts->overflow_work);
-
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
+	/* Drop all queued packets */
+ skb_queue_purge(&cpts->txq);
+
clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->dev = dev;
cpts->reg = (struct cpsw_cpts __iomem *)regs;
spin_lock_init(&cpts->lock);
- INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
ret = cpts_of_parse(cpts, node);
if (ret)
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 01ea82ba9cdc..73d73faf0f38 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -119,13 +119,13 @@ struct cpts {
u32 cc_mult; /* for the nominal frequency */
struct cyclecounter cc;
struct timecounter tc;
- struct delayed_work overflow_work;
int phc_index;
struct clk *refclk;
struct list_head events;
struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS];
unsigned long ov_check_period;
+ struct sk_buff_head txq;
};
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
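The deferral path above parks unmatched TX skbs on cpts->txq and stores the per-skb deadline in skb->cb, the 48-byte scratch area owned by whichever layer currently holds the skb. A hedged sketch of that idiom; park_skb and my_cb are hypothetical names:

#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>

struct my_cb {				/* must fit in skb->cb (48 bytes) */
	unsigned long tmo;		/* deadline in jiffies */
};

static void park_skb(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct my_cb *cb = (struct my_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof(skb->cb));
	cb->tmo = jiffies + msecs_to_jiffies(100);
	skb_get(skb);			/* hold a reference while parked */
	__skb_queue_tail(q, skb);	/* caller serialises queue access */
}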
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 1542e837fdfa..f38e32a7ec9c 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
gtp->dev = dev;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d6c25580f8dd..12cc64bfcff8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -765,7 +765,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
- refcount_t sc_offered;
+ atomic_t open_chn;
+ wait_queue_head_t subchan_open;
struct rndis_device *extension;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0a9167dd72fb..d18c3326a1f7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
init_completion(&net_device->channel_init_wait);
+ init_waitqueue_head(&net_device->subchan_open);
return net_device;
}
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
struct netvsc_channel *nvchan = &net_device->chan_table[i];
nvchan->channel = device->channel;
+ u64_stats_init(&nvchan->tx_stats.syncp);
+ u64_stats_init(&nvchan->rx_stats.syncp);
}
/* Enable NAPI handler before init callbacks */
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 85c00e1c52b6..d6308ffda53e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
else
netif_napi_del(&nvchan->napi);
- if (refcount_dec_and_test(&nvscdev->sc_offered))
- complete(&nvscdev->channel_init_wait);
+ atomic_inc(&nvscdev->open_chn);
+ wake_up(&nvscdev->subchan_open);
}
int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->max_chn = 1;
net_device->num_chn = 1;
- refcount_set(&net_device->sc_offered, 0);
-
net_device->extension = rndis_device;
rndis_device->ndev = net;
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
net_device->num_chn);
+ atomic_set(&net_device->open_chn, 1);
num_rss_qs = net_device->num_chn - 1;
if (num_rss_qs == 0)
return 0;
- refcount_set(&net_device->sc_offered, num_rss_qs);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret)
goto out;
+ wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
ret = -ENODEV;
goto out;
}
- wait_for_completion(&net_device->channel_init_wait);
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+	/* wait for all subchannels to open */
+ wait_event(net_device->subchan_open,
+ atomic_read(&net_device->open_chn) == net_device->num_chn);
+
	/* ignore failures from setting rss parameters, still have channels */
rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
net_device->num_chn);
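A completion can fire only once, while the host opens the num_chn - 1 subchannels independently and in any order, hence the switch to an atomic counter plus a wait queue. A generic sketch of the count-and-wait pattern; the function names are illustrative:

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t open_chn = ATOMIC_INIT(1);	/* primary counts as 1 */
static DECLARE_WAIT_QUEUE_HEAD(subchan_open);

/* per-subchannel callback, may run in any order */
static void on_subchannel_open(void)
{
	atomic_inc(&open_chn);
	wake_up(&subchan_open);
}

/* setup path: sleeps until every expected channel has opened */
static void wait_all_channels(int num_chn)
{
	wait_event(subchan_open, atomic_read(&open_chn) == num_chn);
}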
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f37e3c1fd4e7..8dab74a81303 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
netdev_lockdep_set_classes(dev);
- ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+ ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index bd4303944e44..a404552555d4 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- read_lock(&pch->upl);
ppp = pch->ppp;
if (ppp)
- ppp_xmit_process(ppp);
- read_unlock(&pch->upl);
+ __ppp_xmit_process(ppp);
}
}
static void ppp_channel_push(struct channel *pch)
{
- local_bh_disable();
-
- __ppp_channel_push(pch);
-
- local_bh_enable();
+ read_lock_bh(&pch->upl);
+ if (pch->ppp) {
+ (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+ __ppp_channel_push(pch);
+ (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+ } else {
+ __ppp_channel_push(pch);
+ }
+ read_unlock_bh(&pch->upl);
}
/*
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index d1092421aaa7..9a4171b90947 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx);
int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
+void asix_rx_fixup_common_free(struct asix_common_private *dp);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7847436c441e..522d2900cd1d 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
+{
+ /* Reset the variables that have a lifetime outside of
+ * asix_rx_fixup_internal() so that future processing starts from a
+ * known set of initial conditions.
+ */
+
+ if (rx->ax_skb) {
+ /* Discard any incomplete Ethernet frame in the netdev buffer */
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
+
+ /* Assume the Data header 32-bit word is at the start of the current
+ * or next URB socket buffer so reset all the state variables.
+ */
+ rx->remaining = 0;
+ rx->split_head = false;
+ rx->header = 0;
+}
+
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx)
{
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
rx->remaining);
- if (rx->ax_skb) {
- kfree_skb(rx->ax_skb);
- rx->ax_skb = NULL;
- /* Discard the incomplete netdev Ethernet frame
- * and assume the Data header is at the start of
- * the current URB socket buffer.
- */
- }
- rx->remaining = 0;
+ reset_asix_rx_fixup_info(rx);
}
}
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
rx->header, offset);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (rx->ax_skb) {
skb_put_data(rx->ax_skb, skb->data + offset,
copy_length);
- if (!rx->remaining)
+ if (!rx->remaining) {
usbnet_skb_return(dev, rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
}
offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (skb->len != offset) {
netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
skb->len, offset);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
return asix_rx_fixup_internal(dev, skb, rx);
}
+void asix_rx_fixup_common_free(struct asix_common_private *dp)
+{
+ struct asix_rx_fixup_info *rx;
+
+ if (!dp)
+ return;
+
+ rx = &dp->rx_fixup_info;
+
+ if (rx->ax_skb) {
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
+}
+
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index a3aa0a27dfe5..b2ff88e69a81 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ asix_rx_fixup_common_free(dev->driver_priv);
kfree(dev->driver_priv);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5833f7e2a127..b99a7fb09f8e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* Init LTM */
lan78xx_init_ltm(dev);
- dev->net->hard_header_len += TX_OVERHEAD;
- dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
if (dev->udev->speed == USB_SPEED_SUPER) {
buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
return ret;
}
+ dev->net->hard_header_len += TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
/* Init all registers */
ret = lan78xx_reset(dev);
- lan78xx_mdio_init(dev);
+ ret = lan78xx_mdio_init(dev);
dev->net->flags |= IFF_MULTICAST;
pdata->wol = WAKE_MAGIC;
- return 0;
+ return ret;
}
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
udev = interface_to_usbdev(intf);
udev = usb_get_dev(udev);
- ret = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct lan78xx_net));
if (!netdev) {
- dev_err(&intf->dev, "Error: OOM\n");
- goto out1;
+ dev_err(&intf->dev, "Error: OOM\n");
+ ret = -ENOMEM;
+ goto out1;
}
/* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out2;
+ goto out3;
}
usb_set_intfdata(intf, dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5894e3c9468f..8c3733608271 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
static void qmi_wwan_disconnect(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct qmi_wwan_state *info = (void *)&dev->data;
+ struct qmi_wwan_state *info;
struct list_head *iter;
struct net_device *ldev;
+ /* called twice if separate control and data intf */
+ if (!dev)
+ return;
+ info = (void *)&dev->data;
if (info->flags & QMI_WWAN_FLAG_MUX) {
if (!rtnl_trylock()) {
restart_syscall();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 96aa7e6cf214..e17baac70f43 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
out:
skb_gro_remcsum_cleanup(skb, &grc);
+ skb->remcsum_offload = 0;
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index b77435783ef3..7eacc1c4b3b1 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
#include "ptp_private.h"
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
kfree(ptp);
}
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+ struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+ aux_work.work);
+ struct ptp_clock_info *info = ptp->info;
+ long delay;
+
+ delay = info->do_aux_work(info);
+
+ if (delay >= 0)
+ kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
/* public interface */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
mutex_init(&ptp->pincfg_mux);
init_waitqueue_head(&ptp->tsev_wq);
+ if (ptp->info->do_aux_work) {
+ char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
+
+ kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+ ptp->kworker = kthread_create_worker(0, worker_name ?
+ worker_name : info->name);
+ kfree(worker_name);
+ if (IS_ERR(ptp->kworker)) {
+ err = PTR_ERR(ptp->kworker);
+ pr_err("failed to create ptp aux_worker %d\n", err);
+ goto kworker_err;
+ }
+ }
+
err = ptp_populate_pin_groups(ptp);
if (err)
goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
+ if (ptp->kworker)
+ kthread_destroy_worker(ptp->kworker);
+kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
+ if (ptp->kworker) {
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+ kthread_destroy_worker(ptp->kworker);
+ }
+
/* Release the clock's resources. */
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
}
EXPORT_SYMBOL(ptp_find_pin);
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+ return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
/* module operations */
static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index d95888974d0c..b86f1bfecd6f 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/posix-clock.h>
#include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
struct attribute_group pin_attr_group;
/* 1st entry is a pointer to the real group, 2nd is NULL terminator */
const struct attribute_group *pin_attr_groups[2];
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work aux_work;
};
/*
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8975cd321390..d42e758518ed 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rtable *rt = (struct rtable *) dst;
__be32 *pkey = &ip_hdr(skb)->daddr;
- if (rt->rt_gateway)
+ if (rt && rt->rt_gateway)
pkey = &rt->rt_gateway;
/* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rt6_info *rt = (struct rt6_info *) dst;
struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
- if (!ipv6_addr_any(&rt->rt6i_gateway))
+ if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
pkey = &rt->rt6i_gateway;
/* IPv6 */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index aad5d81dfb44..b54517c05e9a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -620,6 +620,7 @@ struct mlx4_caps {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
u32 vf_caps;
+ bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
};
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index a026bfd089db..51349d124ee5 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
* parameter func: the desired function to use.
* parameter chan: the function channel index to use.
*
+ * @do_aux_work:  Request driver to perform auxiliary (periodic) operations.
+ *                The driver should return the delay, in jiffies, until the
+ *                next invocation (>= 0), or a negative value if no further
+ *                scheduling is required.
+ *
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -126,6 +131,7 @@ struct ptp_clock_info {
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
+ long (*do_aux_work)(struct ptp_clock_info *ptp);
};
struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @delay: number of jiffies to wait before queuing
+ * See kthread_queue_delayed_work() for more info.
+ */
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+ unsigned long delay)
+{ return -EOPNOTSUPP; }
+
#endif
#endif
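Taken together with the ptp_clock.c changes, a driver opts in by setting .do_aux_work and kicking the worker once; subsequent runs self-schedule through the callback's return value, which is exactly what the cpts conversion above relies on. A hedged sketch of such a driver; struct foo and its functions are hypothetical:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/ptp_clock_kernel.h>

struct foo {
	struct ptp_clock_info info;
	struct ptp_clock *clock;
};

static long foo_aux_work(struct ptp_clock_info *info)
{
	/* struct foo *foo = container_of(info, struct foo, info); */
	/* ... drain timestamp FIFO, check for counter overflow ... */
	return msecs_to_jiffies(500);	/* next run in 500 ms;
					 * return <0 to stop rescheduling */
}

static int foo_register(struct foo *foo, struct device *dev)
{
	foo->info.do_aux_work = foo_aux_work;	/* enables the kworker */
	foo->clock = ptp_clock_register(&foo->info, dev);
	if (IS_ERR(foo->clock))
		return PTR_ERR(foo->clock);
	/* kick the first run now; later runs self-schedule via the
	 * callback's return value */
	return ptp_schedule_worker(foo->clock, 0);
}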
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70483296157f..ada65e767b28 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1916,6 +1916,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
+/* At how many usecs into the future should the RTO fire? */
+static inline s64 tcp_rto_delta_us(const struct sock *sk)
+{
+ const struct sk_buff *skb = tcp_write_queue_head(sk);
+ u32 rto = inet_csk(sk)->icsk_rto;
+ u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+}
+
/*
* Save and compile IPv4 options, return a pointer to it
*/
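tcp_rto_delta_us() answers "how far in the future would the pending RTO fire", measured from the send time of the write-queue head; a negative result means the RTO is already due. A worked example with made-up timestamps:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* made-up values, all in microseconds */
	uint64_t head_sent = 1000000;	/* skb->skb_mstamp of queue head */
	uint64_t rto_us    = 204000;	/* icsk_rto converted to usecs   */
	uint64_t now       = 1100000;	/* tp->tcp_mstamp                */
	int64_t delta_us   = (int64_t)(head_sent + rto_us - now);

	/* 104000 us: the RTO fires 104 ms from now, so a TLP timer
	 * shorter than that is allowed to take its place. */
	printf("rto delta = %lld us\n", (long long)delta_us);
	return 0;
}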
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index e1133bc634b5..8a3ce79b1307 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
return found;
}
+/**
+ * batadv_tt_global_sync_flags - update TT sync flags
+ * @tt_global: the TT global entry to update sync flags in
+ *
+ * Updates the sync flag bits in the tt_global flag attribute with a logical
+ * OR of all sync flags from any of its TT orig entries.
+ */
+static void
+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+ const struct hlist_head *head;
+ u16 flags = BATADV_NO_FLAGS;
+
+ rcu_read_lock();
+ head = &tt_global->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, head, list)
+ flags |= orig_entry->flags;
+ rcu_read_unlock();
+
+ flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
+ tt_global->common.flags = flags;
+}
+
+/**
+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * @tt_global: the TT global entry to add an orig entry in
+ * @orig_node: the originator to add an orig entry for
+ * @ttvn: translation table version number of this changeset
+ * @flags: TT sync flags
+ */
static void
batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
- struct batadv_orig_node *orig_node, int ttvn)
+ struct batadv_orig_node *orig_node, int ttvn,
+ u8 flags)
{
struct batadv_tt_orig_list_entry *orig_entry;
@@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
* was added during a "temporary client detection"
*/
orig_entry->ttvn = ttvn;
- goto out;
+ orig_entry->flags = flags;
+ goto sync_flags;
}
orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
@@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
orig_entry->orig_node = orig_node;
orig_entry->ttvn = ttvn;
+ orig_entry->flags = flags;
kref_init(&orig_entry->refcount);
spin_lock_bh(&tt_global->list_lock);
@@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
spin_unlock_bh(&tt_global->list_lock);
atomic_inc(&tt_global->orig_list_count);
+sync_flags:
+ batadv_tt_global_sync_flags(tt_global);
out:
if (orig_entry)
batadv_tt_orig_list_entry_put(orig_entry);
@@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
}
/* the change can carry possible "attribute" flags like the
- * TT_CLIENT_WIFI, therefore they have to be copied in the
+ * TT_CLIENT_TEMP, therefore they have to be copied in the
* client entry
*/
- common->flags |= flags;
+ common->flags |= flags & (~BATADV_TT_SYNC_MASK);
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
@@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
}
add_orig_entry:
/* add the new orig_entry (if needed) or update it */
- batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
+ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
+ flags & BATADV_TT_SYNC_MASK);
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new global tt entry: %pM (vid: %d, via %pM)\n",
@@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_tt_orig_list_entry *orig,
bool best)
{
+ u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
void *hdr;
struct batadv_orig_node_vlan *vlan;
u8 last_ttvn;
@@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
- nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
goto nla_put_failure;
if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
@@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+ struct batadv_tt_orig_list_entry *tt_orig;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
struct hlist_head *head;
@@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
/* find out if this global entry is announced by this
* originator
*/
- if (!batadv_tt_global_entry_has_orig(tt_global,
- orig_node))
+ tt_orig = batadv_tt_global_orig_entry_find(tt_global,
+ orig_node);
+ if (!tt_orig)
continue;
/* use network order to read the VID: this ensures that
@@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
/* compute the CRC on flags that have to be kept in sync
* among nodes
*/
- flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+ flags = tt_orig->flags;
crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+
+ batadv_tt_orig_list_entry_put(tt_orig);
}
rcu_read_unlock();
}
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ea43a6449247..a62795868794 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry {
* struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
* @orig_node: pointer to orig node announcing this non-mesh client
* @ttvn: translation table version number which added the non-mesh client
+ * @flags: per orig entry TT sync flags
* @list: list node for batadv_tt_global_entry::orig_list
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
@@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry {
struct batadv_tt_orig_list_entry {
struct batadv_orig_node *orig_node;
u8 ttvn;
+ u8 flags;
struct hlist_node list;
struct kref refcount;
struct rcu_head rcu;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8515f8fe0460..ce15a06d5558 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2739,7 +2739,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
if (tx_path)
return skb->ip_summed != CHECKSUM_PARTIAL &&
- skb->ip_summed != CHECKSUM_NONE;
+ skb->ip_summed != CHECKSUM_UNNECESSARY;
return skb->ip_summed == CHECKSUM_NONE;
}
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index c4c6e1969ed0..2ae8f54cb321 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
int taglen;
for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
- if (optptr[0] == IPOPT_CIPSO)
+ switch (optptr[0]) {
+ case IPOPT_CIPSO:
return optptr;
- taglen = optptr[1];
+ case IPOPT_END:
+ return NULL;
+ case IPOPT_NOOP:
+ taglen = 1;
+ break;
+ default:
+ taglen = optptr[1];
+ }
optlen -= taglen;
optptr += taglen;
}
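The CIPSO fix is needed because IPOPT_END and IPOPT_NOOP are single-byte options with no length octet, so the old unconditional taglen = optptr[1] could read a padding or data byte and step through the header incorrectly. A standalone sketch of the corrected walk; the bounds check is added here only to keep the example safe on arbitrary input (the kernel relies on earlier option validation):

#include <stddef.h>
#include <stdio.h>

#define IPOPT_END   0
#define IPOPT_NOOP  1
#define IPOPT_CIPSO 134

static const unsigned char *find_cipso(const unsigned char *opt, int optlen)
{
	while (optlen > 0) {
		int taglen;

		switch (opt[0]) {
		case IPOPT_CIPSO:
			return opt;
		case IPOPT_END:
			return NULL;	/* end of option list */
		case IPOPT_NOOP:
			taglen = 1;	/* one-byte padding, no length octet */
			break;
		default:
			taglen = opt[1];
		}
		if (taglen < 1 || taglen > optlen)
			return NULL;	/* malformed option */
		optlen -= taglen;
		opt += taglen;
	}
	return NULL;
}

int main(void)
{
	unsigned char opts[] = {
		IPOPT_NOOP, IPOPT_CIPSO, 6, 0, 0, 0, 0, IPOPT_END
	};

	printf("cipso at offset %td\n",
	       find_cipso(opts, sizeof(opts)) - opts);
	return 0;
}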
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8e0257d01200..1540db65241a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -450,6 +450,7 @@ out_unlock:
out:
NAPI_GRO_CB(skb)->flush |= flush;
skb_gro_remcsum_cleanup(skb, &grc);
+ skb->remcsum_offload = 0;
return pp;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2920e0cb09f8..53de1424c13c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -107,6 +107,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
return;
/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
tp->snd_cwnd = tp->snd_ssthresh;
tp->snd_cwnd_stamp = tcp_jiffies32;
}
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
/* Offset the time elapsed after installing regular RTO */
if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
- struct sk_buff *skb = tcp_write_queue_head(sk);
- u64 rto_time_stamp = skb->skb_mstamp +
- jiffies_to_usecs(rto);
- s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+ s64 delta_us = tcp_rto_delta_us(sk);
/* delta_us may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled.
*/
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
}
}
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+ if (!tcp_schedule_loss_probe(sk))
+ tcp_rearm_rto(sk);
+}
+
/* If we get here, the whole TSO packet has not been acked. */
static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
{
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
ca_rtt_us, sack->rate);
if (flag & FLAG_ACKED) {
- tcp_rearm_rto(sk);
+ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
if (unlikely(icsk->icsk_mtup.probe_size &&
!after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
*/
- tcp_rearm_rto(sk);
+ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
}
if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3580,9 +3585,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (after(ack, tp->snd_nxt))
goto invalid_ack;
- if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
- tcp_rearm_rto(sk);
-
if (after(ack, prior_snd_una)) {
flag |= FLAG_SND_UNA_ADVANCED;
icsk->icsk_retransmits = 0;
@@ -3647,18 +3649,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
&sack_state);
+ if (tp->tlp_high_seq)
+ tcp_process_tlp_ack(sk, ack, flag);
+ /* If needed, reset TLP/RTO timer; RACK may later override this. */
+ if (flag & FLAG_SET_XMIT_TIMER)
+ tcp_set_xmit_timer(sk);
+
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
}
- if (tp->tlp_high_seq)
- tcp_process_tlp_ack(sk, ack, flag);
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
sk_dst_confirm(sk);
- if (icsk->icsk_pending == ICSK_TIME_RETRANS)
- tcp_schedule_loss_probe(sk);
delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
lost = tp->lost - lost; /* freshly marked lost */
tcp_rate_gen(sk, delivered, lost, sack_state.rate);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2f1588bf73da..b7661a68d498 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2377,24 +2377,15 @@ bool tcp_schedule_loss_probe(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- u32 timeout, tlp_time_stamp, rto_time_stamp;
u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
+ u32 timeout, rto_delta_us;
- /* No consecutive loss probes. */
- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
- tcp_rearm_rto(sk);
- return false;
- }
/* Don't do any loss probe on a Fast Open connection before 3WHS
* finishes.
*/
if (tp->fastopen_rsk)
return false;
- /* TLP is only scheduled when next timer event is RTO. */
- if (icsk->icsk_pending != ICSK_TIME_RETRANS)
- return false;
-
/* Schedule a loss probe in 2*RTT for SACK capable connections
* in Open state, that are either limited by cwnd or application.
*/
@@ -2417,14 +2408,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
(rtt + (rtt >> 1) + TCP_DELACK_MAX));
timeout = max_t(u32, timeout, msecs_to_jiffies(10));
- /* If RTO is shorter, just schedule TLP in its place. */
- tlp_time_stamp = tcp_jiffies32 + timeout;
- rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
- if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
- s32 delta = rto_time_stamp - tcp_jiffies32;
- if (delta > 0)
- timeout = delta;
- }
+ /* If the RTO formula yields an earlier time, then use that time. */
+ rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */
+ if (rto_delta_us > 0)
+ timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
TCP_RTO_MAX);
@@ -3449,6 +3436,10 @@ int tcp_connect(struct sock *sk)
int err;
tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+
+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+ return -EHOSTUNREACH; /* Routing failure or similar. */
+
tcp_connect_init(sk);
if (unlikely(tp->repair)) {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c0feeeef962a..e906014890b6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -652,7 +652,8 @@ static void tcp_keepalive_timer (unsigned long data)
goto death;
}
- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+ if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
goto out;
elapsed = keepalive_time_when(tp);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 781250151d40..0932c85b42af 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If there is no outer header we can fake a checksum offload
* due to the fact that we have already done the checksum in
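
Both UFO fragmentation paths (IPv4 here, IPv6 below) stop downgrading the skb to CHECKSUM_NONE: at this point the UDP checksum has already been computed in software, and CHECKSUM_UNNECESSARY records that fact so later code does not try to checksum the packet again. Roughly, the ip_summed states mean:

/* skb->ip_summed, simplified:
 *  CHECKSUM_NONE        - no checksum yet; software must compute one
 *  CHECKSUM_UNNECESSARY - checksum already known valid; nothing to do
 *  CHECKSUM_PARTIAL     - csum_start/csum_offset set; device finishes it
 *  CHECKSUM_COMPLETE    - device supplied a full packet checksum (rx)
 */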
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4d30c96a819d..a640fbcba15d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2351,6 +2351,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
if (on_link)
nrt->rt6i_flags &= ~RTF_GATEWAY;
+ nrt->rt6i_protocol = RTPROT_REDIRECT;
nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
if (ip6_ins_rt(nrt))
@@ -2461,6 +2462,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
.fc_dst_len = prefixlen,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
RTF_UP | RTF_PREF(pref),
+ .fc_protocol = RTPROT_RA,
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = net,
@@ -2513,6 +2515,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
.fc_ifindex = dev->ifindex,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
+ .fc_protocol = RTPROT_RA,
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = dev_net(dev),
@@ -3424,14 +3427,6 @@ static int rt6_fill_node(struct net *net,
rtm->rtm_flags = 0;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->rt6i_protocol;
- if (rt->rt6i_flags & RTF_DYNAMIC)
- rtm->rtm_protocol = RTPROT_REDIRECT;
- else if (rt->rt6i_flags & RTF_ADDRCONF) {
- if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
- rtm->rtm_protocol = RTPROT_RA;
- else
- rtm->rtm_protocol = RTPROT_KERNEL;
- }
if (rt->rt6i_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
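
With rt6i_protocol now stamped when the route is created (RTPROT_REDIRECT for ICMPv6 redirects, RTPROT_RA for RA-learned routes), rt6_fill_node() can report the stored value directly instead of re-deriving it from RTF_* flags. For reference, the relevant rtnetlink origin constants from include/uapi/linux/rtnetlink.h:

#define RTPROT_REDIRECT 1	/* installed by ICMP redirect */
#define RTPROT_KERNEL   2	/* installed by the kernel itself */
#define RTPROT_BOOT     3	/* installed during boot */
#define RTPROT_STATIC   4	/* installed by the administrator */
#define RTPROT_RA       9	/* RDISC/ND router advertisements */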
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a2267f80febb..e7d378c032cb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If there is no outer header we can fake a checksum offload
* due to the fact that we have already done the checksum in
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index e10624aa6959..9722bf839d9d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
if (rds_ib_ring_empty(&ic->i_recv_ring))
rds_ib_stats_inc(s_ib_rx_ring_empty);
- if (rds_ib_ring_low(&ic->i_recv_ring))
+ if (rds_ib_ring_low(&ic->i_recv_ring)) {
rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
+ rds_ib_stats_inc(s_ib_rx_refill_from_cq);
+ }
}
int rds_ib_recv_path(struct rds_conn_path *cp)
@@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
if (rds_conn_up(conn)) {
rds_ib_attempt_ack(ic);
rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+ rds_ib_stats_inc(s_ib_rx_refill_from_thread);
}
return ret;
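
The two new counters make the refill source visible in the RDS statistics; a skewed ratio shows which path is keeping the receive ring full:

/* Same refill routine, different allocation context:
 *  - CQ completion handler (softirq):  rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
 *  - rds_ib_recv_path (process ctx):   rds_ib_recv_refill(conn, 0, GFP_KERNEL);
 * Each path now bumps its own counter, s_ib_rx_refill_from_cq or
 * s_ib_rx_refill_from_thread. */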
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 36f0ced9e60c..94ba5cfab860 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;
-static int ipt_init_target(struct xt_entry_target *t, char *table,
- unsigned int hook)
+static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+ char *table, unsigned int hook)
{
struct xt_tgchk_param par;
struct xt_target *target;
@@ -49,6 +49,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
return PTR_ERR(target);
t->u.kernel.target = target;
+ par.net = net;
par.table = table;
par.entryinfo = NULL;
par.target = target;
@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
[TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
};
-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
const struct tc_action_ops *ops, int ovr, int bind)
{
+ struct tc_action_net *tn = net_generic(net, id);
struct nlattr *tb[TCA_IPT_MAX + 1];
struct tcf_ipt *ipt;
struct xt_entry_target *td, *t;
@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
if (unlikely(!t))
goto err2;
- err = ipt_init_target(t, tname, hook);
+ err = ipt_init_target(net, t, tname, hook);
if (err < 0)
goto err3;
@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
int bind)
{
- struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
- return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
+ return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
+ bind);
}
static int tcf_xt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
int bind)
{
- struct tc_action_net *tn = net_generic(net, xt_net_id);
-
- return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
+ return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
+ bind);
}
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
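
__tcf_ipt_init() now takes the struct net plus a pernet id instead of a pre-resolved tc_action_net, because it must also hand the namespace down to ipt_init_target(): xt target checkentry hooks need par.net. The net_generic() lookup it performs is the standard per-netns data pattern; a minimal sketch, with demo_net and demo_net_id as hypothetical names:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct demo_net { int refcnt; };	/* hypothetical per-netns state */
static unsigned int demo_net_id;	/* assigned at pernet registration */

static struct demo_net *demo_pernet(struct net *net)
{
	/* net_generic() returns the slot registered under demo_net_id */
	return net_generic(net, demo_net_id);
}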
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index 7598361ef1f1..da2172ff9662 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -11,6 +11,8 @@
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
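
The s390 syscall number (351) is appended to the same fallback chain in both this feature probe and libbpf itself (next hunk). The number matters because these tools invoke bpf(2) directly rather than through a libc wrapper; a sketch of the usual call, assuming __NR_bpf comes from the #define chain above:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Raw bpf(2) invocation; no glibc wrapper existed for this syscall. */
static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}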
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 256f571f2ab5..e5bbb090bf88 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -39,6 +39,8 @@
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c
index 71729d47eb85..7956302ecdf2 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/test_pkt_md_access.c
@@ -12,12 +12,23 @@
int _version SEC("version") = 1;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
TYPE tmp = *(volatile TYPE *)&skb->FIELD; \
if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
return TC_ACT_SHOT; \
}
+#else
+#define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b))
+#define TEST_FIELD(TYPE, FIELD, MASK) \
+ { \
+ TYPE tmp = *((volatile TYPE *)&skb->FIELD + \
+ TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \
+ if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
+ return TC_ACT_SHOT; \
+ }
+#endif
SEC("test1")
int process(struct __sk_buff *skb)
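
On big-endian targets the added TEST_FIELD variant must index past the high-order bytes to reach the same low-order bytes that the little-endian variant reads at offset 0. The offset arithmetic, worked through for a 4-byte skb field:

/* TEST_FIELD_OFFSET(a, b) = (sizeof(a) - sizeof(b)) / sizeof(b),
 * with a = skb->FIELD (4 bytes here):
 *   TYPE = __u8  -> (4 - 1) / 1 = 3  (the LSB is byte 3 on big-endian)
 *   TYPE = __u16 -> (4 - 2) / 2 = 1  (the low half is halfword 1)
 * so both endiannesses end up comparing the same masked low bits. */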
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index addea82f76c9..d3ed7324105e 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,7 @@
* License as published by the Free Software Foundation.
*/
+#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
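
The switch from "#ifdef __LITTLE_ENDIAN" to "#if __BYTE_ORDER == __LITTLE_ENDIAN" (plus the new <endian.h> include) in the hunks below matters because glibc defines both endianness macros as plain constants on every architecture, so the #ifdef form is always true, even on s390:

/* From glibc's <endian.h> (these values are standard):
 *   #define __LITTLE_ENDIAN 1234
 *   #define __BIG_ENDIAN    4321
 *   #define __BYTE_ORDER    __BIG_ENDIAN   (on s390, for example)
 * Only a comparison against __BYTE_ORDER selects the right branch: */
#if __BYTE_ORDER == __LITTLE_ENDIAN
	/* little-endian-only test instructions */
#endif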
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
"check skb->hash byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
"check skb->hash byte load not permitted 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
"check skb->hash half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
"check skb->hash half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#else
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
"check bpf_perf_event_data->sample_period byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
"check bpf_perf_event_data->sample_period half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
"check bpf_perf_event_data->sample_period word load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
"check skb->data half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data)),
#else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
"check skb->tc_classid half load not permitted for lwt prog",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
#else