Merge tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Aug 2017 05:33:47 +0000 (22:33 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Aug 2017 05:33:47 +0000 (22:33 -0700)
Pull drm fixes from Dave Airlie:
 "Nothing too earth shattering here, it just seems like lots of little
  things all over the place.

  msm probably has the largest number of changes, but they all seem
  fine; otherwise, some rockchip, i915, etnaviv and exynos fixes, along
  with one nouveau regression fix for some older GPUs"

* tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux: (35 commits)
  drm/nouveau/disp/nv04: avoid creation of output paths
  drm: make DRM_STM default n
  drm/exynos: forbid creating framebuffers from too small GEM buffers
  drm/etnaviv: Fix off-by-one error in reloc checking
  drm/i915: fix backlight invert for non-zero minimum brightness
  drm/i915/shrinker: Wrap need_resched() inside preempt-disable
  drm/i915/perf: fix flex eu registers programming
  drm/i915: Fix out-of-bounds array access in bdw_load_gamma_lut
  drm/i915/gvt: Change the max length of mmio_reg_rw from 4 to 8
  drm/i915/gvt: Initialize MMIO Block with HW state
  drm/rockchip: vop: report error when check resource error
  drm/rockchip: vop: round_up pitches to word align
  drm/rockchip: vop: fix NV12 video display error
  drm/rockchip: vop: fix iommu page fault when resume
  drm/i915/gvt: clean workload queue if error happened
  drm/i915/gvt: change resetting to resetting_eng
  drm/msm: gpu: don't abuse dma_alloc for non-DMA allocations
  drm/msm: gpu: call qcom_mdt interfaces only for ARCH_QCOM
  drm/msm/adreno: Prevent unclocked access when retrieving timestamps
  drm/msm: Remove __user from __u64 data types
  ...

162 files changed:
Documentation/gpio/gpio-legacy.txt
MAINTAINERS
arch/arm/include/asm/tlb.h
arch/ia64/include/asm/tlb.h
arch/mips/net/ebpf_jit.c [new file with mode: 0644]
arch/s390/include/asm/tlb.h
arch/s390/net/bpf_jit_comp.c
arch/sh/include/asm/tlb.h
arch/sparc/include/asm/spitfire.h
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/head_64.S
arch/sparc/kernel/setup_64.c
arch/sparc/mm/init_64.c
arch/um/include/asm/tlb.h
arch/xtensa/include/asm/Kbuild
arch/xtensa/include/asm/device.h [deleted file]
arch/xtensa/include/asm/param.h [deleted file]
arch/xtensa/kernel/xtensa_ksyms.c
arch/xtensa/mm/cache.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/blk-mq.c
drivers/block/sunvdc.c
drivers/block/zram/zram_drv.c
drivers/char/random.c
drivers/crypto/inside-secure/safexcel_hash.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/i2c-core-acpi.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core.h
drivers/i2c/muxes/Kconfig
drivers/infiniband/core/addr.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/isdn/hysdn/hysdn_proclog.c
drivers/mtd/nand/atmel/nand-controller.c
drivers/mtd/nand/atmel/pmecc.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_timings.c
drivers/mtd/nand/sunxi_nand.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mt7530.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/cpts.h
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/asix.h
drivers/net/usb/asix_common.c
drivers/net/usb/asix_devices.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/pci/pci.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
drivers/pinctrl/zte/pinctrl-zx.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_private.h
drivers/s390/net/qeth_l3_main.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_main.c
drivers/scsi/sg.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/xhci-pci.c
fs/proc/meminfo.c
fs/proc/task_mmu.c
fs/userfaultfd.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_log_cil.c
include/asm-generic/tlb.h
include/linux/cpuhotplug.h
include/linux/device.h
include/linux/i2c.h
include/linux/mlx4/device.h
include/linux/mlx5/qp.h
include/linux/mm_types.h
include/linux/mtd/nand.h
include/linux/pci.h
include/linux/pinctrl/pinconf-generic.h
include/linux/ptp_clock_kernel.h
include/net/tcp.h
kernel/fork.c
kernel/futex.c
kernel/power/snapshot.c
lib/fault-inject.c
lib/test_kmod.c
mm/balloon_compaction.c
mm/debug.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memory.c
mm/migrate.c
mm/mprotect.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/util.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/core/dev.c
net/ipv4/af_inet.c
net/ipv4/cipso_ipv4.c
net/ipv4/fou.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/udp_offload.c
net/packet/af_packet.c
net/rds/ib_recv.c
net/sched/act_ipt.c
net/tipc/node.c
scripts/get_maintainer.pl
scripts/parse-maintainers.pl
tools/build/feature/test-bpf.c
tools/lib/bpf/bpf.c
tools/testing/selftests/bpf/test_pkt_md_access.c
tools/testing/selftests/bpf/test_verifier.c

index b34fd94f70898a7f65c2a0313349588411eb8e81..5eacc147ea870c80bb06c38d43bd5b662c171194 100644
@@ -459,7 +459,7 @@ pin controller?
 
 This is done by registering "ranges" of pins, which are essentially
 cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst
 
 While the pin allocation is totally managed by the pinctrl subsystem,
 gpio (under gpiolib) is still maintained by gpio drivers. It may happen
index 44cb004c765d5bc3e9b71844b08fca7f204cae61..6f7721d1634c2eb7247538f2cb4d85fa1be1a458 100644
@@ -1161,7 +1161,7 @@ M:        Brendan Higgins <brendanhiggins@google.com>
 R:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
 R:     Joel Stanley <joel@jms.id.au>
 L:     linux-i2c@vger.kernel.org
-L:     openbmc@lists.ozlabs.org
+L:     openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/irqchip/irq-aspeed-i2c-ic.c
 F:     drivers/i2c/busses/i2c-aspeed.c
@@ -5834,7 +5834,7 @@ F:        drivers/staging/greybus/spi.c
 F:     drivers/staging/greybus/spilib.c
 F:     drivers/staging/greybus/spilib.h
 
-GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
+GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
 M:     Bryan O'Donoghue <pure.logic@nexus-software.ie>
 S:     Maintained
 F:     drivers/staging/greybus/loopback.c
@@ -10383,7 +10383,7 @@ L:      linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/
-F:     Documentation/pinctrl.txt
+F:     Documentation/driver-api/pinctl.rst
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
 
@@ -14004,6 +14004,7 @@ F:      drivers/block/virtio_blk.c
 F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
+F:     mm/balloon_compaction.c
 
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
index 3f2eb76243e3c5f9d387959acae740ce871e5afa..d5562f9ce60079139d360e5d6afac59469051454 100644
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->range_start = start;
+               tlb->range_end = end;
+       }
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
index fced197b96264e01b20743e90706ed20cf30b242..cbe5ac3699bf0f9dbdfd726c112f6fc6bd1271f0 100644
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force)
+               tlb->need_flush = 1;
        /*
         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
         * tlb->end_addr.
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 0000000..3f87b96
--- /dev/null
@@ -0,0 +1,1950 @@
+/*
+ * Just-In-Time compiler for eBPF filters on MIPS
+ *
+ * Copyright (c) 2017 Cavium, Inc.
+ *
+ * Based on code from:
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO    0
+#define MIPS_R_AT      1
+#define MIPS_R_V0      2       /* BPF_R0 */
+#define MIPS_R_V1      3
+#define MIPS_R_A0      4       /* BPF_R1 */
+#define MIPS_R_A1      5       /* BPF_R2 */
+#define MIPS_R_A2      6       /* BPF_R3 */
+#define MIPS_R_A3      7       /* BPF_R4 */
+#define MIPS_R_A4      8       /* BPF_R5 */
+#define MIPS_R_T4      12      /* BPF_AX */
+#define MIPS_R_T5      13
+#define MIPS_R_T6      14
+#define MIPS_R_T7      15
+#define MIPS_R_S0      16      /* BPF_R6 */
+#define MIPS_R_S1      17      /* BPF_R7 */
+#define MIPS_R_S2      18      /* BPF_R8 */
+#define MIPS_R_S3      19      /* BPF_R9 */
+#define MIPS_R_S4      20      /* BPF_TCC */
+#define MIPS_R_S5      21
+#define MIPS_R_S6      22
+#define MIPS_R_S7      23
+#define MIPS_R_T8      24
+#define MIPS_R_T9      25
+#define MIPS_R_SP      29
+#define MIPS_R_RA      31
+
+/* eBPF flags */
+#define EBPF_SAVE_S0   BIT(0)
+#define EBPF_SAVE_S1   BIT(1)
+#define EBPF_SAVE_S2   BIT(2)
+#define EBPF_SAVE_S3   BIT(3)
+#define EBPF_SAVE_S4   BIT(4)
+#define EBPF_SAVE_RA   BIT(5)
+#define EBPF_SEEN_FP   BIT(6)
+#define EBPF_SEEN_TC   BIT(7)
+#define EBPF_TCC_IN_V1 BIT(8)
+
+/*
+ * For the mips64 ISA, we need to track the value range or type for
+ * each JIT register.  The BPF machine requires zero extended 32-bit
+ * values, but the mips64 ISA requires sign extended 32-bit values.
+ * At each point in the BPF program we track the state of every
+ * register so that we can zero extend or sign extend as the BPF
+ * semantics require.
+ */
+enum reg_val_type {
+       /* uninitialized */
+       REG_UNKNOWN,
+       /* not known to be 32-bit compatible. */
+       REG_64BIT,
+       /* 32-bit compatible, no truncation needed for 64-bit ops. */
+       REG_64BIT_32BIT,
+       /* 32-bit compatible, need truncation for 64-bit ops. */
+       REG_32BIT,
+       /* 32-bit zero extended. */
+       REG_32BIT_ZERO_EX,
+       /* 32-bit no sign/zero extension needed. */
+       REG_32BIT_POS
+};
+
+/*
+ * high bit of offsets indicates if long branch conversion done at
+ * this insn.
+ */
+#define OFFSETS_B_CONV BIT(31)
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf:               The bpf_prog being compiled
+ * @stack_size:                eBPF stack size
+ * @tmp_offset:                eBPF $sp offset to 8-byte temporary memory
+ * @idx:               Instruction index
+ * @flags:             JIT flags
+ * @offsets:           Instruction offsets
+ * @target:            Memory location for the compiled filter
+ * @reg_val_types:     Packed enum reg_val_type for each register.
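+ * @long_b_conversion: Set when a branch had to be converted to a
+ *                     long-jump sequence
+ * @gen_b_offsets:     Compute real branch offsets on this pass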
+ */
+struct jit_ctx {
+       const struct bpf_prog *skf;
+       int stack_size;
+       int tmp_offset;
+       u32 idx;
+       u32 flags;
+       u32 *offsets;
+       u32 *target;
+       u64 *reg_val_types;
+       unsigned int long_b_conversion:1;
+       unsigned int gen_b_offsets:1;
+};
+
+static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
+{
+       *rvt &= ~(7ull << (reg * 3));
+       *rvt |= ((u64)type << (reg * 3));
+}
+
+static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
+                                         int index, int reg)
+{
+       return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
+}
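
As a minimal standalone sketch of the 3-bit packing that set_reg_val_type()
and get_reg_val_type() implement (the register numbers and types below are
illustrative only): twelve tracked registers at 3 bits apiece need just 36
bits of each u64.

#include <assert.h>
#include <stdint.h>

enum reg_val_type { REG_UNKNOWN, REG_64BIT, REG_64BIT_32BIT, REG_32BIT,
                    REG_32BIT_ZERO_EX, REG_32BIT_POS };

static void set_type(uint64_t *rvt, int reg, enum reg_val_type type)
{
        *rvt &= ~(7ull << (reg * 3));          /* clear the register's slot */
        *rvt |= ((uint64_t)type << (reg * 3)); /* store the new type */
}

int main(void)
{
        uint64_t rvt = 0;

        set_type(&rvt, 0, REG_32BIT);   /* BPF_R0 */
        set_type(&rvt, 10, REG_64BIT);  /* BPF_REG_10 */
        assert(((rvt >> (0 * 3)) & 7) == REG_32BIT);
        assert(((rvt >> (10 * 3)) & 7) == REG_64BIT);
        return 0;
}
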
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...)                     \
+do {                                                   \
+       if ((ctx)->target != NULL) {                    \
+               u32 *p = &(ctx)->target[ctx->idx];      \
+               uasm_i_##func(&p, ##__VA_ARGS__);       \
+       }                                               \
+       (ctx)->idx++;                                   \
+} while (0)
+
+static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
+{
+       unsigned long target_va, base_va;
+       unsigned int r;
+
+       if (!ctx->target)
+               return 0;
+
+       base_va = (unsigned long)ctx->target;
+       target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
+
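+       /*
+        * The MIPS j instruction can only reach targets within the
+        * same 256 MB (28-bit) region as the branch site, hence the
+        * check below.
+        */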
+       if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
+               return (unsigned int)-1;
+       r = target_va & 0x0ffffffful;
+       return r;
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+       if (!ctx->gen_b_offsets)
+               return 0;
+
+       /*
+        * We want a pc-relative branch.  tgt is the instruction offset
+        * we want to jump to.
+        *
+        * Branch on MIPS:
+        * I: target_offset <- sign_extend(offset)
+        * I+1: PC += target_offset (delay slot)
+        *
+        * ctx->idx currently points to the branch instruction
+        * but the offset is added to the delay slot so we need
+        * to subtract 4.
+        */
+       return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
+               (ctx->idx * 4) - 4;
+}
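
A standalone check of the delay-slot arithmetic above, with illustrative
offsets rather than values from a real program:

#include <assert.h>

int main(void)
{
        int target_off = 60; /* byte offset of the target instruction */
        int branch_idx = 10; /* branch insn sits at byte 10 * 4 = 40 */
        int b_off = target_off - (branch_idx * 4) - 4;

        /* Hardware adds b_off to the delay-slot PC (44), landing on 60. */
        assert(branch_idx * 4 + 4 + b_off == target_off);
        return 0;
}
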
+
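+/* Toggled via the net.core.bpf_jit_enable sysctl. */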
+int bpf_jit_enable __read_mostly;
+
+enum which_ebpf_reg {
+       src_reg,
+       src_reg_no_fp,
+       dst_reg,
+       dst_reg_fp_ok
+};
+
+/*
+ * For eBPF, the register mapping naturally falls out of the
+ * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
+ * separate frame pointer, so BPF_REG_10 relative accesses are
+ * adjusted to be $sp relative.
+ */
+int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
+                    enum which_ebpf_reg w)
+{
+       int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
+               insn->src_reg : insn->dst_reg;
+
+       switch (ebpf_reg) {
+       case BPF_REG_0:
+               return MIPS_R_V0;
+       case BPF_REG_1:
+               return MIPS_R_A0;
+       case BPF_REG_2:
+               return MIPS_R_A1;
+       case BPF_REG_3:
+               return MIPS_R_A2;
+       case BPF_REG_4:
+               return MIPS_R_A3;
+       case BPF_REG_5:
+               return MIPS_R_A4;
+       case BPF_REG_6:
+               ctx->flags |= EBPF_SAVE_S0;
+               return MIPS_R_S0;
+       case BPF_REG_7:
+               ctx->flags |= EBPF_SAVE_S1;
+               return MIPS_R_S1;
+       case BPF_REG_8:
+               ctx->flags |= EBPF_SAVE_S2;
+               return MIPS_R_S2;
+       case BPF_REG_9:
+               ctx->flags |= EBPF_SAVE_S3;
+               return MIPS_R_S3;
+       case BPF_REG_10:
+               if (w == dst_reg || w == src_reg_no_fp)
+                       goto bad_reg;
+               ctx->flags |= EBPF_SEEN_FP;
+               /*
+                * Needs special handling, return something that
+                * cannot be clobbered just in case.
+                */
+               return MIPS_R_ZERO;
+       case BPF_REG_AX:
+               return MIPS_R_T4;
+       default:
+bad_reg:
+               WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
+               return -EINVAL;
+       }
+}
+
+/*
+ * eBPF stack frame will be something like:
+ *
+ *  Entry $sp ------>   +--------------------------------+
+ *                      |   $ra  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s0  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s1  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s2  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s3  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s4  (optional)              |
+ *                      +--------------------------------+
+ *                      |   tmp-storage  (if $ra saved)  |
+ * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
+ *                      |   BPF_REG_10 relative storage  |
+ *                      |    MAX_BPF_STACK (optional)    |
+ *                      |      .                         |
+ *                      |      .                         |
+ *                      |      .                         |
+ *     $sp -------->    +--------------------------------+
+ *
+ * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
+ * area is not allocated.
+ */
+static int gen_int_prologue(struct jit_ctx *ctx)
+{
+       int stack_adjust = 0;
+       int store_offset;
+       int locals_size;
+
+       if (ctx->flags & EBPF_SAVE_RA)
+               /*
+                * If $ra is saved, we are doing a function call and
+                * may need an extra 8-byte tmp area.
+                */
+               stack_adjust += 16;
+       if (ctx->flags & EBPF_SAVE_S0)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S1)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S2)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S3)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S4)
+               stack_adjust += 8;
+
+       BUILD_BUG_ON(MAX_BPF_STACK & 7);
+       locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
+
+       stack_adjust += locals_size;
+       ctx->tmp_offset = locals_size;
+
+       ctx->stack_size = stack_adjust;
+
+       /*
+        * First instruction initializes the tail call count (TCC).
+        * On tail call we skip this instruction, and the TCC is
+        * passed in $v1 from the caller.
+        */
+       emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+       if (stack_adjust)
+               emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+       else
+               return 0;
+
+       store_offset = stack_adjust - 8;
+
+       if (ctx->flags & EBPF_SAVE_RA) {
+               emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S0) {
+               emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S1) {
+               emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S2) {
+               emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S3) {
+               emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S4) {
+               emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+
+       if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
+               emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+
+       return 0;
+}
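
As a worked example of the frame sizing in gen_int_prologue(), assume a
program that calls a helper (EBPF_SAVE_RA), uses $s0 (EBPF_SAVE_S0) and
dereferences BPF_REG_10 (EBPF_SEEN_FP); MAX_BPF_STACK is 512 bytes in this
kernel:

#include <assert.h>

int main(void)
{
        int stack_adjust = 0;

        stack_adjust += 16;  /* $ra slot plus the 8-byte tmp area */
        stack_adjust += 8;   /* $s0 slot */
        stack_adjust += 512; /* MAX_BPF_STACK locals for BPF_REG_10 */

        assert(stack_adjust == 536); /* and tmp_offset == 512 */
        return 0;
}
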
+
+static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       int stack_adjust = ctx->stack_size;
+       int store_offset = stack_adjust - 8;
+       int r0 = MIPS_R_V0;
+
+       if (dest_reg == MIPS_R_RA &&
+           get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+               /* Don't let zero extended value escape. */
+               emit_instr(ctx, sll, r0, r0, 0);
+
+       if (ctx->flags & EBPF_SAVE_RA) {
+               emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S0) {
+               emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S1) {
+               emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S2) {
+               emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S3) {
+               emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S4) {
+               emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       emit_instr(ctx, jr, dest_reg);
+
+       if (stack_adjust)
+               emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
+       else
+               emit_instr(ctx, nop);
+
+       return 0;
+}
+
+static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
+                          struct jit_ctx *ctx)
+{
+       if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+               emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
+       } else {
+               int lower = (s16)(insn->imm & 0xffff);
+               int upper = insn->imm - lower;
+
+               emit_instr(ctx, lui, reg, upper >> 16);
+               emit_instr(ctx, addiu, reg, reg, lower);
+       }
+}
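
The subtlety in gen_imm_to_reg() is that addiu sign-extends its 16-bit
immediate, so when bit 15 of the low half is set the lui value must
compensate. A standalone sketch with one illustrative constant:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int32_t imm = 0x1234abcd;                /* bit 15 of low half set */
        int32_t lower = (int16_t)(imm & 0xffff); /* sign-extends to -21555 */
        int32_t upper = imm - lower;             /* 0x12350000 compensates */

        int32_t reg = upper; /* lui reg, upper >> 16 */
        reg += lower;        /* addiu reg, reg, lower */
        assert(reg == imm);
        return 0;
}
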
+
+static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+                       int idx)
+{
+       int upper_bound, lower_bound;
+       int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+
+       if (dst < 0)
+               return dst;
+
+       switch (BPF_OP(insn->code)) {
+       case BPF_MOV:
+       case BPF_ADD:
+               upper_bound = S16_MAX;
+               lower_bound = S16_MIN;
+               break;
+       case BPF_SUB:
+               upper_bound = -(int)S16_MIN;
+               lower_bound = -(int)S16_MAX;
+               break;
+       case BPF_AND:
+       case BPF_OR:
+       case BPF_XOR:
+               upper_bound = 0xffff;
+               lower_bound = 0;
+               break;
+       case BPF_RSH:
+       case BPF_LSH:
+       case BPF_ARSH:
+               /* Shift amounts are truncated, no need for bounds */
+               upper_bound = S32_MAX;
+               lower_bound = S32_MIN;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /*
+        * Immediate move clobbers the register, so no sign/zero
+        * extension needed.
+        */
+       if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+           BPF_OP(insn->code) != BPF_MOV &&
+           get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
+               emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+       /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
+       if (BPF_CLASS(insn->code) == BPF_ALU &&
+           BPF_OP(insn->code) != BPF_LSH &&
+           BPF_OP(insn->code) != BPF_MOV &&
+           get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
+               emit_instr(ctx, sll, dst, dst, 0);
+
+       if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
+               /* single insn immediate case */
+               switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+               case BPF_ALU64 | BPF_MOV:
+                       emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_AND:
+               case BPF_ALU | BPF_AND:
+                       emit_instr(ctx, andi, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_OR:
+               case BPF_ALU | BPF_OR:
+                       emit_instr(ctx, ori, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_XOR:
+               case BPF_ALU | BPF_XOR:
+                       emit_instr(ctx, xori, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_ADD:
+                       emit_instr(ctx, daddiu, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_SUB:
+                       emit_instr(ctx, daddiu, dst, dst, -insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_RSH:
+                       emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
+                       break;
+               case BPF_ALU | BPF_RSH:
+                       emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
+                       break;
+               case BPF_ALU64 | BPF_LSH:
+                       emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
+                       break;
+               case BPF_ALU | BPF_LSH:
+                       emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
+                       break;
+               case BPF_ALU64 | BPF_ARSH:
+                       emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
+                       break;
+               case BPF_ALU | BPF_ARSH:
+                       emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
+                       break;
+               case BPF_ALU | BPF_MOV:
+                       emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
+                       break;
+               case BPF_ALU | BPF_ADD:
+                       emit_instr(ctx, addiu, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU | BPF_SUB:
+                       emit_instr(ctx, addiu, dst, dst, -insn->imm);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else {
+               /* multi insn immediate case */
+               if (BPF_OP(insn->code) == BPF_MOV) {
+                       gen_imm_to_reg(insn, dst, ctx);
+               } else {
+                       gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+                       switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+                       case BPF_ALU64 | BPF_AND:
+                       case BPF_ALU | BPF_AND:
+                               emit_instr(ctx, and, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_OR:
+                       case BPF_ALU | BPF_OR:
+                               emit_instr(ctx, or, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_XOR:
+                       case BPF_ALU | BPF_XOR:
+                               emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_ADD:
+                               emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_SUB:
+                               emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU | BPF_ADD:
+                               emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU | BPF_SUB:
+                               emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static void * __must_check
+ool_skb_header_pointer(const struct sk_buff *skb, int offset,
+                      int len, void *buffer)
+{
+       return skb_header_pointer(skb, offset, len, buffer);
+}
+
+static int size_to_len(const struct bpf_insn *insn)
+{
+       switch (BPF_SIZE(insn->code)) {
+       case BPF_B:
+               return 1;
+       case BPF_H:
+               return 2;
+       case BPF_W:
+               return 4;
+       case BPF_DW:
+               return 8;
+       }
+       return 0;
+}
+
+static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
+{
+       if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
+               emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
+       } else if (value >= 0xffffffff80000000ull ||
+                  (value < 0x80000000 && value > 0xffff)) {
+               emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
+               emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
+       } else {
+               int i;
+               bool seen_part = false;
+               int needed_shift = 0;
+
+               for (i = 0; i < 4; i++) {
+                       u64 part = (value >> (16 * (3 - i))) & 0xffff;
+
+                       if (seen_part && needed_shift > 0 && (part || i == 3)) {
+                               emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
+                               needed_shift = 0;
+                       }
+                       if (part) {
+                               if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
+                                       emit_instr(ctx, lui, dst, (s32)(s16)part);
+                                       needed_shift = -16;
+                               } else {
+                                       emit_instr(ctx, ori, dst,
+                                                  seen_part ? dst : MIPS_R_ZERO,
+                                                  (unsigned int)part);
+                               }
+                               seen_part = true;
+                       }
+                       if (seen_part)
+                               needed_shift += 16;
+               }
+       }
+}
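
A hypothetical classifier mirroring the three cases of emit_const_to_reg(),
showing which instruction sequence a given 64-bit constant gets (the sample
values are illustrative):

#include <stdint.h>
#include <stdio.h>

static const char *const_path(uint64_t v)
{
        if (v >= 0xffffffffffff8000ull || v < 0x8000ull)
                return "daddiu";     /* sign-extended 16-bit immediate */
        if (v >= 0xffffffff80000000ull || (v < 0x80000000 && v > 0xffff))
                return "lui+ori";    /* sign-extended 32-bit immediate */
        return "multi-part";         /* synthesized 16 bits at a time */
}

int main(void)
{
        printf("%s\n", const_path(42));                    /* daddiu */
        printf("%s\n", const_path(0x12345678));            /* lui+ori */
        printf("%s\n", const_path(0x123456789abcdef0ull)); /* multi-part */
        return 0;
}
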
+
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
+{
+       int off, b_off;
+
+       ctx->flags |= EBPF_SEEN_TC;
+       /*
+        * if (index >= array->map.max_entries)
+        *     goto out;
+        */
+       off = offsetof(struct bpf_array, map.max_entries);
+       emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
+       emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
+       b_off = b_imm(this_idx + 1, ctx);
+       emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
+       /*
+        * if (--TCC < 0)
+        *     goto out;
+        */
+       /* Delay slot */
+       emit_instr(ctx, daddiu, MIPS_R_T5,
+                  (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+       b_off = b_imm(this_idx + 1, ctx);
+       emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+       /*
+        * prog = array->ptrs[index];
+        * if (prog == NULL)
+        *     goto out;
+        */
+       /* Delay slot */
+       emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
+       emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
+       off = offsetof(struct bpf_array, ptrs);
+       emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
+       b_off = b_imm(this_idx + 1, ctx);
+       emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
+       /* Delay slot */
+       emit_instr(ctx, nop);
+
+       /* goto *(prog->bpf_func + 4); */
+       off = offsetof(struct bpf_prog, bpf_func);
+       emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
+       /* All systems are go... propagate TCC */
+       emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
+       /* Skip first instruction (TCC initialization) */
+       emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
+       return build_int_epilogue(ctx, MIPS_R_T9);
+}
+
+static bool use_bbit_insns(void)
+{
+       switch (current_cpu_type()) {
+       case CPU_CAVIUM_OCTEON:
+       case CPU_CAVIUM_OCTEON_PLUS:
+       case CPU_CAVIUM_OCTEON2:
+       case CPU_CAVIUM_OCTEON3:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool is_bad_offset(int b_off)
+{
+       return b_off > 0x1ffff || b_off < -0x20000;
+}
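
MIPS conditional branches encode a 16-bit instruction count that the CPU
shifts left by 2, so the reachable byte range is the signed 18-bit window
that is_bad_offset() enforces; a minimal sketch:

#include <assert.h>

static int is_bad(int b_off)
{
        return b_off > 0x1ffff || b_off < -0x20000;
}

int main(void)
{
        assert(!is_bad(0x1fffc));  /* 32767 insns forward: reachable */
        assert(!is_bad(-0x20000)); /* 32768 insns backward: reachable */
        assert(is_bad(0x20000));   /* one step further needs a j insn */
        return 0;
}
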
+
+/* Returns the number of insn slots consumed. */
+static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+                         int this_idx, int exit_idx)
+{
+       int src, dst, r, td, ts, mem_off, b_off;
+       bool need_swap, did_move, cmp_eq;
+       unsigned int target;
+       u64 t64;
+       s64 t64s;
+
+       switch (insn->code) {
+       case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
+       case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
+               r = gen_imm_insn(insn, ctx, this_idx);
+               if (r < 0)
+                       return r;
+               break;
+       case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+               if (insn->imm == 1) /* Mult by 1 is a nop */
+                       break;
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, dmultu, MIPS_R_AT, dst);
+               emit_instr(ctx, mflo, dst);
+               break;
+       case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+               emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
+               break;
+       case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+               if (insn->imm == 1) /* Mult by 1 is a nop */
+                       break;
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, multu, dst, MIPS_R_AT);
+               emit_instr(ctx, mflo, dst);
+               break;
+       case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+               emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
+               break;
+       case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
+       case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (insn->imm == 0) { /* Div by zero */
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+               }
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               if (insn->imm == 1) {
+                       /* div by 1 is a nop, mod by 1 is zero */
+                       if (BPF_OP(insn->code) == BPF_MOD)
+                               emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+                       break;
+               }
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, divu, dst, MIPS_R_AT);
+               if (BPF_OP(insn->code) == BPF_DIV)
+                       emit_instr(ctx, mflo, dst);
+               else
+                       emit_instr(ctx, mfhi, dst);
+               break;
+       case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (insn->imm == 0) { /* Div by zero */
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+               }
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+               if (insn->imm == 1) {
+                       /* div by 1 is a nop, mod by 1 is zero */
+                       if (BPF_OP(insn->code) == BPF_MOD)
+                               emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+                       break;
+               }
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, ddivu, dst, MIPS_R_AT);
+               if (BPF_OP(insn->code) == BPF_DIV)
+                       emit_instr(ctx, mflo, dst);
+               else
+                       emit_instr(ctx, mfhi, dst);
+               break;
+       case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
+               src = ebpf_to_mips_reg(ctx, insn, src_reg);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (src < 0 || dst < 0)
+                       return -EINVAL;
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+               did_move = false;
+               if (insn->src_reg == BPF_REG_10) {
+                       if (BPF_OP(insn->code) == BPF_MOV) {
+                               emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
+                               did_move = true;
+                       } else {
+                               emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
+                               src = MIPS_R_AT;
+                       }
+               } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                       int tmp_reg = MIPS_R_AT;
+
+                       if (BPF_OP(insn->code) == BPF_MOV) {
+                               tmp_reg = dst;
+                               did_move = true;
+                       }
+                       emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
+                       emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
+                       src = MIPS_R_AT;
+               }
+               switch (BPF_OP(insn->code)) {
+               case BPF_MOV:
+                       if (!did_move)
+                               emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
+                       break;
+               case BPF_ADD:
+                       emit_instr(ctx, daddu, dst, dst, src);
+                       break;
+               case BPF_SUB:
+                       emit_instr(ctx, dsubu, dst, dst, src);
+                       break;
+               case BPF_XOR:
+                       emit_instr(ctx, xor, dst, dst, src);
+                       break;
+               case BPF_OR:
+                       emit_instr(ctx, or, dst, dst, src);
+                       break;
+               case BPF_AND:
+                       emit_instr(ctx, and, dst, dst, src);
+                       break;
+               case BPF_MUL:
+                       emit_instr(ctx, dmultu, dst, src);
+                       emit_instr(ctx, mflo, dst);
+                       break;
+               case BPF_DIV:
+               case BPF_MOD:
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+                       emit_instr(ctx, ddivu, dst, src);
+                       if (BPF_OP(insn->code) == BPF_DIV)
+                               emit_instr(ctx, mflo, dst);
+                       else
+                               emit_instr(ctx, mfhi, dst);
+                       break;
+               case BPF_LSH:
+                       emit_instr(ctx, dsllv, dst, dst, src);
+                       break;
+               case BPF_RSH:
+                       emit_instr(ctx, dsrlv, dst, dst, src);
+                       break;
+               case BPF_ARSH:
+                       emit_instr(ctx, dsrav, dst, dst, src);
+                       break;
+               default:
+                       pr_err("ALU64_REG NOT HANDLED\n");
+                       return -EINVAL;
+               }
+               break;
+       case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (src < 0 || dst < 0)
+                       return -EINVAL;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+               did_move = false;
+               ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+               if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+                       int tmp_reg = MIPS_R_AT;
+
+                       if (BPF_OP(insn->code) == BPF_MOV) {
+                               tmp_reg = dst;
+                               did_move = true;
+                       }
+                       /* sign extend */
+                       emit_instr(ctx, sll, tmp_reg, src, 0);
+                       src = MIPS_R_AT;
+               }
+               switch (BPF_OP(insn->code)) {
+               case BPF_MOV:
+                       if (!did_move)
+                               emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
+                       break;
+               case BPF_ADD:
+                       emit_instr(ctx, addu, dst, dst, src);
+                       break;
+               case BPF_SUB:
+                       emit_instr(ctx, subu, dst, dst, src);
+                       break;
+               case BPF_XOR:
+                       emit_instr(ctx, xor, dst, dst, src);
+                       break;
+               case BPF_OR:
+                       emit_instr(ctx, or, dst, dst, src);
+                       break;
+               case BPF_AND:
+                       emit_instr(ctx, and, dst, dst, src);
+                       break;
+               case BPF_MUL:
+                       emit_instr(ctx, mul, dst, dst, src);
+                       break;
+               case BPF_DIV:
+               case BPF_MOD:
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+                       emit_instr(ctx, divu, dst, src);
+                       if (BPF_OP(insn->code) == BPF_DIV)
+                               emit_instr(ctx, mflo, dst);
+                       else
+                               emit_instr(ctx, mfhi, dst);
+                       break;
+               case BPF_LSH:
+                       emit_instr(ctx, sllv, dst, dst, src);
+                       break;
+               case BPF_RSH:
+                       emit_instr(ctx, srlv, dst, dst, src);
+                       break;
+               default:
+                       pr_err("ALU_REG NOT HANDLED\n");
+                       return -EINVAL;
+               }
+               break;
+       case BPF_JMP | BPF_EXIT:
+               if (this_idx + 1 < exit_idx) {
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, nop);
+               }
+               break;
+       case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
+       case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
+               cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+               if (insn->imm == 0) {
+                       src = MIPS_R_ZERO;
+               } else {
+                       gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+                       src = MIPS_R_AT;
+               }
+               goto jeq_common;
+       case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
+       case BPF_JMP | BPF_JNE | BPF_X:
+       case BPF_JMP | BPF_JSGT | BPF_X:
+       case BPF_JMP | BPF_JSGE | BPF_X:
+       case BPF_JMP | BPF_JGT | BPF_X:
+       case BPF_JMP | BPF_JGE | BPF_X:
+       case BPF_JMP | BPF_JSET | BPF_X:
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (src < 0 || dst < 0)
+                       return -EINVAL;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+               if (td == REG_32BIT && ts != REG_32BIT) {
+                       emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+                       src = MIPS_R_AT;
+               } else if (ts == REG_32BIT && td != REG_32BIT) {
+                       emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
+                       dst = MIPS_R_AT;
+               }
+               if (BPF_OP(insn->code) == BPF_JSET) {
+                       emit_instr(ctx, and, MIPS_R_AT, dst, src);
+                       cmp_eq = false;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else if (BPF_OP(insn->code) == BPF_JSGT) {
+                       emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
+                       if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                               b_off = b_imm(exit_idx, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_instr(ctx, blez, MIPS_R_AT, b_off);
+                               emit_instr(ctx, nop);
+                               return 2; /* We consumed the exit. */
+                       }
+                       b_off = b_imm(this_idx + insn->off + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+                       emit_instr(ctx, nop);
+                       break;
+               } else if (BPF_OP(insn->code) == BPF_JSGE) {
+                       emit_instr(ctx, slt, MIPS_R_AT, dst, src);
+                       cmp_eq = true;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else if (BPF_OP(insn->code) == BPF_JGT) {
+                       /* dst or src could be AT */
+                       emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
+                       emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+                       /* SP known to be non-zero, movz becomes boolean not */
+                       emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
+                       emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
+                       emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
+                       cmp_eq = true;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else if (BPF_OP(insn->code) == BPF_JGE) {
+                       emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+                       cmp_eq = true;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else { /* JNE/JEQ case */
+                       cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+               }
+jeq_common:
+               /*
+                * If the next insn is EXIT and we are jumping around
+                * only it, invert the sense of the compare and
+                * conditionally jump to the exit.  Poor man's branch
+                * chaining.
+                */
+               if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off)) {
+                               target = j_target(ctx, exit_idx);
+                               if (target == (unsigned int)-1)
+                                       return -E2BIG;
+                               cmp_eq = !cmp_eq;
+                               b_off = 4 * 3;
+                               if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+                                       ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+                                       ctx->long_b_conversion = 1;
+                               }
+                       }
+
+                       if (cmp_eq)
+                               emit_instr(ctx, bne, dst, src, b_off);
+                       else
+                               emit_instr(ctx, beq, dst, src, b_off);
+                       emit_instr(ctx, nop);
+                       if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+                               emit_instr(ctx, j, target);
+                               emit_instr(ctx, nop);
+                       }
+                       return 2; /* We consumed the exit. */
+               }
+               b_off = b_imm(this_idx + insn->off + 1, ctx);
+               if (is_bad_offset(b_off)) {
+                       target = j_target(ctx, this_idx + insn->off + 1);
+                       if (target == (unsigned int)-1)
+                               return -E2BIG;
+                       cmp_eq = !cmp_eq;
+                       b_off = 4 * 3;
+                       if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+                               ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+                               ctx->long_b_conversion = 1;
+                       }
+               }
+
+               if (cmp_eq)
+                       emit_instr(ctx, beq, dst, src, b_off);
+               else
+                       emit_instr(ctx, bne, dst, src, b_off);
+               emit_instr(ctx, nop);
+               if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+                       emit_instr(ctx, j, target);
+                       emit_instr(ctx, nop);
+               }
+               break;
+       case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
+       case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
+               cmp_eq = (BPF_OP(insn->code) == BPF_JSGE);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+
+               if (insn->imm == 0) {
+                       if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                               b_off = b_imm(exit_idx, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               if (cmp_eq)
+                                       emit_instr(ctx, bltz, dst, b_off);
+                               else
+                                       emit_instr(ctx, blez, dst, b_off);
+                               emit_instr(ctx, nop);
+                               return 2; /* We consumed the exit. */
+                       }
+                       b_off = b_imm(this_idx + insn->off + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       if (cmp_eq)
+                               emit_instr(ctx, bgez, dst, b_off);
+                       else
+                               emit_instr(ctx, bgtz, dst, b_off);
+                       emit_instr(ctx, nop);
+                       break;
+               }
+               /*
+                * Only a "LT" compare is available, so we must use
+                * imm + 1 to generate "GT".
+                */
+               t64s = insn->imm + (cmp_eq ? 0 : 1);
+               if (t64s >= S16_MIN && t64s <= S16_MAX) {
+                       emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
+                       src = MIPS_R_AT;
+                       dst = MIPS_R_ZERO;
+                       cmp_eq = true;
+                       goto jeq_common;
+               }
+               emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+               emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
+               src = MIPS_R_AT;
+               dst = MIPS_R_ZERO;
+               cmp_eq = true;
+               goto jeq_common;
+
+       case BPF_JMP | BPF_JGT | BPF_K:
+       case BPF_JMP | BPF_JGE | BPF_K:
+               cmp_eq = (BPF_OP(insn->code) == BPF_JGE);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+               /*
+                * Only a "LT" compare is available, so we must use
+                * imm + 1 to generate "GT".
+                */
+               t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1);
+               if (t64s >= 0 && t64s <= S16_MAX) {
+                       emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s);
+                       src = MIPS_R_AT;
+                       dst = MIPS_R_ZERO;
+                       cmp_eq = true;
+                       goto jeq_common;
+               }
+               emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+               emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
+               src = MIPS_R_AT;
+               dst = MIPS_R_ZERO;
+               cmp_eq = true;
+               goto jeq_common;
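
A hedged worked example of the imm + 1 trick used by both cases above (illustration only): MIPS has no "set on greater-than", so BPF_JGT dst, K is rewritten as a less-than test against K + 1.

	/*
	 *	sltiu	$at, dst, K + 1		# $at = (dst < K + 1)
	 *	beq	$zero, $at, target	# taken when $at == 0,
	 *	 nop				# i.e. dst >= K + 1, i.e. dst > K
	 *
	 * BPF_JGE keeps K unchanged and tests dst >= K the same way.
	 */
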
+
+       case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+
+               if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) {
+                       if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                               b_off = b_imm(exit_idx, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
+                               emit_instr(ctx, nop);
+                               return 2; /* We consumed the exit. */
+                       }
+                       b_off = b_imm(this_idx + insn->off + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
+                       emit_instr(ctx, nop);
+                       break;
+               }
+               t64 = (u32)insn->imm;
+               emit_const_to_reg(ctx, MIPS_R_AT, t64);
+               emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
+               src = MIPS_R_AT;
+               dst = MIPS_R_ZERO;
+               cmp_eq = false;
+               goto jeq_common;
+
+       case BPF_JMP | BPF_JA:
+               /*
+                * Prefer relative branch for easier debugging, but
+                * fall back if needed.
+                */
+               b_off = b_imm(this_idx + insn->off + 1, ctx);
+               if (is_bad_offset(b_off)) {
+                       target = j_target(ctx, this_idx + insn->off + 1);
+                       if (target == (unsigned int)-1)
+                               return -E2BIG;
+                       emit_instr(ctx, j, target);
+               } else {
+                       emit_instr(ctx, b, b_off);
+               }
+               emit_instr(ctx, nop);
+               break;
+       case BPF_LD | BPF_DW | BPF_IMM:
+               if (insn->src_reg != 0)
+                       return -EINVAL;
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
+               emit_const_to_reg(ctx, dst, t64);
+               return 2; /* Double slot insn */
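
A hedged worked example of the double-slot immediate above: to load 0x123456789abcdef0, the eBPF encoding uses insn->imm = 0x9abcdef0 (low word) and (insn + 1)->imm = 0x12345678 (high word); the JIT reassembles the 64-bit constant and returns 2 so the second slot is not processed as a separate instruction.
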
+
+       case BPF_JMP | BPF_CALL:
+               ctx->flags |= EBPF_SAVE_RA;
+               t64s = (s64)insn->imm + (s64)__bpf_call_base;
+               emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
+               emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+               /* delay slot */
+               emit_instr(ctx, nop);
+               break;
+
+       case BPF_JMP | BPF_TAIL_CALL:
+               if (emit_bpf_tail_call(ctx, this_idx))
+                       return -EINVAL;
+               break;
+
+       case BPF_LD | BPF_B | BPF_ABS:
+       case BPF_LD | BPF_H | BPF_ABS:
+       case BPF_LD | BPF_W | BPF_ABS:
+       case BPF_LD | BPF_DW | BPF_ABS:
+               ctx->flags |= EBPF_SAVE_RA;
+
+               gen_imm_to_reg(insn, MIPS_R_A1, ctx);
+               emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+
+               if (insn->imm < 0) {
+                       emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
+               } else {
+                       emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+                       emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+               }
+               goto ld_skb_common;
+
+       case BPF_LD | BPF_B | BPF_IND:
+       case BPF_LD | BPF_H | BPF_IND:
+       case BPF_LD | BPF_W | BPF_IND:
+       case BPF_LD | BPF_DW | BPF_IND:
+               ctx->flags |= EBPF_SAVE_RA;
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               if (src < 0)
+                       return src;
+               ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+               if (ts == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, MIPS_R_A1, src, 0);
+                       src = MIPS_R_A1;
+               }
+               if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+                       emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
+               } else {
+                       gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+                       emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
+               }
+               /* truncate to 32-bit int */
+               emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
+               emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+               emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
+
+               emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
+               emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+               emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+               emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);
+
+ld_skb_common:
+               emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+               /* delay slot move */
+               emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);
+
+               /* Check the error value */
+               b_off = b_imm(exit_idx, ctx);
+               if (is_bad_offset(b_off)) {
+                       target = j_target(ctx, exit_idx);
+                       if (target == (unsigned int)-1)
+                               return -E2BIG;
+
+                       if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+                               ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+                               ctx->long_b_conversion = 1;
+                       }
+                       emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
+                       emit_instr(ctx, nop);
+                       emit_instr(ctx, j, target);
+                       emit_instr(ctx, nop);
+               } else {
+                       emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, nop);
+               }
+
+#ifdef __BIG_ENDIAN
+               need_swap = false;
+#else
+               need_swap = true;
+#endif
+               dst = MIPS_R_V0;
+               switch (BPF_SIZE(insn->code)) {
+               case BPF_B:
+                       emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
+                       break;
+               case BPF_H:
+                       emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
+                       if (need_swap)
+                               emit_instr(ctx, wsbh, dst, dst);
+                       break;
+               case BPF_W:
+                       emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
+                       if (need_swap) {
+                               emit_instr(ctx, wsbh, dst, dst);
+                               emit_instr(ctx, rotr, dst, dst, 16);
+                       }
+                       break;
+               case BPF_DW:
+                       emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
+                       if (need_swap) {
+                               emit_instr(ctx, dsbh, dst, dst);
+                               emit_instr(ctx, dshd, dst, dst);
+                       }
+                       break;
+               }
+
+               break;
+       case BPF_ALU | BPF_END | BPF_FROM_BE:
+       case BPF_ALU | BPF_END | BPF_FROM_LE:
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (insn->imm == 64 && td == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+               if (insn->imm != 64 &&
+                   (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+
+#ifdef __BIG_ENDIAN
+               need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
+#else
+               need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
+#endif
+               if (insn->imm == 16) {
+                       if (need_swap)
+                               emit_instr(ctx, wsbh, dst, dst);
+                       emit_instr(ctx, andi, dst, dst, 0xffff);
+               } else if (insn->imm == 32) {
+                       if (need_swap) {
+                               emit_instr(ctx, wsbh, dst, dst);
+                               emit_instr(ctx, rotr, dst, dst, 16);
+                       }
+               } else { /* 64-bit */
+                       if (need_swap) {
+                               emit_instr(ctx, dsbh, dst, dst);
+                               emit_instr(ctx, dshd, dst, dst);
+                       }
+               }
+               break;
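
A hedged worked example for the 16-bit swap above: with dst holding 0x1234, "wsbh dst, dst" swaps the bytes within each halfword, leaving 0x3412 in the low halfword, and "andi dst, dst, 0xffff" clears everything above bit 15, so the register ends up with the byte-swapped 16-bit value zero-extended.
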
+
+       case BPF_ST | BPF_B | BPF_MEM:
+       case BPF_ST | BPF_H | BPF_MEM:
+       case BPF_ST | BPF_W | BPF_MEM:
+       case BPF_ST | BPF_DW | BPF_MEM:
+               if (insn->dst_reg == BPF_REG_10) {
+                       ctx->flags |= EBPF_SEEN_FP;
+                       dst = MIPS_R_SP;
+                       mem_off = insn->off + MAX_BPF_STACK;
+               } else {
+                       dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+                       if (dst < 0)
+                               return dst;
+                       mem_off = insn->off;
+               }
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               switch (BPF_SIZE(insn->code)) {
+               case BPF_B:
+                       emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
+                       break;
+               case BPF_H:
+                       emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
+                       break;
+               case BPF_W:
+                       emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
+                       break;
+               case BPF_DW:
+                       emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
+                       break;
+               }
+               break;
+
+       case BPF_LDX | BPF_B | BPF_MEM:
+       case BPF_LDX | BPF_H | BPF_MEM:
+       case BPF_LDX | BPF_W | BPF_MEM:
+       case BPF_LDX | BPF_DW | BPF_MEM:
+               if (insn->src_reg == BPF_REG_10) {
+                       ctx->flags |= EBPF_SEEN_FP;
+                       src = MIPS_R_SP;
+                       mem_off = insn->off + MAX_BPF_STACK;
+               } else {
+                       src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+                       if (src < 0)
+                               return src;
+                       mem_off = insn->off;
+               }
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               switch (BPF_SIZE(insn->code)) {
+               case BPF_B:
+                       emit_instr(ctx, lbu, dst, mem_off, src);
+                       break;
+               case BPF_H:
+                       emit_instr(ctx, lhu, dst, mem_off, src);
+                       break;
+               case BPF_W:
+                       emit_instr(ctx, lw, dst, mem_off, src);
+                       break;
+               case BPF_DW:
+                       emit_instr(ctx, ld, dst, mem_off, src);
+                       break;
+               }
+               break;
+
+       case BPF_STX | BPF_B | BPF_MEM:
+       case BPF_STX | BPF_H | BPF_MEM:
+       case BPF_STX | BPF_W | BPF_MEM:
+       case BPF_STX | BPF_DW | BPF_MEM:
+       case BPF_STX | BPF_W | BPF_XADD:
+       case BPF_STX | BPF_DW | BPF_XADD:
+               if (insn->dst_reg == BPF_REG_10) {
+                       ctx->flags |= EBPF_SEEN_FP;
+                       dst = MIPS_R_SP;
+                       mem_off = insn->off + MAX_BPF_STACK;
+               } else {
+                       dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+                       if (dst < 0)
+                               return dst;
+                       mem_off = insn->off;
+               }
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               if (src < 0)
+                       return src;
+               if (BPF_MODE(insn->code) == BPF_XADD) {
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_W:
+                               if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                                       emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+                                       src = MIPS_R_AT;
+                               }
+                               emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
+                               emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
+                               emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
+                               /*
+                                * On failure, branch back to the LL
+                                * (-4 instructions of 4 bytes each).
+                                */
+                               emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+                               emit_instr(ctx, nop);
+                               break;
+                       case BPF_DW:
+                               if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                                       emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+                                       emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+                                       src = MIPS_R_AT;
+                               }
+                               emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
+                               emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
+                               emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
+                               emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+                               emit_instr(ctx, nop);
+                               break;
+                       }
+               } else { /* BPF_MEM */
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_B:
+                               emit_instr(ctx, sb, src, mem_off, dst);
+                               break;
+                       case BPF_H:
+                               emit_instr(ctx, sh, src, mem_off, dst);
+                               break;
+                       case BPF_W:
+                               emit_instr(ctx, sw, src, mem_off, dst);
+                               break;
+                       case BPF_DW:
+                               if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                                       emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+                                       emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+                                       src = MIPS_R_AT;
+                               }
+                               emit_instr(ctx, sd, src, mem_off, dst);
+                               break;
+                       }
+               }
+               break;
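
A hedged C sketch of what the ll/addu/sc retry loop above computes for the 32-bit XADD case; __store_conditional() below is a hypothetical stand-in for the sc instruction and its beq retry, not a real kernel helper:

	u32 old, new;

	do {
		old = *(u32 *)(dst + mem_off);	/* ll: load-linked */
		new = old + src;		/* addu */
	} while (!__store_conditional((u32 *)(dst + mem_off), new)); /* sc + beq */
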
+
+       default:
+               pr_err("NOT HANDLED %d - (%02x)\n",
+                      this_idx, (unsigned int)insn->code);
+               return -EINVAL;
+       }
+       return 1;
+}
+
+#define RVT_VISITED_MASK 0xc000000000000000ull
+#define RVT_FALL_THROUGH 0x4000000000000000ull
+#define RVT_BRANCH_TAKEN 0x8000000000000000ull
+#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
+
+static int build_int_body(struct jit_ctx *ctx)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       const struct bpf_insn *insn;
+       int i, r;
+
+       for (i = 0; i < prog->len; ) {
+               insn = prog->insnsi + i;
+               if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
+                       /* dead instruction, don't emit it. */
+                       i++;
+                       continue;
+               }
+
+               if (ctx->target == NULL)
+                       ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
+
+               r = build_one_insn(insn, ctx, i, prog->len);
+               if (r < 0)
+                       return r;
+               i += r;
+       }
+       /* epilogue offset */
+       if (ctx->target == NULL)
+               ctx->offsets[i] = ctx->idx * 4;
+
+       /*
+        * All exits have the offset of the epilogue; some offsets may
+        * not have been set due to branch-around threading, so set
+        * them now.
+        */
+       if (ctx->target == NULL)
+               for (i = 0; i < prog->len; i++) {
+                       insn = prog->insnsi + i;
+                       if (insn->code == (BPF_JMP | BPF_EXIT))
+                               ctx->offsets[i] = ctx->idx * 4;
+               }
+       return 0;
+}
+
+/* return the last idx processed, or negative for error */
+static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
+                                  int start_idx, bool follow_taken)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       const struct bpf_insn *insn;
+       u64 exit_rvt = initial_rvt;
+       u64 *rvt = ctx->reg_val_types;
+       int idx;
+       int reg;
+
+       for (idx = start_idx; idx < prog->len; idx++) {
+               rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
+               insn = prog->insnsi + idx;
+               switch (BPF_CLASS(insn->code)) {
+               case BPF_ALU:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_ADD:
+                       case BPF_SUB:
+                       case BPF_MUL:
+                       case BPF_DIV:
+                       case BPF_OR:
+                       case BPF_AND:
+                       case BPF_LSH:
+                       case BPF_RSH:
+                       case BPF_NEG:
+                       case BPF_MOD:
+                       case BPF_XOR:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               break;
+                       case BPF_MOV:
+                               if (BPF_SRC(insn->code)) {
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               } else {
+                                       /* IMM to REG move */
+                                       if (insn->imm >= 0)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                                       else
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               }
+                               break;
+                       case BPF_END:
+                               if (insn->imm == 64)
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               else if (insn->imm == 32)
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               else /* insn->imm == 16 */
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                               break;
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_ALU64:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_MOV:
+                               if (BPF_SRC(insn->code)) {
+                                       /* REG to REG move */
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               } else {
+                                       /* IMM to REG move */
+                                       if (insn->imm >= 0)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                                       else
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+                               }
+                               break;
+                       default:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_LD:
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_DW:
+                               if (BPF_MODE(insn->code) == BPF_IMM) {
+                                       s64 val;
+
+                                       val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
+                                       if (val > 0 && val <= S32_MAX)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                                       else if (val >= S32_MIN && val <= S32_MAX)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+                                       else
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                                       rvt[idx] |= RVT_DONE;
+                                       idx++;
+                               } else {
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               }
+                               break;
+                       case BPF_B:
+                       case BPF_H:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                               break;
+                       case BPF_W:
+                               if (BPF_MODE(insn->code) == BPF_IMM)
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg,
+                                                        insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
+                               else
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               break;
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_LDX:
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_DW:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               break;
+                       case BPF_B:
+                       case BPF_H:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                               break;
+                       case BPF_W:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               break;
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_JMP:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_EXIT:
+                               rvt[idx] = RVT_DONE | exit_rvt;
+                               rvt[prog->len] = exit_rvt;
+                               return idx;
+                       case BPF_JA:
+                               rvt[idx] |= RVT_DONE;
+                               idx += insn->off;
+                               break;
+                       case BPF_JEQ:
+                       case BPF_JGT:
+                       case BPF_JGE:
+                       case BPF_JSET:
+                       case BPF_JNE:
+                       case BPF_JSGT:
+                       case BPF_JSGE:
+                               if (follow_taken) {
+                                       rvt[idx] |= RVT_BRANCH_TAKEN;
+                                       idx += insn->off;
+                                       follow_taken = false;
+                               } else {
+                                       rvt[idx] |= RVT_FALL_THROUGH;
+                               }
+                               break;
+                       case BPF_CALL:
+                               set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
+                               /* Upon call return, argument registers are clobbered. */
+                               for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
+                                       set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+                               rvt[idx] |= RVT_DONE;
+                               break;
+                       default:
+                               WARN(1, "Unhandled BPF_JMP case.\n");
+                               rvt[idx] |= RVT_DONE;
+                               break;
+                       }
+                       break;
+               default:
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               }
+       }
+       return idx;
+}
+
+/*
+ * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
+ * each eBPF insn.  This allows unneeded sign and zero extension
+ * operations to be omitted.
+ *
+ * Doesn't yet handle confluence of control paths with conflicting
+ * ranges, but it is good enough for most sane code.
+ */
+static int reg_val_propagate(struct jit_ctx *ctx)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       u64 exit_rvt;
+       int reg;
+       int i;
+
+       /*
+        * 11 registers * 3 bits/reg leaves top bits free for other
+        * uses.  Bits 62..63 record whether we have visited an insn.
+        */
+       exit_rvt = 0;
+
+       /* Upon entry, argument registers are 64-bit. */
+       for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
+               set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+       /*
+        * First follow all conditional branches on the fall-through
+        * edge of control flow.
+        */
+       reg_val_propagate_range(ctx, exit_rvt, 0, false);
+restart_search:
+       /*
+        * Then repeatedly find the first conditional branch where only
+        * the fall-through edge of control flow has been followed, and
+        * follow the branch-taken edge.  We will end up restarting the
+        * search once per conditional branch insn.
+        */
+       for (i = 0; i < prog->len; i++) {
+               u64 rvt = ctx->reg_val_types[i];
+
+               if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
+                   (rvt & RVT_VISITED_MASK) == 0)
+                       continue;
+               if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
+                       reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
+               } else { /* RVT_BRANCH_TAKEN */
+                       WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
+                       reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
+               }
+               goto restart_search;
+       }
+       /*
+        * Eventually all conditional branches have been followed on
+        * both edges and we are done.  Any insn that has not been
+        * visited at this point is dead.
+        */
+
+       return 0;
+}
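
A hedged sketch of the 3-bits-per-register packing that reg_val_propagate() relies on; set_reg_val_type() is defined earlier in the file, and this illustration only assumes it reduces to something like:

	static void set_reg_val_type(u64 *rvt, int reg, u64 type)
	{
		*rvt = (*rvt & ~(7ull << (reg * 3))) | (type << (reg * 3));
	}

With 11 registers * 3 bits occupying bits 0..32, bits 62..63 remain free for the RVT_VISITED_MASK bookkeeping.
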
+
+static void jit_fill_hole(void *area, unsigned int size)
+{
+       u32 *p;
+
+       /* We are guaranteed to have aligned memory. */
+       for (p = area; size >= sizeof(u32); size -= sizeof(u32))
+               uasm_i_break(&p, BRK_BUG); /* Increments p */
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+       struct bpf_prog *orig_prog = prog;
+       bool tmp_blinded = false;
+       struct bpf_prog *tmp;
+       struct bpf_binary_header *header = NULL;
+       struct jit_ctx ctx;
+       unsigned int image_size;
+       u8 *image_ptr;
+
+       if (!bpf_jit_enable || !cpu_has_mips64r2)
+               return prog;
+
+       tmp = bpf_jit_blind_constants(prog);
+       /* If blinding was requested and we failed during blinding,
+        * we must fall back to the interpreter.
+        */
+       if (IS_ERR(tmp))
+               return orig_prog;
+       if (tmp != prog) {
+               tmp_blinded = true;
+               prog = tmp;
+       }
+
+       memset(&ctx, 0, sizeof(ctx));
+
+       ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+       if (ctx.offsets == NULL)
+               goto out_err;
+
+       ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
+       if (ctx.reg_val_types == NULL)
+               goto out_err;
+
+       ctx.skf = prog;
+
+       if (reg_val_propagate(&ctx))
+               goto out_err;
+
+       /*
+        * First pass discovers used resources and instruction offsets
+        * assuming short branches are used.
+        */
+       if (build_int_body(&ctx))
+               goto out_err;
+
+       /*
+        * If no calls are made (EBPF_SAVE_RA not set), the tail call
+        * count can be kept in $v1; otherwise we must save it in $s4.
+        */
+       if (ctx.flags & EBPF_SEEN_TC) {
+               if (ctx.flags & EBPF_SAVE_RA)
+                       ctx.flags |= EBPF_SAVE_S4;
+               else
+                       ctx.flags |= EBPF_TCC_IN_V1;
+       }
+
+       /*
+        * Second pass generates offsets.  If any branches are out of
+        * range, a jump-around long sequence is generated and we have
+        * to try again from the beginning to generate the new
+        * offsets.  This is repeated until no additional conversions
+        * are necessary.
+        */
+       do {
+               ctx.idx = 0;
+               ctx.gen_b_offsets = 1;
+               ctx.long_b_conversion = 0;
+               if (gen_int_prologue(&ctx))
+                       goto out_err;
+               if (build_int_body(&ctx))
+                       goto out_err;
+               if (build_int_epilogue(&ctx, MIPS_R_RA))
+                       goto out_err;
+       } while (ctx.long_b_conversion);
+
+       image_size = 4 * ctx.idx;
+
+       header = bpf_jit_binary_alloc(image_size, &image_ptr,
+                                     sizeof(u32), jit_fill_hole);
+       if (header == NULL)
+               goto out_err;
+
+       ctx.target = (u32 *)image_ptr;
+
+       /* Third pass generates the code */
+       ctx.idx = 0;
+       if (gen_int_prologue(&ctx))
+               goto out_err;
+       if (build_int_body(&ctx))
+               goto out_err;
+       if (build_int_epilogue(&ctx, MIPS_R_RA))
+               goto out_err;
+
+       /* Update the icache */
+       flush_icache_range((unsigned long)ctx.target,
+                          (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+
+       if (bpf_jit_enable > 1)
+               /* Dump JIT code */
+               bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+
+       bpf_jit_binary_lock_ro(header);
+       prog->bpf_func = (void *)ctx.target;
+       prog->jited = 1;
+       prog->jited_len = image_size;
+out_normal:
+       if (tmp_blinded)
+               bpf_jit_prog_release_other(prog, prog == orig_prog ?
+                                          tmp : orig_prog);
+       kfree(ctx.offsets);
+       kfree(ctx.reg_val_types);
+
+       return prog;
+
+out_err:
+       prog = orig_prog;
+       if (header)
+               bpf_jit_binary_free(header);
+       goto out_normal;
+}
index 7317b3108a88859a91523c45f1e52c08cb22fdc4..2eb8ff0d6fca443543c32ac80ff690b4b67be1ef 100644 (file)
@@ -47,10 +47,9 @@ struct mmu_table_batch {
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
-                                 struct mm_struct *mm,
-                                 unsigned long start,
-                                 unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
-                                 unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+       }
+
        tlb_flush_mmu(tlb);
 }
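
The new "force" parameter lets generic code demand a flush even when the architecture would otherwise batch or skip it. A hedged sketch of how a generic wrapper might drive this hook (the wrapper shape and the mm_tlb_flush_nested() helper are assumptions about the rest of the series, not shown in this diff):

	void tlb_finish_mmu(struct mmu_gather *tlb,
			    unsigned long start, unsigned long end)
	{
		/* force a flush if a concurrent unmap raced with us */
		bool force = mm_tlb_flush_nested(tlb->mm);

		arch_tlb_finish_mmu(tlb, start, end, force);
	}
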
 
index 01c6fbc3e85b62fdec83bacea0f76a88126bfe84..1803797fc885cf799337b5d61f30ae726628b8d6 100644 (file)
@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
                insn_count = bpf_jit_insn(jit, fp, i);
                if (insn_count < 0)
                        return -1;
-               jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+               /* Next instruction address */
+               jit->addrs[i + insn_count] = jit->prg;
        }
        bpf_jit_epilogue(jit);
 
index 46e0d635e36f711aff9a88c45955905d7fbf3cc2..51a8bc967e75f1e3c96a70783e9da439310edbcb 100644 (file)
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
-       if (tlb->fullmm)
+       if (tlb->fullmm || force)
                flush_tlb_mm(tlb->mm);
 
        /* keep the page table cache within bounds */
index 1d8321c827a8821bb4e9f4989eb883cd761370db..1b1286d0506910c0f9a92ab6af14e272dd008d61 100644 (file)
 #define SUN4V_CHIP_NIAGARA5    0x05
 #define SUN4V_CHIP_SPARC_M6    0x06
 #define SUN4V_CHIP_SPARC_M7    0x07
+#define SUN4V_CHIP_SPARC_M8    0x08
 #define SUN4V_CHIP_SPARC64X    0x8a
 #define SUN4V_CHIP_SPARC_SN    0x8b
 #define SUN4V_CHIP_UNKNOWN     0xff
 
+/*
+ * The following CPU_ID_xxx constants are used
+ * to identify the CPU type in the setup phase
+ * (see head_64.S)
+ */
+#define CPU_ID_NIAGARA1                ('1')
+#define CPU_ID_NIAGARA2                ('2')
+#define CPU_ID_NIAGARA3                ('3')
+#define CPU_ID_NIAGARA4                ('4')
+#define CPU_ID_NIAGARA5                ('5')
+#define CPU_ID_M6              ('6')
+#define CPU_ID_M7              ('7')
+#define CPU_ID_M8              ('8')
+#define CPU_ID_SONOMA1         ('N')
+
 #ifndef __ASSEMBLY__
 
 enum ultra_tlb_layout {
index 493e023a468a919c61d77451e43e0a4a2e414bbe..ef4f18f7a67402ed8baceb2ea05ee7f6368cc404 100644 (file)
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "sparc-m7";
                break;
 
+       case SUN4V_CHIP_SPARC_M8:
+               sparc_cpu_type = "SPARC-M8";
+               sparc_fpu_type = "SPARC-M8 integrated FPU";
+               sparc_pmu_type = "sparc-m8";
+               break;
+
        case SUN4V_CHIP_SPARC_SN:
                sparc_cpu_type = "SPARC-SN";
                sparc_fpu_type = "SPARC-SN integrated FPU";
index 45c820e1cba5d949ff936f15392ca3c0c8578a34..90d550bbfeefe484f1560940f111235f26332d7a 100644 (file)
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA5:
        case SUN4V_CHIP_SPARC_M6:
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
        case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
index 41a4073286671eff51f275bfca4ae6d9d01db74d..78e0211753d28f14f955af865704248b1e5daf24 100644 (file)
@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
         nop
 
 70:    ldub    [%g1 + 7], %g2
-       cmp     %g2, '3'
+       cmp     %g2, CPU_ID_NIAGARA3
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA3, %g4
-       cmp     %g2, '4'
+       cmp     %g2, CPU_ID_NIAGARA4
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA4, %g4
-       cmp     %g2, '5'
+       cmp     %g2, CPU_ID_NIAGARA5
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA5, %g4
-       cmp     %g2, '6'
+       cmp     %g2, CPU_ID_M6
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M6, %g4
-       cmp     %g2, '7'
+       cmp     %g2, CPU_ID_M7
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M7, %g4
-       cmp     %g2, 'N'
+       cmp     %g2, CPU_ID_M8
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_SPARC_M8, %g4
+       cmp     %g2, CPU_ID_SONOMA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_SN, %g4
        ba,pt   %xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
 91:    sethi   %hi(prom_cpu_compatible), %g1
        or      %g1, %lo(prom_cpu_compatible), %g1
        ldub    [%g1 + 17], %g2
-       cmp     %g2, '1'
+       cmp     %g2, CPU_ID_NIAGARA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA1, %g4
-       cmp     %g2, '2'
+       cmp     %g2, CPU_ID_NIAGARA2
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA2, %g4
        
@@ -600,6 +603,9 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_M7
+       be,pt   %xcc, niagara4_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_SPARC_M8
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_SN
index 4d9c3e13c15056b5d60e7ccd266b36cfe29d2c00..150ee7d4b059a69e174dff7c7d16ff906f73e1ed 100644 (file)
@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
-       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
-           sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
+
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
+       case SUN4V_CHIP_SPARC_SN:
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
+               break;
+       default:
+               break;
+       }
 
        sun4v_hvapi_init();
 }
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
index fed73f14aa49befee59b93b0fcab02f65f7e10d2..afa0099f374852e0cf093088d942512008a45a68 100644 (file)
@@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
                        break;
                case SUN4V_CHIP_SPARC_M7:
                case SUN4V_CHIP_SPARC_SN:
-               default:
                        /* M7 and later support 52-bit virtual addresses.  */
                        sparc64_va_hole_top =    0xfff8000000000000UL;
                        sparc64_va_hole_bottom = 0x0008000000000000UL;
                        max_phys_bits = 49;
                        break;
+               case SUN4V_CHIP_SPARC_M8:
+               default:
+                       /* M8 and later support 54-bit virtual addresses.
+                        * However, we restrict M8 and above to 53 VA
+                        * bits, as a 4-level page table cannot support
+                        * more than 53 VA bits.
+                        */
+                       sparc64_va_hole_top =    0xfff0000000000000UL;
+                       sparc64_va_hole_bottom = 0x0010000000000000UL;
+                       max_phys_bits = 51;
+                       break;
                }
        }
 
@@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                pagecv_flag = 0x00;
                break;
@@ -2313,6 +2324,7 @@ void __init paging_init(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                page_cache4v_flag = _PAGE_CP_4V;
                break;
index 600a2e9bfee2feea2a6dbc8b91d2a5a872d9d8d3..344d95619d0334659e6f4a9f3a5bff70ae95f67c 100644 (file)
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+               tlb->need_flush = 1;
+       }
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
index 2d716ebc5a5e90d62c3759e01a512e0ff6ab9de4..dff7cc39437caba214fac506f57009837f2223c8 100644 (file)
@@ -1,5 +1,6 @@
 generic-y += bug.h
 generic-y += clkdev.h
+generic-y += device.h
 generic-y += div64.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
@@ -17,6 +18,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += rwsem.h
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h
deleted file mode 100644 (file)
index 1deeb8e..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_XTENSA_DEVICE_H
-#define _ASM_XTENSA_DEVICE_H
-
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_XTENSA_DEVICE_H */
diff --git a/arch/xtensa/include/asm/param.h b/arch/xtensa/include/asm/param.h
deleted file mode 100644 (file)
index 0a70e78..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * include/asm-xtensa/param.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _XTENSA_PARAM_H
-#define _XTENSA_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ            CONFIG_HZ       /* internal timer frequency */
-# define USER_HZ       100             /* for user interfaces in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ)      /* frequnzy at which times() counts */
-#endif /* _XTENSA_PARAM_H */
index d159e9b9c01837ba5aa9e77d50c3c14e475ce367..672391003e40fac4f814fe5b6aa1dc04e791c81b 100644 (file)
@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
 }
 EXPORT_SYMBOL(__sync_fetch_and_or_4);
 
-#ifdef CONFIG_NET
 /*
  * Networking support
  */
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif /* CONFIG_NET */
 
 /*
  * Architecture-specific symbols
index 1a804a2f9a5be6212c57febc01f6d28f47b8c91a..3c75c4e597da8f086f65de51201e0d37d6672733 100644 (file)
@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
        clear_page_alias(kvaddr, paddr);
        preempt_enable();
 }
+EXPORT_SYMBOL(clear_user_highpage);
 
 void copy_user_highpage(struct page *dst, struct page *src,
                        unsigned long vaddr, struct vm_area_struct *vma)
@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
        copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
        preempt_enable();
 }
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(copy_user_highpage);
 
 /*
  * Any time the kernel writes to a user page cache page, or it is about to
@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
 
        /* There shouldn't be an entry in the cache for this page anymore. */
 }
-
+EXPORT_SYMBOL(flush_dcache_page);
 
 /*
  * For now, flush the whole cache. FIXME??
@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
        __flush_invalidate_dcache_all();
        __invalidate_icache_all();
 }
+EXPORT_SYMBOL(local_flush_cache_range);
 
 /* 
  * Remove any entry in the cache for this page. 
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
        __flush_invalidate_dcache_page_alias(virt, phys);
        __invalidate_icache_page_alias(virt, phys);
 }
+EXPORT_SYMBOL(local_flush_cache_page);
 
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 
        flush_tlb_page(vma, addr);
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
        if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
                unsigned long phys = page_to_phys(page);
@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
  * flush_dcache_page() on the page.
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long vaddr, void *dst, const void *src,
index 63e771ab56d80ade8cd5b5e8ccd86f2fac5f0573..859f0a8c97c8a1bf58b6ed4dc016c0ce0fb355ad 100644 (file)
@@ -71,17 +71,29 @@ struct bfq_service_tree {
  *
  * bfq_sched_data is the basic scheduler queue.  It supports three
  * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup.  @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
  *
  * The supported ioprio_classes are the same as in CFQ, in descending
  * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
  * Requests from higher priority queues are served before all the
  * requests from lower priority queues; among requests of the same
  * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occur
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed to
+ * by in_service_entity is not a queue, then the in-service child
+ * entity of the entity pointed to by in_service_entity becomes idle
+ * on expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such a best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
  */
 struct bfq_sched_data {
        /* entity in service */
index 979f8f21b7e2b17268b2db3c9510c333fccbe10f..911aa7431dbeb08d6db591c1a0f577bd3af56e22 100644 (file)
@@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
 
 /*
  * This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
+ * service, according to the restrictive definition of the field
+ * next_in_service. In particular, this function is invoked for an
+ * entity that is about to be set in service.
  *
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
+ * If entity is a queue, then the entity is no longer a candidate for
+ * next service according to that definition, because entity is
+ * about to become the in-service queue. This function then returns
+ * true if entity is a queue.
  *
- * In contrast, the entity could stil be a candidate for next service
- * if it is not a queue, and has more than one child. In fact, even if
- * one of its children is about to be set in service, other children
- * may still be the next to serve. As a consequence, a non-queue
- * entity is not a candidate for next-service only if it has only one
- * child. And only if this condition holds, then the function returns
- * true for a non-queue entity.
+ * In contrast, entity could still be a candidate for next service if
+ * it is not a queue, and has more than one active child. In fact,
+ * even if one of its children is about to be set in service, other
+ * active children may still be the next to serve, for the parent
+ * entity, even according to the above definition. As a consequence, a
+ * non-queue entity is not a candidate for next-service only if it has
+ * only one active child. And only if this condition holds, then this
+ * function returns true for a non-queue entity.
  */
 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 {
@@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 
        bfqg = container_of(entity, struct bfq_group, entity);
 
+       /*
+        * The field active_entities does not always contain the
+        * actual number of active children entities: it happens to
+        * not account for the in-service entity in case the latter is
+        * removed from its active tree (which may get done after
+        * invoking the function bfq_no_longer_next_in_service in
+        * bfq_get_next_queue). Fortunately, here, i.e., while
+        * bfq_no_longer_next_in_service is not yet completed in
+        * bfq_get_next_queue, bfq_active_extract has not yet been
+        * invoked, and thus active_entities still coincides with the
+        * actual number of active entities.
+        */
        if (bfqg->active_entities == 1)
                return true;
 
@@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
  * one of its children receives a new request.
  *
  * Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possible extracting it
+ * inserts entity into its active tree, after possibly extracting it
  * from its idle tree.
  */
 static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
                entity->start = entity->finish;
                /*
                 * In addition, if the entity had more than one child
-                * when set in service, then was not extracted from
+                * when set in service, then it was not extracted from
                 * the active tree. This implies that the position of
                 * the entity in the active tree may need to be
                 * changed now, because we have just updated the start
@@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
                 * time in a moment (the requeueing is then, more
                 * precisely, a repositioning in this case). To
                 * implement this repositioning, we: 1) dequeue the
-                * entity here, 2) update the finish time and
-                * requeue the entity according to the new
-                * timestamps below.
+                * entity here, 2) update the finish time and requeue
+                * the entity according to the new timestamps below.
                 */
                if (entity->tree)
                        bfq_active_extract(st, entity);
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
 
 
 /**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *                      and activate, requeue or reposition all ancestors
- *                      for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ *                              bfq_queue, and activate, requeue or reposition
+ *                              all ancestors for which such an update becomes
+ *                              necessary.
  * @entity: the entity to activate.
  * @non_blocking_wait_rq: true if this entity was waiting for a request
  * @requeue: true if this is a requeue, which implies that bfqq is
@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
  * @ins_into_idle_tree: if false, the entity will not be put into the
  *                     idle tree.
  *
- * Deactivates an entity, independently from its previous state.  Must
+ * Deactivates an entity, independently of its previous state.  Must
  * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * from that tree, and if necessary and allowed, puts it into the idle
  * tree.
  */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
        st = bfq_entity_service_tree(entity);
        is_in_service = entity == sd->in_service_entity;
 
-       if (is_in_service)
+       if (is_in_service) {
                bfq_calc_finish(entity, entity->service);
+               sd->in_service_entity = NULL;
+       }
 
        if (entity->tree == &st->active)
                bfq_active_extract(st, entity);
@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 /**
  * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
  * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
  */
 static void bfq_deactivate_entity(struct bfq_entity *entity,
                                  bool ins_into_idle_tree,
@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
                         */
                        bfq_update_next_in_service(sd, NULL);
 
-               if (sd->next_in_service)
+               if (sd->next_in_service || sd->in_service_entity) {
                        /*
-                        * The parent entity is still backlogged,
-                        * because next_in_service is not NULL. So, no
-                        * further upwards deactivation must be
-                        * performed.  Yet, next_in_service has
-                        * changed.  Then the schedule does need to be
-                        * updated upwards.
+                        * The parent entity is still active, because
+                        * either next_in_service or in_service_entity
+                        * is not NULL. So, no further upwards
+                        * deactivation must be performed.  Yet,
+                        * next_in_service has changed. Then the
+                        * schedule does need to be updated upwards.
+                        *
+                        * NOTE If in_service_entity is not NULL, then
+                        * next_in_service may happen to be NULL,
+                        * although the parent entity is evidently
+                        * active. This happens if 1) the entity
+                        * pointed by in_service_entity is the only
+                        * pointed to by in_service_entity is the only
+                        * according to the definition of
+                        * next_in_service, the in_service_entity
+                        * cannot be considered as
+                        * next_in_service. See the comments on the
+                        * definition of next_in_service for details.
                         */
                        break;
+               }
 
                /*
                 * If we get here, then the parent is no more
@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 
                /*
                 * If entity is no longer a candidate for next
-                * service, then we extract it from its active tree,
-                * for the following reason. To further boost the
-                * throughput in some special case, BFQ needs to know
-                * which is the next candidate entity to serve, while
-                * there is already an entity in service. In this
-                * respect, to make it easy to compute/update the next
-                * candidate entity to serve after the current
-                * candidate has been set in service, there is a case
-                * where it is necessary to extract the current
-                * candidate from its service tree. Such a case is
-                * when the entity just set in service cannot be also
-                * a candidate for next service. Details about when
-                * this conditions holds are reported in the comments
-                * on the function bfq_no_longer_next_in_service()
-                * invoked below.
+                * service, then it must be extracted from its active
+                * tree, so as to make sure that it won't be
+                * considered when computing next_in_service. See the
+                * comments on the function
+                * bfq_no_longer_next_in_service() for details.
                 */
                if (bfq_no_longer_next_in_service(entity))
                        bfq_active_extract(bfq_entity_service_tree(entity),
                                           entity);
 
                /*
-                * For the same reason why we may have just extracted
-                * entity from its active tree, we may need to update
-                * next_in_service for the sched_data of entity too,
-                * regardless of whether entity has been extracted.
-                * In fact, even if entity has not been extracted, a
-                * descendant entity may get extracted. Such an event
-                * would cause a change in next_in_service for the
-                * level of the descendant entity, and thus possibly
-                * back to upper levels.
+                * Even if entity is not to be extracted according to
+                * the above check, a descendant entity may get
+                * extracted in one of the next iterations of this
+                * loop. Such an event could cause a change in
+                * next_in_service for the level of the descendant
+                * entity, and thus possibly back to this level.
                 *
-                * We cannot perform the resulting needed update
-                * before the end of this loop, because, to know which
-                * is the correct next-to-serve candidate entity for
-                * each level, we need first to find the leaf entity
-                * to set in service. In fact, only after we know
-                * which is the next-to-serve leaf entity, we can
-                * discover whether the parent entity of the leaf
-                * entity becomes the next-to-serve, and so on.
+                * However, we cannot perform the resulting needed
+                * update of next_in_service for this level before the
+                * end of the whole loop, because, to know which is
+                * the correct next-to-serve candidate entity for each
+                * level, we need first to find the leaf entity to set
+                * in service. In fact, only after we know which is
+                * the next-to-serve leaf entity, we can discover
+                * whether the parent entity of the leaf entity
+                * becomes the next-to-serve, and so on.
                 */
-
        }
 
        bfqq = bfq_entity_to_bfqq(entity);
index 041f7b7fa0d6def444e9349b6cf748afc8e89b2d..211ef367345f270f300cb5cdcb9a63c0a50d2420 100644 (file)
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
        struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
+       struct blk_mq_ctx *local_ctx = NULL;
 
        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx))
-               data->ctx = blk_mq_get_ctx(q);
+               data->ctx = local_ctx = blk_mq_get_ctx(q);
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
        if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_TAG_FAIL) {
+               if (local_ctx) {
+                       blk_mq_put_ctx(local_ctx);
+                       data->ctx = NULL;
+               }
                blk_queue_exit(q);
                return NULL;
        }
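
The hunk above follows a general error-path rule: on failure, release only the resources this function itself acquired, and clear the pointer so the caller cannot use or double-release it. A minimal userspace sketch of the same shape, with invented helpers standing in for blk_mq_get_ctx()/blk_mq_put_ctx():

#include <stdio.h>
#include <stdlib.h>

struct alloc_data { void *ctx; };

static void *get_ctx(void) { return malloc(1); }
static void put_ctx(void *ctx) { free(ctx); }

static int get_request(struct alloc_data *data, int tag_fails)
{
	void *local_ctx = NULL;

	if (!data->ctx)			/* the caller may supply a ctx */
		data->ctx = local_ctx = get_ctx();

	if (tag_fails) {		/* stands in for BLK_MQ_TAG_FAIL */
		if (local_ctx) {	/* undo only our own acquisition */
			put_ctx(local_ctx);
			data->ctx = NULL; /* no dangling pointer for caller */
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	struct alloc_data data = { .ctx = NULL };

	printf("failing path: %d, ctx=%p\n", get_request(&data, 1), data.ctx);
	printf("success path: %d\n", get_request(&data, 0));
	put_ctx(data.ctx);
	return 0;
}
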
@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-       blk_mq_put_ctx(alloc_data.ctx);
-       blk_queue_exit(q);
-
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
+       blk_mq_put_ctx(alloc_data.ctx);
+       blk_queue_exit(q);
+
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-       blk_queue_exit(q);
-
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
+       blk_queue_exit(q);
+
        return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
index 6b16ead1da5871abcef5b2233733f281158596a8..ad9749463d4fa9a382afa7f24587bbbe3a2efcc9 100644 (file)
@@ -875,6 +875,56 @@ static void print_version(void)
                printk(KERN_INFO "%s", version);
 }
 
+struct vdc_check_port_data {
+       int     dev_no;
+       char    *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+       struct vio_dev *vdev = to_vio_dev(dev);
+       struct vdc_check_port_data *port_data;
+
+       port_data = (struct vdc_check_port_data *)arg;
+
+       if ((vdev->dev_no == port_data->dev_no) &&
+           (!(strcmp((char *)&vdev->type, port_data->type))) &&
+               dev_get_drvdata(dev)) {
+               /* This device has already been configured
+                * by vdc_port_probe()
+                */
+               return 1;
+       } else {
+               return 0;
+       }
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+       struct vdc_check_port_data port_data;
+       struct device *dev;
+
+       port_data.dev_no = vdev->dev_no;
+       port_data.type = (char *)&vdev->type;
+
+       dev = device_find_child(vdev->dev.parent, &port_data,
+                               vdc_device_probed);
+
+       if (dev)
+               return true;
+
+       return false;
+}
+
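
device_find_child() walks a parent's children and returns the first one the match callback accepts, which is exactly how the mpgroup check above works. Here is a self-contained sketch of that search shape; the toy types stand in for struct device and the VIO specifics, and all names are invented:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_dev {
	int dev_no;
	const char *type;
	bool probed;		/* stands in for dev_get_drvdata() != NULL */
};

struct port_key {
	int dev_no;
	const char *type;
};

/* The match callback: same role as vdc_device_probed() above. */
static int match_probed_port(const struct toy_dev *dev, const void *arg)
{
	const struct port_key *key = arg;

	return dev->dev_no == key->dev_no &&
	       !strcmp(dev->type, key->type) &&
	       dev->probed;
}

/* Linear walk over children, as device_find_child() does internally. */
static const struct toy_dev *
find_child(const struct toy_dev *children, size_t n, const void *arg,
	   int (*fn)(const struct toy_dev *, const void *))
{
	size_t i;

	for (i = 0; i < n; i++)
		if (fn(&children[i], arg))
			return &children[i];
	return NULL;
}

int main(void)
{
	const struct toy_dev ports[] = {
		{ .dev_no = 0, .type = "vdc-port", .probed = true },
		{ .dev_no = 1, .type = "vdc-port", .probed = false },
	};
	const struct port_key key = { .dev_no = 0, .type = "vdc-port" };

	printf("already configured: %s\n",
	       find_child(ports, 2, &key, match_probed_port) ? "yes" : "no");
	return 0;
}
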
 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
        struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                goto err_out_release_mdesc;
        }
 
+       /* Check if this device is part of an mpgroup */
+       if (vdc_port_mpgroup_check(vdev)) {
+               printk(KERN_WARNING
+                       "VIO: Ignoring extra vdisk port %s\n",
+                       dev_name(&vdev->dev));
+               goto err_out_release_mdesc;
+       }
+
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        err = -ENOMEM;
        if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        if (err)
                goto err_out_free_tx_ring;
 
+       /* Note that the device driver_data is used to determine
+        * whether the port has been probed.
+        */
        dev_set_drvdata(&vdev->dev, port);
 
        mdesc_release(hp);
index 856d5dc02451d44b59695127994017877cd02b38..3b1b6340ba13a2977ffd0a13424ce95322f67f0e 100644 (file)
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
        struct zram *zram = dev_to_zram(dev);
-       char compressor[CRYPTO_MAX_ALG_NAME];
+       char compressor[ARRAY_SIZE(zram->compressor)];
        size_t sz;
 
        strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                return -EBUSY;
        }
 
-       strlcpy(zram->compressor, compressor, sizeof(compressor));
+       strcpy(zram->compressor, compressor);
        up_write(&zram->init_lock);
        return len;
 }
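
The fix above sizes the scratch buffer from the destination field itself, so the final copy cannot overflow no matter how the two size constants drift apart. A hedged userspace sketch: the 16-byte field size is invented, and snprintf() stands in for the kernel's strlcpy().

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct toy_zram {
	char compressor[16];	/* invented size for the example */
};

static void set_algo(struct toy_zram *z, const char *buf)
{
	char tmp[ARRAY_SIZE(z->compressor)];	/* sized from the target */

	snprintf(tmp, sizeof(tmp), "%s", buf);	/* strlcpy() in the kernel */
	/* ... validation of tmp would happen here ... */
	strcpy(z->compressor, tmp);	/* safe: both buffers same size */
}

int main(void)
{
	struct toy_zram z;

	set_algo(&z, "lzo");
	printf("%s\n", z.compressor);
	return 0;
}
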
index afa3ce7d3e729a1ad1485d129aa1d26646292f74..8ad92707e45f23b890203d5c5468d47473acf636 100644 (file)
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        print_once = true;
 #endif
-       pr_notice("random: %s called from %pF with crng_init=%d\n",
+       pr_notice("random: %s called from %pS with crng_init=%d\n",
                  func_name, caller, crng_init);
 }
 
index 8527a5899a2f7b6a3245a4a52ca4c0283b2f4666..3f819399cd95519a9956ed1d3ecba76fa2aa62b4 100644 (file)
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
        if (ret)
                return ret;
 
-       memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
-       memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
-       for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+       for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                        ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                }
        }
 
+       memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+       memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
        return 0;
 }
 
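
The reordering above matters because the comparison must see the old cached pads; copying first would make the loop compare the new state with itself and never set needs_inv. A simplified sketch of the compare-then-copy ordering, with invented types:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define STATE_WORDS 5	/* SHA1_DIGEST_SIZE / sizeof(u32) */

struct toy_ctx {
	unsigned int ipad[STATE_WORDS];	/* cached from the previous key */
	bool needs_inv;
};

static void setkey(struct toy_ctx *ctx, const unsigned int *istate)
{
	int i;

	/* compare first: has the derived state changed? */
	for (i = 0; i < STATE_WORDS; i++) {
		if (ctx->ipad[i] != istate[i]) {
			ctx->needs_inv = true;
			break;
		}
	}
	/* only now refresh the cached copy */
	memcpy(ctx->ipad, istate, sizeof(ctx->ipad));
}

int main(void)
{
	struct toy_ctx ctx = { .ipad = { 1, 2, 3, 4, 5 } };
	const unsigned int new_state[STATE_WORDS] = { 9, 2, 3, 4, 5 };

	setkey(&ctx, new_state);
	printf("needs_inv = %d\n", ctx.needs_inv);
	return 0;
}
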
index 1006b230b236f1c977d1c1a7d9bf32a268b7e263..65fa29591d21641fd1bd4e4484d8daeef56f9bdb 100644 (file)
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
 
 config I2C_VERSATILE
        tristate "ARM Versatile/Realview I2C bus support"
-       depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+       depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
        select I2C_ALGOBIT
        help
          Say yes if you want to support the I2C serial bus on ARMs Versatile
index 2ea6d0d25a01a33069bce293ab6858947a0cb01f..143a8fd582b4aeb905ea25b416261a5c1f44a6e9 100644 (file)
@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        }
 
        acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+       /* Some broken DSDTs use 1MiHz instead of 1MHz */
+       if (acpi_speed == 1048576)
+               acpi_speed = 1000000;
        /*
         * Find bus speed from the "clock-frequency" device property, ACPI
         * or by using fast mode if neither is set.
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        if (dev->clk_freq != 100000 && dev->clk_freq != 400000
            && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
                dev_err(&pdev->dev,
-                       "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+                       "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+                       dev->clk_freq);
                ret = -EINVAL;
                goto exit_reset;
        }
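
The 1048576 value is exactly 2^20: the firmware authors wrote "1M" in binary units. A tiny sketch of the normalization the quirk performs:

#include <stdio.h>

static unsigned int normalize_acpi_speed(unsigned int hz)
{
	if (hz == 1048576)	/* 1 << 20: the table meant 1000000 */
		hz = 1000000;
	return hz;
}

int main(void)
{
	printf("%u -> %u\n", 1048576u, normalize_acpi_speed(1048576u));
	printf("%u -> %u\n", 400000u, normalize_acpi_speed(400000u));
	return 0;
}
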
index 4842ec3a5451ed479446fc13352405aca45697d2..a9126b3cda61bc95f6a9d1282821ab7552484534 100644 (file)
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
                dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }
 
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client)
+{
+       if (!(client && matches))
+               return NULL;
+
+       return acpi_match_device(matches, &client->dev);
+}
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
                                           void *data, void **return_value)
 {
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
 
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
 {
        struct i2c_adapter *adapter = i2c_verify_adapter(dev);
 
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
        return ACPI_HANDLE(dev) == (acpi_handle)data;
 }
 
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
 {
        return ACPI_COMPANION(dev) == data;
 }
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
        struct device *dev;
 
        dev = bus_find_device(&i2c_bus_type, NULL, handle,
-                             i2c_acpi_match_adapter);
+                             i2c_acpi_find_match_adapter);
        return dev ? i2c_verify_adapter(dev) : NULL;
 }
 
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
        struct device *dev;
 
-       dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+       dev = bus_find_device(&i2c_bus_type, NULL, adev,
+                             i2c_acpi_find_match_device);
        return dev ? i2c_verify_client(dev) : NULL;
 }
 
index c89dac7fd2e7b793217119f2ccee849cf75ebcfe..12822a4b8f8f09b5c080f7338a89e0ea00cbb4f2 100644 (file)
@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev)
         * Tree match table entry is supplied for the probing device.
         */
        if (!driver->id_table &&
+           !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
            !i2c_of_match_device(dev->driver->of_match_table, client))
                return -ENODEV;
 
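
With the new check, a driver binds if any of three match sources accepts the device: a legacy id_table, an ACPI match, or an OF match. A boolean sketch of that gate, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct toy_driver {
	bool has_id_table;
	bool acpi_match;	/* i2c_acpi_match_device() succeeded */
	bool of_match;		/* i2c_of_match_device() succeeded */
};

static int device_probe(const struct toy_driver *drv)
{
	if (!drv->has_id_table && !drv->acpi_match && !drv->of_match)
		return -19;	/* -ENODEV: nothing claims this device */
	return 0;
}

int main(void)
{
	const struct toy_driver acpi_only = { .acpi_match = true };
	const struct toy_driver unmatched = { 0 };

	printf("acpi-only: %d, unmatched: %d\n",
	       device_probe(&acpi_only), device_probe(&unmatched));
	return 0;
}
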
index 3b63f5e5b89cbda662a580c387bfa2d23e2ebcef..3d3d9bf02101bddf06fc6597f107cc3ac8e3beb8 100644 (file)
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
 int i2c_check_7bit_addr_validity_strict(unsigned short addr);
 
 #ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client);
 void i2c_acpi_register_devices(struct i2c_adapter *adap);
 #else /* CONFIG_ACPI */
 static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client)
+{
+       return NULL;
+}
 #endif /* CONFIG_ACPI */
 extern struct notifier_block i2c_acpi_notifier;
 
index 2c64d0e0740f0db0c4427af9d6602bc8aaa0ea24..17121329bb793a615e8969a15327e3f07035cdbb 100644 (file)
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
          different sets of pins at run-time.
 
          This driver can also be built as a module. If so, the module will be
-         called pinctrl-i2cmux.
+         called i2c-mux-pinctrl.
 
 config I2C_MUX_REG
        tristate "Register-based I2C multiplexer"
index 01236cef7bfb1affe07e4214cf6d8baf6ca2a2a1..437522ca97b4b62fd79b8e84fa643ff9c4751ccd 100644 (file)
@@ -61,6 +61,7 @@ struct addr_req {
        void (*callback)(int status, struct sockaddr *src_addr,
                         struct rdma_dev_addr *addr, void *context);
        unsigned long timeout;
+       struct delayed_work work;
        int status;
        u32 seq;
 };
@@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);
 
-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 {
        unsigned long delay;
 
@@ -303,7 +304,7 @@ static void set_timeout(unsigned long time)
        if ((long)delay < 0)
                delay = 0;
 
-       mod_delayed_work(addr_wq, &work, delay);
+       mod_delayed_work(addr_wq, delayed_work, delay);
 }
 
 static void queue_req(struct addr_req *req)
@@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req)
 
        list_add(&req->list, &temp_req->list);
 
-       if (req_list.next == &req->list)
-               set_timeout(req->timeout);
+       set_timeout(&req->work, req->timeout);
        mutex_unlock(&lock);
 }
 
@@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in,
        return ret;
 }
 
+static void process_one_req(struct work_struct *_work)
+{
+       struct addr_req *req;
+       struct sockaddr *src_in, *dst_in;
+
+       mutex_lock(&lock);
+       req = container_of(_work, struct addr_req, work.work);
+
+       if (req->status == -ENODATA) {
+               src_in = (struct sockaddr *)&req->src_addr;
+               dst_in = (struct sockaddr *)&req->dst_addr;
+               req->status = addr_resolve(src_in, dst_in, req->addr,
+                                          true, req->seq);
+               if (req->status && time_after_eq(jiffies, req->timeout)) {
+                       req->status = -ETIMEDOUT;
+               } else if (req->status == -ENODATA) {
+                       /* requeue the work to retry later */
+                       set_timeout(&req->work, req->timeout);
+                       mutex_unlock(&lock);
+                       return;
+               }
+       }
+       list_del(&req->list);
+       mutex_unlock(&lock);
+
+       req->callback(req->status, (struct sockaddr *)&req->src_addr,
+               req->addr, req->context);
+       put_client(req->client);
+       kfree(req);
+}
+
 static void process_req(struct work_struct *work)
 {
        struct addr_req *req, *temp_req;
@@ -591,20 +622,23 @@ static void process_req(struct work_struct *work)
                                                   true, req->seq);
                        if (req->status && time_after_eq(jiffies, req->timeout))
                                req->status = -ETIMEDOUT;
-                       else if (req->status == -ENODATA)
+                       else if (req->status == -ENODATA) {
+                               set_timeout(&req->work, req->timeout);
                                continue;
+                       }
                }
                list_move_tail(&req->list, &done_list);
        }
 
-       if (!list_empty(&req_list)) {
-               req = list_entry(req_list.next, struct addr_req, list);
-               set_timeout(req->timeout);
-       }
        mutex_unlock(&lock);
 
        list_for_each_entry_safe(req, temp_req, &done_list, list) {
                list_del(&req->list);
+               /* It is safe to cancel other work items from this work item
+                * because only one work item can run at a time on this
+                * single-threaded workqueue.
+                */
+               cancel_delayed_work(&req->work);
                req->callback(req->status, (struct sockaddr *) &req->src_addr,
                        req->addr, req->context);
                put_client(req->client);
@@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
        req->context = context;
        req->client = client;
        atomic_inc(&client->refcount);
+       INIT_DELAYED_WORK(&req->work, process_one_req);
        req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
 
        req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
                        req->status = -ECANCELED;
                        req->timeout = jiffies;
                        list_move(&req->list, &req_list);
-                       set_timeout(req->timeout);
+                       set_timeout(&req->work, req->timeout);
                        break;
                }
        }
@@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
        if (event == NETEVENT_NEIGH_UPDATE) {
                struct neighbour *neigh = ctx;
 
-               if (neigh->nud_state & NUD_VALID) {
-                       set_timeout(jiffies);
-               }
+               if (neigh->nud_state & NUD_VALID)
+                       set_timeout(&work, jiffies);
        }
        return 0;
 }
@@ -820,7 +854,7 @@ static struct notifier_block nb = {
 
 int addr_init(void)
 {
-       addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+       addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
        if (!addr_wq)
                return -ENOMEM;
 
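
The per-request rework above hinges on embedding a work item in each request; process_one_req() then recovers the request from the work pointer with container_of(). A standalone sketch of that recovery, using toy types and a simplified macro:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_work {
	int pending;
};

struct toy_req {
	int status;
	struct toy_work work;	/* embedded: one work item per request */
};

/* The handler only sees the work pointer, like process_one_req() above. */
static void handle(struct toy_work *w)
{
	struct toy_req *req = container_of(w, struct toy_req, work);

	printf("request status: %d\n", req->status);
}

int main(void)
{
	struct toy_req req = { .status = -61 /* -ENODATA */ };

	handle(&req.work);
	return 0;
}
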
index 2c98533a0203b084fb198a3eb8088a0bac59522c..c551d2b275fdf339310a087bef9c6e821d7c7e09 100644 (file)
@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            int out_len)
 {
        struct ib_uverbs_resize_cq      cmd;
-       struct ib_uverbs_resize_cq_resp resp;
+       struct ib_uverbs_resize_cq_resp resp = {};
        struct ib_udata                 udata;
        struct ib_cq                    *cq;
        int                             ret = -EINVAL;
index 3d2609608f589625d0077167fa2e66a00430b89f..c023e2c81b8f2b06443452f91edcc506b46b6d17 100644 (file)
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);
 
+       kobject_put(&file->device->kobj);
        kfree(file);
 }
 
@@ -917,7 +918,6 @@ err:
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
        struct ib_uverbs_file *file = filp->private_data;
-       struct ib_uverbs_device *dev = file->device;
 
        mutex_lock(&file->cleanup_mutex);
        if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
                         ib_uverbs_release_async_event_file);
 
        kref_put(&file->ref, ib_uverbs_release_file);
-       kobject_put(&dev->kobj);
 
        return 0;
 }
index fb98ed67d5bc684b8cc0b941d7140986b95aa99b..7f8fe443df46f5b562ac3b2561e19226e3ab6b68 100644 (file)
@@ -895,7 +895,6 @@ static const struct {
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
-               [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
index 23fad6d969440bd2bd50a0c8b0dbafe8a92f4ae9..2540b65e242cebcf5b7c9fd60f936bc35bbf019b 100644 (file)
@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
                        continue;
 
                free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
-               if (IS_ERR(free_mr->mr_free_qp[i])) {
+               if (!free_mr->mr_free_qp[i]) {
                        dev_err(dev, "Create loop qp failed!\n");
                        goto create_lp_qp_failed;
                }
index ae0746754008798fc0c4ab7e940f736c376a72f1..3d701c7a4c9140e488b7427d9d901a4ea77d2786 100644 (file)
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 
        if (qp->ibqp.qp_type != IB_QPT_RC) {
                av = *wqe;
-               if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+               if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
                        *wqe += sizeof(struct mlx5_av);
                else
                        *wqe += sizeof(struct mlx5_base_av);
index ff50a7bd66d864506ec65aef1b63f45ce5d36e36..7ac25059c40f94aad951b28351cf425ebe573197 100644 (file)
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
        unsigned long flags;
 
        struct rw_semaphore vlan_rwsem;
+       struct mutex mcast_mutex;
 
        struct rb_root  path_tree;
        struct list_head path_list;
index f87d104837dcfab7f0e35b5b7fcae1e021599bfc..d69410c2ed97bdeceb17aedb2a7fe6049c59c310 100644 (file)
@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
-               p = cm_id->context;
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
index 7871379342f48fa77b2e6e8279ca774b4c49ad2f..184a22f4802773efc67131093f4ab4fcc89cd276 100644 (file)
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
        IPOIB_NETDEV_STAT(tx_bytes),
        IPOIB_NETDEV_STAT(tx_errors),
        IPOIB_NETDEV_STAT(rx_dropped),
-       IPOIB_NETDEV_STAT(tx_dropped)
+       IPOIB_NETDEV_STAT(tx_dropped),
+       IPOIB_NETDEV_STAT(multicast),
 };
 
 #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
index 57a9655e844deb1cc2eb57d9485f98e195368ac5..2e075377242e2baccc54cda5859d5b3ba7e768d0 100644 (file)
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 
        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;
+       if (skb->pkt_type == PACKET_MULTICAST)
+               dev->stats.multicast++;
 
        skb->dev = dev;
        if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
        return pending;
 }
 
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+                                       struct ib_qp *qp,
+                                       enum ib_qp_state new_state)
+{
+       struct ib_qp_attr qp_attr;
+       struct ib_qp_init_attr query_init_attr;
+       int ret;
+
+       ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+       if (ret) {
+               ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+               return;
+       }
+       /* print according to the new state and the previous state. */
+       if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+               ipoib_dbg(priv, "Failed to modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+       else
+               ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+                          new_state, qp_attr.qp_state);
+}
+
 int ipoib_ib_dev_stop_default(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
-               ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+               check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
 
        /* Wait for all sends and receives to complete */
        begin = jiffies;
index 4ce315c92b480fa705c30b33ffd7253b4cfded3b..6c77df34869dfb719d66787f6ccbb7637b042d36 100644 (file)
@@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
        int i, wait_flushed = 0;
 
        init_completion(&priv->ntbl.flushed);
+       set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
        spin_lock_irqsave(&priv->lock, flags);
 
@@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 
        ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
        init_completion(&priv->ntbl.deleted);
-       set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
        /* Stop GC if called at init fail need to cancel work */
        stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
        .ndo_tx_timeout          = ipoib_timeout,
        .ndo_set_rx_mode         = ipoib_set_mcast_list,
        .ndo_get_iflink          = ipoib_get_iflink,
+       .ndo_get_stats64         = ipoib_get_stats,
 };
 
 void ipoib_setup_common(struct net_device *dev)
@@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
        priv->dev = dev;
        spin_lock_init(&priv->lock);
        init_rwsem(&priv->vlan_rwsem);
+       mutex_init(&priv->mcast_mutex);
 
        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
@@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
        priv->dev->dev_id = port - 1;
 
        result = ib_query_port(hca, port, &attr);
-       if (!result)
-               priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
-       else {
+       if (result) {
                printk(KERN_WARNING "%s: ib_query_port %d failed\n",
                       hca->name, port);
                goto device_init_failed;
        }
 
+       priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
        /* MTU will be reset when mcast join happens */
        priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
@@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
-       } else
-               memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+       }
+
+       memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+              sizeof(union ib_gid));
        set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
        result = ipoib_dev_init(priv->dev, hca, port);
-       if (result < 0) {
+       if (result) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
@@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void)
        ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+       ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
 #endif
 
        /*
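
The min()/max() pair above pins the module parameter into [0, IPOIB_CM_MAX_CONN_QP]; the kernel's clamp() macro expresses the same in one call. A sketch with naive macros, where the 4096 upper limit is invented for the example:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define CLAMP(v, lo, hi)	MAX(lo, MIN(v, hi))

int main(void)
{
	int max_conn_qp = -5;	/* bogus value from a module parameter */

	max_conn_qp = CLAMP(max_conn_qp, 0, 4096);
	printf("clamped: %d\n", max_conn_qp);
	return 0;
}
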
index 057f58e6afca249744f2d9013021e3c1c5d6417f..93e149efc1f5fc0382b61dcfc9f84d786d8b52ca 100644 (file)
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
 int ipoib_mcast_stop_thread(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
-       unsigned long flags;
 
        ipoib_dbg_mcast(priv, "stopping multicast thread\n");
 
-       spin_lock_irqsave(&priv->lock, flags);
-       cancel_delayed_work(&priv->mcast_task);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       flush_workqueue(priv->wq);
+       cancel_delayed_work_sync(&priv->mcast_task);
 
        return 0;
 }
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
 {
        struct ipoib_mcast *mcast, *tmcast;
 
+       /*
+        * make sure the in-flight joins have finished before we attempt
+        * to leave
+        */
+       list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+                       wait_for_completion(&mcast->done);
+
        list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        struct ipoib_mcast *mcast, *tmcast;
        unsigned long flags;
 
+       mutex_lock(&priv->mcast_mutex);
        ipoib_dbg_mcast(priv, "flushing multicast list\n");
 
        spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-                       wait_for_completion(&mcast->done);
-
        ipoib_mcast_remove_list(&remove_list);
+       mutex_unlock(&priv->mcast_mutex);
 }
 
 static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        netif_addr_unlock(dev);
        local_irq_restore(flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-                       wait_for_completion(&mcast->done);
-
        ipoib_mcast_remove_list(&remove_list);
 
        /*
index 7b5fd8fb1761d1912be615ee14571e420dc4c92b..aaca0b3d662eb18bda0848652c309985d5f6ad0e 100644 (file)
@@ -44,7 +44,6 @@ struct procdata {
        char log_name[15];      /* log filename */
        struct log_data *log_head, *log_tail;   /* head and tail for queue */
        int if_used;            /* open count for interface */
-       int volatile del_lock;  /* lock for delete operations */
        unsigned char logtmp[LOG_MAX_LINELEN];
        wait_queue_head_t rd_queue;
 };
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
 {
        struct log_data *ib;
        struct procdata *pd = card->proclog;
-       int i;
        unsigned long flags;
 
        if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
        else
                pd->log_tail->next = ib;        /* follows existing messages */
        pd->log_tail = ib;      /* new tail */
-       i = pd->del_lock++;     /* get lock state */
-       spin_unlock_irqrestore(&card->hysdn_lock, flags);
 
        /* delete old entries */
-       if (!i)
-               while (pd->log_head->next) {
-                       if ((pd->log_head->usage_cnt <= 0) &&
-                           (pd->log_head->next->usage_cnt <= 0)) {
-                               ib = pd->log_head;
-                               pd->log_head = pd->log_head->next;
-                               kfree(ib);
-                       } else
-                               break;
-               }               /* pd->log_head->next */
-       pd->del_lock--;         /* release lock level */
+       while (pd->log_head->next) {
+               if ((pd->log_head->usage_cnt <= 0) &&
+                   (pd->log_head->next->usage_cnt <= 0)) {
+                       ib = pd->log_head;
+                       pd->log_head = pd->log_head->next;
+                       kfree(ib);
+               } else {
+                       break;
+               }
+       }               /* pd->log_head->next */
+
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
        wake_up_interruptible(&(pd->rd_queue));         /* announce new entry */
 }                              /* put_log_buffer */
 
index d922a88e407f119bbf52aae494c632e08d113e7f..2c8baa0c2c4e11f2b5d1c39d4c3ad795d634135a 100644 (file)
@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
         * tRC < 30ns implies EDO mode. This controller does not support this
         * mode.
         */
-       if (conf->timings.sdr.tRC_min < 30)
+       if (conf->timings.sdr.tRC_min < 30000)
                return -ENOTSUPP;
 
        atmel_smc_cs_conf_init(smcconf);
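
The struct nand_sdr_timings fields are expressed in picoseconds, so the 30 ns EDO threshold has to be written as 30000. A one-screen sketch of the corrected comparison, with a made-up 25 ns chip:

#include <stdio.h>

#define PSEC_PER_NSEC 1000UL

int main(void)
{
	unsigned long tRC_min = 25000;	/* a 25 ns chip, in picoseconds */
	unsigned long threshold = 30 * PSEC_PER_NSEC;	/* 30 ns in ps */

	printf("EDO mode required: %s\n",
	       tRC_min < threshold ? "yes -> -ENOTSUPP" : "no");
	return 0;
}
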
index 55a8ee5306ea992f39bbcd0c2cbaa5e40ad90214..8c210a5776bcbea1677e422dc3f06397c831d9cc 100644 (file)
@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
                 */
                struct platform_device *pdev = to_platform_device(userdev);
                const struct atmel_pmecc_caps *caps;
+               const struct of_device_id *match;
 
                /* No PMECC engine available. */
                if (!of_property_read_bool(userdev->of_node,
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
 
                caps = &at91sam9g45_caps;
 
-               /*
-                * Try to find the NFC subnode and extract the associated caps
-                * from there.
-                */
-               np = of_find_compatible_node(userdev->of_node, NULL,
-                                            "atmel,sama5d3-nfc");
-               if (np) {
-                       const struct of_device_id *match;
-
-                       match = of_match_node(atmel_pmecc_legacy_match, np);
-                       if (match && match->data)
-                               caps = match->data;
-
-                       of_node_put(np);
-               }
+               /* Find the caps associated to the NAND dev node. */
+               match = of_match_node(atmel_pmecc_legacy_match,
+                                     userdev->of_node);
+               if (match && match->data)
+                       caps = match->data;
 
                pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
        }
index 5fa5ddc94834d0a27a8add0125ed27310601af09..c6c18b82f8f4eade18561edb24439a9d4737efa5 100644 (file)
@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
 
        if (!section) {
                oobregion->offset = 0;
-               oobregion->length = 4;
+               if (mtd->oobsize == 16)
+                       oobregion->length = 4;
+               else
+                       oobregion->length = 3;
        } else {
+               if (mtd->oobsize == 8)
+                       return -ERANGE;
+
                oobregion->offset = 6;
                oobregion->length = ecc->total - 4;
        }
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
         * Ensure the timing mode has been changed on the chip side
         * before changing timings on the controller side.
         */
-       if (chip->onfi_version) {
+       if (chip->onfi_version &&
+           (le16_to_cpu(chip->onfi_params.opt_cmd) &
+            ONFI_OPT_CMD_SET_GET_FEATURES)) {
                u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
                        chip->onfi_timing_mode_default,
                };
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
  * @buf: the data to write
  * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
- * @cached: cached programming
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
index f06312df3669c18cb788fc033acf6b5811db8d0a..7e36d7d13c268fc2317c330f7688e188e81d6bdc 100644 (file)
@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
                struct nand_sdr_timings *timings = &iface->timings.sdr;
 
                /* microseconds -> picoseconds */
-               timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
-               timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
-               timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+               timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
+               timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
+               timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
 
                /* nanoseconds -> picoseconds */
                timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
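
On 32-bit builds, 1000000UL times a 16-bit value is a 32-bit multiply and wraps once the operand exceeds 4294; the ULL suffix promotes the whole expression to 64 bits. A sketch that reproduces the wrap (the cast mimics a 32-bit unsigned long; the 5000 µs value is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t t_prog = 5000;	/* microseconds, from the parameter page */

	/* On LP64 hosts the multiply below is already 64-bit; the cast
	 * mimics what a 32-bit unsigned long would have stored. */
	uint32_t wrapped = (uint32_t)(1000000UL * t_prog);
	uint64_t correct = 1000000ULL * t_prog;	/* forced 64-bit multiply */

	printf("32-bit result: %u ps\n", wrapped);
	printf("64-bit result: %llu ps\n", (unsigned long long)correct);
	return 0;
}
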
index d0b6f8f9f297ab89f355a727c333de1c5a2f7fc8..6abd142b13246f1189e189c03d3ff499665c1b0b 100644 (file)
@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
         */
        chip->clk_rate = NSEC_PER_SEC / min_clk_period;
        real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+       if (real_clk_rate <= 0) {
+               dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
+               return -EINVAL;
+       }
 
        /*
         * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
index 1e46418a3b74c3f351068fd6d1a3b3b5168e8ab2..264b281eb86bf1b52abb88ef67c2ce7143ba6024 100644 (file)
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
                 * all finished.
                 */
                mt7623_pad_clk_setup(ds);
+       } else {
+               u16 lcl_adv = 0, rmt_adv = 0;
+               u8 flowctrl;
+               u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
+
+               switch (phydev->speed) {
+               case SPEED_1000:
+                       mcr |= PMCR_FORCE_SPEED_1000;
+                       break;
+               case SPEED_100:
+                       mcr |= PMCR_FORCE_SPEED_100;
+                       break;
+               }
+
+               if (phydev->link)
+                       mcr |= PMCR_FORCE_LNK;
+
+               if (phydev->duplex) {
+                       mcr |= PMCR_FORCE_FDX;
+
+                       if (phydev->pause)
+                               rmt_adv = LPA_PAUSE_CAP;
+                       if (phydev->asym_pause)
+                               rmt_adv |= LPA_PAUSE_ASYM;
+
+                       if (phydev->advertising & ADVERTISED_Pause)
+                               lcl_adv |= ADVERTISE_PAUSE_CAP;
+                       if (phydev->advertising & ADVERTISED_Asym_Pause)
+                               lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+                       flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+                       if (flowctrl & FLOW_CTRL_TX)
+                               mcr |= PMCR_TX_FC_EN;
+                       if (flowctrl & FLOW_CTRL_RX)
+                               mcr |= PMCR_RX_FC_EN;
+               }
+               mt7530_write(priv, MT7530_PMCR_P(port), mcr);
        }
 }
 
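
The duplex branch above resolves pause settings the way mii_resolve_flowctrl_fdx() does: symmetric pause if both ends advertise it, otherwise the asymmetric-pause bits decide which direction is enabled. A sketch of that resolution with invented bit names:

#include <stdio.h>

#define ADV_PAUSE_CAP	0x1	/* invented stand-ins for the MII bits */
#define ADV_PAUSE_ASYM	0x2
#define FC_TX		0x1
#define FC_RX		0x2

static unsigned int resolve_fdx_flowctrl(unsigned int lcl, unsigned int rmt)
{
	unsigned int fc = 0;

	if (lcl & rmt & ADV_PAUSE_CAP)
		fc = FC_TX | FC_RX;	/* both ends: symmetric pause */
	else if (lcl & rmt & ADV_PAUSE_ASYM) {
		if (lcl & ADV_PAUSE_CAP)
			fc = FC_RX;	/* we only honor pause frames */
		else if (rmt & ADV_PAUSE_CAP)
			fc = FC_TX;	/* we only send pause frames */
	}
	return fc;
}

int main(void)
{
	printf("sym/sym  -> %u\n",
	       resolve_fdx_flowctrl(ADV_PAUSE_CAP, ADV_PAUSE_CAP));
	printf("asym mix -> %u\n",
	       resolve_fdx_flowctrl(ADV_PAUSE_CAP | ADV_PAUSE_ASYM,
				    ADV_PAUSE_ASYM));
	return 0;
}
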
index b83d76b998023c38c9e67b90ca73e393f7d29fc4..74db9822eb40437a92bc21ace75971df48c1bfa4 100644 (file)
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
 #define  PMCR_TX_FC_EN                 BIT(5)
 #define  PMCR_RX_FC_EN                 BIT(4)
 #define  PMCR_FORCE_SPEED_1000         BIT(3)
+#define  PMCR_FORCE_SPEED_100          BIT(2)
 #define  PMCR_FORCE_FDX                        BIT(1)
 #define  PMCR_FORCE_LNK                        BIT(0)
 #define  PMCR_COMMON_LINK              (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
index 86058a9f3417bc59613cb9c019638dc8a85a9731..1d307f2def2d910eff7c983d9866fa88b331d869 100644 (file)
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 
        xgene_enet_gpiod_get(pdata);
 
-       if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
-               pdata->clk = devm_clk_get(&pdev->dev, NULL);
-               if (IS_ERR(pdata->clk)) {
+       pdata->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(pdata->clk)) {
+               if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
                        /* Abort if the clock is defined but couldn't be
                         * retrieved. Always abort if the clock is missing on
                         * a DT system as the driver can't cope with this case.
index f411936b744cb1fa4d331f7b439a181916fa118e..a1125d10c8255f6eb8fbb6046b09473c35ba52b8 100644 (file)
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
        bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
 
        spin_lock_init(&bp->lock);
+       u64_stats_init(&bp->hw_stats.syncp);
 
        bp->rx_pending = B44_DEF_RX_RING_PENDING;
        bp->tx_pending = B44_DEF_TX_RING_PENDING;
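
u64_stats_init() here (and in the i40e/ixgbevf hunks below) prepares the seqcount that, on 32-bit SMP kernels, lets readers fetch a 64-bit counter without tearing: writers bump the sequence around updates and readers retry if it changed. A simplified single-threaded sketch; the memory barriers real code needs are deliberately omitted:

#include <stdint.h>
#include <stdio.h>

struct toy_stats {
	unsigned int seq;	/* even: stable, odd: write in progress */
	uint64_t packets;
};

static void write_begin(struct toy_stats *s) { s->seq++; }
static void write_end(struct toy_stats *s)   { s->seq++; }

static uint64_t read_packets(const struct toy_stats *s)
{
	unsigned int start;
	uint64_t v;

	do {
		start = s->seq;	/* retry if a writer was or became active */
		v = s->packets;
	} while ((start & 1) || start != s->seq);

	return v;
}

int main(void)
{
	struct toy_stats s = { 0, 0 };

	write_begin(&s);
	s.packets += 1500;
	write_end(&s);
	printf("packets = %llu\n", (unsigned long long)read_packets(&s));
	return 0;
}
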
index 5333601f855f88529c04e003eae5e3d19aa59f6d..dc3052751bc13ed2248c218de01849d865dbe952 100644 (file)
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                        p = (char *)&dev->stats;
                else
                        p = (char *)priv;
+
+               if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
+                       continue;
+
                p += s->stat_offset;
                data[j] = *(unsigned long *)p;
                j++;
index a3e6946796350d0a3bb79410d1f354a844ab7f60..c45e8e3b82d38da950a7cf1ca72022d120b7947e 100644 (file)
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 static void send_request_unmap(struct ibmvnic_adapter *, u8);
 static void send_login(struct ibmvnic_adapter *adapter);
 static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crqs(struct ibmvnic_adapter *);
 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 static int ibmvnic_init(struct ibmvnic_adapter *);
 static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        struct device *dev = &adapter->vdev->dev;
+       int rc;
 
        do {
                if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
                                dev_err(dev, "Capabilities query timeout\n");
                                return -1;
                        }
+                       rc = init_sub_crqs(adapter);
+                       if (rc) {
+                               dev_err(dev,
+                                       "Initialization of SCRQ's failed\n");
+                               return -1;
+                       }
+                       rc = init_sub_crq_irqs(adapter);
+                       if (rc) {
+                               dev_err(dev,
+                                       "Initialization of SCRQ's irqs failed\n");
+                               return -1;
+                       }
                }
 
                reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be64_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               release_sub_crqs(adapter);
                *req_value = be64_to_cpu(crq->request_capability_rsp.number);
                ibmvnic_send_req_caps(adapter, 1);
                return;
index b936febc315a17b3d8db0af05f60938d478edefd..2194960d5855c6576ec03c870479344b099ce12b 100644 (file)
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
        if (!tx_ring->tx_bi)
                goto err;
 
+       u64_stats_init(&tx_ring->syncp);
+
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
        /* add u32 for head writeback, align after this takes care of
index 084c5358279319ed6d826aa00f135adb98b7631b..032f8ac06357aefa7a695c6685b8bbbbf7a8949e 100644 (file)
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
        if (!tx_ring->tx_buffer_info)
                goto err;
 
+       u64_stats_init(&tx_ring->syncp);
+
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
        if (!rx_ring->rx_buffer_info)
                goto err;
 
+       u64_stats_init(&rx_ring->syncp);
+
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
index c751a1d434ad7167e6b65a62f46b7295044860f8..3d4e4a5d00d1c5f81267c4a4a9675bc667709211 100644 (file)
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
                            struct ethtool_wolinfo *wol)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
+       struct mlx4_caps *caps = &priv->mdev->dev->caps;
        int err = 0;
        u64 config = 0;
        u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
        mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
                MLX4_DEV_CAP_FLAG_WOL_PORT2;
 
-       if (!(priv->mdev->dev->caps.flags & mask)) {
+       if (!(caps->flags & mask)) {
                wol->supported = 0;
                wol->wolopts = 0;
                return;
        }
 
+       if (caps->wol_port[priv->port])
+               wol->supported = WAKE_MAGIC;
+       else
+               wol->supported = 0;
+
        err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
        if (err) {
                en_err(priv, "Failed to get WoL information\n");
                return;
        }
 
-       if (config & MLX4_EN_WOL_MAGIC)
-               wol->supported = WAKE_MAGIC;
-       else
-               wol->supported = 0;
-
-       if (config & MLX4_EN_WOL_ENABLED)
+       if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
                wol->wolopts = WAKE_MAGIC;
        else
                wol->wolopts = 0;
index 436f7689a03212943d5ea70a2214774d2c940d97..bf1638044a7a89b6e911b3f5786c75597f89f2ba 100644 (file)
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
  * header, the HW adds it. To address that, we are subtracting the pseudo
  * header checksum from the checksum value provided by the HW.
  */
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
-                               struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+                              struct iphdr *iph)
 {
        __u16 length_for_csum = 0;
        __wsum csum_pseudo_header = 0;
+       __u8 ipproto = iph->protocol;
+
+       if (unlikely(ipproto == IPPROTO_SCTP))
+               return -1;
 
        length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
        csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                               length_for_csum, iph->protocol, 0);
+                                               length_for_csum, ipproto, 0);
        skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+       return 0;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
                               struct ipv6hdr *ipv6h)
 {
+       __u8 nexthdr = ipv6h->nexthdr;
        __wsum csum_pseudo_hdr = 0;
 
-       if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
-                    ipv6h->nexthdr == IPPROTO_HOPOPTS))
+       if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+                    nexthdr == IPPROTO_HOPOPTS ||
+                    nexthdr == IPPROTO_SCTP))
                return -1;
-       hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+       hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
 
        csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
                                       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
        csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
-       csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+       csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+                                  (__force __wsum)htons(nexthdr));
 
        skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
        skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
        }
 
        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-               get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+               return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
-       else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
-               if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
-                       return -1;
+       if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+               return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
        return 0;
 }
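
The rewrite threads a return code through the IPv4 path so check_csum() can refuse protocols the arithmetic cannot serve: SCTP uses CRC32c, not the ones'-complement sum the hardware reports, so both address families now bail out with -1. Restated as a compact sketch of the IPv4 fixup (the pseudo header is subtracted because the hardware sum already includes it):

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

static int fixup_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
			   const struct iphdr *iph)
{
	__wsum pseudo;
	__u16 len;

	if (iph->protocol == IPPROTO_SCTP)	/* CRC32c: cannot fix up */
		return -1;

	len = be16_to_cpu(iph->tot_len) - (iph->ihl << 2);
	pseudo = csum_tcpudp_nofold(iph->saddr, iph->daddr, len,
				    iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, pseudo);
	return 0;
}
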
index 37e84a59e751d8ad44c34f5a0c7b16337a207c9e..041c0ed6592909a2d7b99cbaa51fc6cc59b7096b 100644 (file)
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [32] = "Loopback source checks support",
                [33] = "RoCEv2 support",
                [34] = "DMFS Sniffer support (UC & MC)",
-               [35] = "QinQ VST mode support",
-               [36] = "sl to vl mapping table change event support"
+               [35] = "Diag counters per port",
+               [36] = "QinQ VST mode support",
+               [37] = "sl to vl mapping table change event support",
        };
        int i;
 
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET     0x3e
 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET          0x3f
 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET         0x40
+#define QUERY_DEV_CAP_WOL_OFFSET               0x43
 #define QUERY_DEV_CAP_FLAGS_OFFSET             0x44
 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET          0x48
 #define QUERY_DEV_CAP_UAR_SZ_OFFSET            0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
        MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
        dev_cap->flags = flags | (u64)ext_flags << 32;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
+       dev_cap->wol_port[1] = !!(field & 0x20);
+       dev_cap->wol_port[2] = !!(field & 0x40);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
        dev_cap->reserved_uars = field >> 4;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
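
The new QUERY_DEV_CAP read pulls one byte at offset 0x43 and decodes two per-port WoL bits with the !! idiom, which collapses any non-zero mask result to exactly 1 so the bool holds a clean 0 or 1. As a tiny illustration, with the bit layout taken from the hunk above:

#include <linux/types.h>

static void decode_wol_caps(u8 field, bool wol_port[])
{
	wol_port[1] = !!(field & 0x20);	/* port 1: bit 5 of offset 0x43 */
	wol_port[2] = !!(field & 0x40);	/* port 2: bit 6 of offset 0x43 */
}
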
index 5343a0599253b98d6f2b9b77b65210a4e1e0abf9..b52ba01aa486a0b492cdb623cbe253c7a2e583d0 100644 (file)
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
        u32 dmfs_high_rate_qpn_range;
        struct mlx4_rate_limit_caps rl_caps;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+       bool wol_port[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_func_cap {
index a27c9c13a36ed11d577e7cd9cff1e2a9daec137d..09b9bc17bce998a99f360577a92a0211b552bf38 100644 (file)
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
+       dev->caps.wol_port[1]          = dev_cap->wol_port[1];
+       dev->caps.wol_port[2]          = dev_cap->wol_port[2];
 
        /* Save uar page shift */
        if (!mlx4_is_slave(dev)) {
index 656b2d3f1bee0e8aa5b1f328d5066d60f5e96099..5eb1606765c58064a5e2fd6677a791165c18c071 100644 (file)
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
                                                orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
                                                   MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
                                                orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        if (!bridge_port->bridge_device->multicast_enabled)
                return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
                return 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        bridge_device = bridge_port->bridge_device;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
                                                               bridge_device,
                                                               mdb->vid);
-       if (WARN_ON(!mlxsw_sp_port_vlan))
-               return -EINVAL;
+       if (!mlxsw_sp_port_vlan)
+               return 0;
 
        fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
        int err = 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        bridge_device = bridge_port->bridge_device;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
                                                               bridge_device,
                                                               mdb->vid);
-       if (WARN_ON(!mlxsw_sp_port_vlan))
-               return -EINVAL;
+       if (!mlxsw_sp_port_vlan)
+               return 0;
 
        fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
 
 }
 
+static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp_mid *mid, *tmp;
+
+       list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
+               list_del(&mid->list);
+               clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+               kfree(mid);
+       }
+}
+
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
 {
        mlxsw_sp_fdb_fini(mlxsw_sp);
-       WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+       mlxsw_sp_mids_fini(mlxsw_sp);
        WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
        kfree(mlxsw_sp->bridge);
 }
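
Rather than warning when the MDB list is still populated at teardown, the new mlxsw_sp_mids_fini() drains it. The _safe list walk is what makes unlinking during iteration legal; a generic sketch with a hypothetical entry type:

#include <linux/list.h>
#include <linux/slab.h>

struct mid_entry {			/* hypothetical entry type */
	struct list_head list;
	u16 mid;
};

/* _safe iteration caches the next pointer, so the current entry may be
 * unlinked and freed while walking. */
static void mids_list_fini(struct list_head *mids)
{
	struct mid_entry *mid, *tmp;

	list_for_each_entry_safe(mid, tmp, mids, list) {
		list_del(&mid->list);
		kfree(mid);
	}
}
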
index 18750ff0ede6262d1a5fd50b8acc808c12a50756..4631ca8b8eb2780865bc2eefae51ce5c5a61f5cf 100644 (file)
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
        tx_ring->idx = idx;
        tx_ring->r_vec = r_vec;
        tx_ring->is_xdp = is_xdp;
+       u64_stats_init(&tx_ring->r_vec->tx_sync);
 
        tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
        tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
 
        rx_ring->idx = idx;
        rx_ring->r_vec = r_vec;
+       u64_stats_init(&rx_ring->r_vec->rx_sync);
 
        rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
        rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
index 9da91045d167b095ad2bb45f5bd3fa1a89255012..3eb241657368e33cb6bc7b25afcecee931a24366 100644 (file)
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
        p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
-       if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+       if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;
 
        return 0;
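
The qed fix repairs a copy-paste error: the condition tested mfw_mb_addr instead of the freshly allocated mfw_mb_cur, so one failed allocation could slip through unnoticed. A sketch of checking exactly the pointers that were just allocated (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/slab.h>

static int alloc_shadow_pair(void **cur, void **shadow, size_t size)
{
	*cur = kzalloc(size, GFP_KERNEL);
	*shadow = kzalloc(size, GFP_KERNEL);
	if (!*cur || !*shadow) {
		/* kfree(NULL) is a no-op, so this frees whichever succeeded */
		kfree(*cur);
		kfree(*shadow);
		*cur = NULL;
		*shadow = NULL;
		return -ENOMEM;
	}
	return 0;
}
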
index 32279d21c8363d4976c6d313599fbc0e015e6a36..c2121d214f089eb1fe59af4ceb4b2b358abb8f0c 100644 (file)
 
 #include "cpts.h"
 
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+       unsigned long tmo;
+};
+
 #define cpts_read32(c, r)      readl_relaxed(&c->reg->r)
 #define cpts_write32(c, v, r)  writel_relaxed(v, &c->reg->r)
 
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+                     u16 ts_seqid, u8 ts_msgtype);
+
 static int event_expired(struct cpts_event *event)
 {
        return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
        return removed ? 0 : -1;
 }
 
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+       struct sk_buff *skb, *tmp;
+       u16 seqid;
+       u8 mtype;
+       bool found = false;
+
+       mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+       seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+       /* no need to grab txq.lock as access is always done under cpts->lock */
+       skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+               struct skb_shared_hwtstamps ssh;
+               unsigned int class = ptp_classify_raw(skb);
+               struct cpts_skb_cb_data *skb_cb =
+                                       (struct cpts_skb_cb_data *)skb->cb;
+
+               if (cpts_match(skb, class, seqid, mtype)) {
+                       u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+                       memset(&ssh, 0, sizeof(ssh));
+                       ssh.hwtstamp = ns_to_ktime(ns);
+                       skb_tstamp_tx(skb, &ssh);
+                       found = true;
+                       __skb_unlink(skb, &cpts->txq);
+                       dev_consume_skb_any(skb);
+                       dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+                               mtype, seqid);
+               } else if (time_after(jiffies, skb_cb->tmo)) {
+                       /* timeout any expired skbs over 1s */
+                       dev_dbg(cpts->dev,
+                               "expiring tx timestamp mtype %u seqid %04x\n",
+                               mtype, seqid);
+                       __skb_unlink(skb, &cpts->txq);
+                       dev_consume_skb_any(skb);
+               }
+       }
+
+       return found;
+}
+
 /*
  * Returns zero if matching event type was found.
  */
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
                event->low = lo;
                type = event_type(event);
                switch (type) {
+               case CPTS_EV_TX:
+                       if (cpts_match_tx_ts(cpts, event)) {
+                               /* if the new event matches an existing skb,
+                                * then don't queue it
+                                */
+                               break;
+                       }
                case CPTS_EV_PUSH:
                case CPTS_EV_RX:
-               case CPTS_EV_TX:
                        list_del_init(&event->list);
                        list_add_tail(&event->list, &cpts->events);
                        break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
        return -EOPNOTSUPP;
 }
 
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+       struct cpts *cpts = container_of(ptp, struct cpts, info);
+       unsigned long delay = cpts->ov_check_period;
+       struct timespec64 ts;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cpts->lock, flags);
+       ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+       if (!skb_queue_empty(&cpts->txq))
+               delay = CPTS_SKB_TX_WORK_TIMEOUT;
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+       return (long)delay;
+}
+
 static struct ptp_clock_info cpts_info = {
        .owner          = THIS_MODULE,
        .name           = "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
        .gettime64      = cpts_ptp_gettime,
        .settime64      = cpts_ptp_settime,
        .enable         = cpts_ptp_enable,
+       .do_aux_work    = cpts_overflow_check,
 };
 
-static void cpts_overflow_check(struct work_struct *work)
-{
-       struct timespec64 ts;
-       struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
-
-       cpts_ptp_gettime(&cpts->info, &ts);
-       pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
-       schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
-}
-
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
                      u16 ts_seqid, u8 ts_msgtype)
 {
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
                return 0;
 
        spin_lock_irqsave(&cpts->lock, flags);
-       cpts_fifo_read(cpts, CPTS_EV_PUSH);
+       cpts_fifo_read(cpts, -1);
        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
                        break;
                }
        }
+
+       if (ev_type == CPTS_EV_TX && !ns) {
+               struct cpts_skb_cb_data *skb_cb =
+                               (struct cpts_skb_cb_data *)skb->cb;
+               /* Not found, add frame to queue for processing later.
+                * The periodic FIFO check will handle this.
+                */
+               skb_get(skb);
+               /* get the timestamp for timeouts */
+               skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+               __skb_queue_tail(&cpts->txq, skb);
+               ptp_schedule_worker(cpts->clock, 0);
+       }
        spin_unlock_irqrestore(&cpts->lock, flags);
 
        return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
 {
        int err, i;
 
+       skb_queue_head_init(&cpts->txq);
        INIT_LIST_HEAD(&cpts->events);
        INIT_LIST_HEAD(&cpts->pool);
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
        }
        cpts->phc_index = ptp_clock_index(cpts->clock);
 
-       schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
+       ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
        return 0;
 
 err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
        if (WARN_ON(!cpts->clock))
                return;
 
-       cancel_delayed_work_sync(&cpts->overflow_work);
-
        ptp_clock_unregister(cpts->clock);
        cpts->clock = NULL;
 
        cpts_write32(cpts, 0, int_enable);
        cpts_write32(cpts, 0, control);
 
+       /* Drop all packet */
+       /* Drop all packets */
+       skb_queue_purge(&cpts->txq);
+
        clk_disable(cpts->refclk);
 }
 EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
        cpts->dev = dev;
        cpts->reg = (struct cpsw_cpts __iomem *)regs;
        spin_lock_init(&cpts->lock);
-       INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
 
        ret = cpts_of_parse(cpts, node);
        if (ret)
index 01ea82ba9cdca7e83a03f36d9ef1f43ec4267bc0..73d73faf0f38748327cf5ca241f7a5c685f10275 100644 (file)
@@ -119,13 +119,13 @@ struct cpts {
        u32 cc_mult; /* for the nominal frequency */
        struct cyclecounter cc;
        struct timecounter tc;
-       struct delayed_work overflow_work;
        int phc_index;
        struct clk *refclk;
        struct list_head events;
        struct list_head pool;
        struct cpts_event pool_data[CPTS_MAX_EVENTS];
        unsigned long ov_check_period;
+       struct sk_buff_head txq;
 };
 
 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
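
The cpts conversion retires its private delayed work in favor of the PTP auxiliary worker added by the core hunks further down: the driver fills in .do_aux_work, returns the next poll interval in jiffies (a negative value stops rescheduling), and arms it with ptp_schedule_worker(). A rough sketch for a hypothetical driver:

#include <linux/ptp_clock_kernel.h>

struct foo_clock {			/* hypothetical driver state */
	struct ptp_clock_info info;
	struct ptp_clock *clock;
	unsigned long check_period;	/* jiffies */
};

/* Runs on the PTP aux kthread; the return value is the delay in
 * jiffies until the next invocation (negative: do not reschedule). */
static long foo_do_aux_work(struct ptp_clock_info *info)
{
	struct foo_clock *foo = container_of(info, struct foo_clock, info);

	/* poll hardware, drain timestamp FIFOs, match queued skbs ... */
	return (long)foo->check_period;
}

/* .do_aux_work must be set in foo->info before ptp_clock_register();
 * afterwards the worker can be armed (or re-armed) on demand: */
static void foo_arm_worker(struct foo_clock *foo)
{
	ptp_schedule_worker(foo->clock, foo->check_period);
}
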
index de8156c6b2925741534a45a6c3a28a3afe9d1ad6..2bbda71818adb022853964dd6d51a14c26f7cd19 100644 (file)
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
        if (data[IFLA_GENEVE_ID]) {
                __u32 vni =  nla_get_u32(data[IFLA_GENEVE_ID]);
 
-               if (vni >= GENEVE_VID_MASK)
+               if (vni >= GENEVE_N_VID)
                        return -ERANGE;
        }
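
The geneve check was off by one: GENEVE_VID_MASK equals GENEVE_N_VID - 1, so comparing against the mask rejected the maximum 24-bit VNI of 0xffffff. Spelled out with the driver's own definitions:

#include <linux/errno.h>
#include <linux/types.h>

#define GENEVE_N_VID	(1u << 24)		/* as defined in the driver */
#define GENEVE_VID_MASK	(GENEVE_N_VID - 1)

static int validate_vni(u32 vni)
{
	/* ">= GENEVE_VID_MASK" would wrongly reject vni == 0xffffff */
	return vni >= GENEVE_N_VID ? -ERANGE : 0;
}
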
 
index 1542e837fdfa777e96155f041bc6d2072946d0cc..f38e32a7ec9c979ac4524c31e09da375a6e0606c 100644 (file)
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
 
        gtp->dev = dev;
 
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
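
Here, as in the ipvlan hunk below, a bare alloc_percpu() is swapped for netdev_alloc_pcpu_stats(), which zeroes the counters and runs u64_stats_init() on every CPU's instance; plain alloc_percpu() leaves the embedded syncp uninitialized. The usual ndo_init/ndo_uninit pairing, sketched:

#include <linux/netdevice.h>

static int foo_dev_init(struct net_device *dev)
{
	/* zeroed per-cpu stats, each instance's syncp already initialized */
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

static void foo_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}
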
 
index d6c25580f8dd636dc43fe464adb339f597fdbcee..12cc64bfcff83c3c28c1bf97033dfd0162848b0e 100644 (file)
@@ -765,7 +765,8 @@ struct netvsc_device {
        u32 max_chn;
        u32 num_chn;
 
-       refcount_t sc_offered;
+       atomic_t open_chn;
+       wait_queue_head_t subchan_open;
 
        struct rndis_device *extension;
 
index 0a9167dd72fb94e50692fa70ec9bc50fd99f733e..d18c3326a1f782b403de4a10ef057196beb3aaa5 100644 (file)
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
        init_completion(&net_device->channel_init_wait);
+       init_waitqueue_head(&net_device->subchan_open);
 
        return net_device;
 }
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
                struct netvsc_channel *nvchan = &net_device->chan_table[i];
 
                nvchan->channel = device->channel;
+               u64_stats_init(&nvchan->tx_stats.syncp);
+               u64_stats_init(&nvchan->rx_stats.syncp);
        }
 
        /* Enable NAPI handler before init callbacks */
index 85c00e1c52b6aa9309e782df639e03f444ffc1be..d6308ffda53ec797acf5f9e6038bf0a230008b55 100644 (file)
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
        else
                netif_napi_del(&nvchan->napi);
 
-       if (refcount_dec_and_test(&nvscdev->sc_offered))
-               complete(&nvscdev->channel_init_wait);
+       atomic_inc(&nvscdev->open_chn);
+       wake_up(&nvscdev->subchan_open);
 }
 
 int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
        net_device->max_chn = 1;
        net_device->num_chn = 1;
 
-       refcount_set(&net_device->sc_offered, 0);
-
        net_device->extension = rndis_device;
        rndis_device->ndev = net;
 
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
                rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
                                                        net_device->num_chn);
 
+       atomic_set(&net_device->open_chn, 1);
        num_rss_qs = net_device->num_chn - 1;
        if (num_rss_qs == 0)
                return 0;
 
-       refcount_set(&net_device->sc_offered, num_rss_qs);
        vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
        init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
        if (ret)
                goto out;
 
+       wait_for_completion(&net_device->channel_init_wait);
        if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                ret = -ENODEV;
                goto out;
        }
-       wait_for_completion(&net_device->channel_init_wait);
 
        net_device->num_chn = 1 +
                init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
+       /* wait for all subchannels to open */
+       wait_event(net_device->subchan_open,
+                  atomic_read(&net_device->open_chn) == net_device->num_chn);
+
        /* ignore failures from setting rss parameters, still have channels */
        rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
                                   net_device->num_chn);
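
The netvsc rework replaces the refcount-plus-completion handshake with an atomic counter and a waitqueue, since the number of subchannels is only learned from the host's reply: each subchannel open bumps the counter and wakes the waiter, and the caller sleeps until the count matches num_chn. The generic shape of that pattern, with illustrative names:

#include <linux/atomic.h>
#include <linux/wait.h>

struct chan_tracker {
	atomic_t open_chn;
	wait_queue_head_t subchan_open;
};

static void chan_tracker_init(struct chan_tracker *t)
{
	atomic_set(&t->open_chn, 1);	/* the primary channel counts */
	init_waitqueue_head(&t->subchan_open);
}

/* Called from each subchannel-open callback. */
static void chan_opened(struct chan_tracker *t)
{
	atomic_inc(&t->open_chn);
	wake_up(&t->subchan_open);
}

/* Called once the expected channel count is known from the host. */
static void wait_for_channels(struct chan_tracker *t, int num_chn)
{
	wait_event(t->subchan_open,
		   atomic_read(&t->open_chn) == num_chn);
}
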
index f37e3c1fd4e73f27e46564a6ef2739ff658523df..8dab74a81303277aed5cf24844941bdcadfcef1a 100644 (file)
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
 
        netdev_lockdep_set_classes(dev);
 
-       ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+       ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;
 
index bd4303944e4405d543a399f6fc529df9ffd41e02..a404552555d488c832e7758293d7d4c1e229e679 100644 (file)
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
        spin_unlock(&pch->downl);
        /* see if there is anything from the attached unit to be sent */
        if (skb_queue_empty(&pch->file.xq)) {
-               read_lock(&pch->upl);
                ppp = pch->ppp;
                if (ppp)
-                       ppp_xmit_process(ppp);
-               read_unlock(&pch->upl);
+                       __ppp_xmit_process(ppp);
        }
 }
 
 static void ppp_channel_push(struct channel *pch)
 {
-       local_bh_disable();
-
-       __ppp_channel_push(pch);
-
-       local_bh_enable();
+       read_lock_bh(&pch->upl);
+       if (pch->ppp) {
+               (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+               __ppp_channel_push(pch);
+               (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+       } else {
+               __ppp_channel_push(pch);
+       }
+       read_unlock_bh(&pch->upl);
 }
 
 /*
index d1092421aaa7e7b69ba926d508155574f072ba61..9a4171b9094760871cf4396c99b2236bccd15193 100644 (file)
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                           struct asix_rx_fixup_info *rx);
 int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
+void asix_rx_fixup_common_free(struct asix_common_private *dp);
 
 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                              gfp_t flags);
index 7847436c441e3c16b91fb0d72c3f07061d4e772d..522d2900cd1dd942ae58c407920289e95f0ae5e3 100644 (file)
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                               value, index, data, size);
 }
 
+static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
+{
+       /* Reset the variables that have a lifetime outside of
+        * asix_rx_fixup_internal() so that future processing starts from a
+        * known set of initial conditions.
+        */
+
+       if (rx->ax_skb) {
+               /* Discard any incomplete Ethernet frame in the netdev buffer */
+               kfree_skb(rx->ax_skb);
+               rx->ax_skb = NULL;
+       }
+
+       /* Assume the Data header 32-bit word is at the start of the current
+        * or next URB socket buffer so reset all the state variables.
+        */
+       rx->remaining = 0;
+       rx->split_head = false;
+       rx->header = 0;
+}
+
 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                           struct asix_rx_fixup_info *rx)
 {
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                if (size != ((~rx->header >> 16) & 0x7ff)) {
                        netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
                                   rx->remaining);
-                       if (rx->ax_skb) {
-                               kfree_skb(rx->ax_skb);
-                               rx->ax_skb = NULL;
-                               /* Discard the incomplete netdev Ethernet frame
-                                * and assume the Data header is at the start of
-                                * the current URB socket buffer.
-                                */
-                       }
-                       rx->remaining = 0;
+                       reset_asix_rx_fixup_info(rx);
                }
        }
 
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                        if (size != ((~rx->header >> 16) & 0x7ff)) {
                                netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
                                           rx->header, offset);
+                               reset_asix_rx_fixup_info(rx);
                                return 0;
                        }
                        if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
                                netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
                                           size);
+                               reset_asix_rx_fixup_info(rx);
                                return 0;
                        }
 
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                if (rx->ax_skb) {
                        skb_put_data(rx->ax_skb, skb->data + offset,
                                     copy_length);
-                       if (!rx->remaining)
+                       if (!rx->remaining) {
                                usbnet_skb_return(dev, rx->ax_skb);
+                               rx->ax_skb = NULL;
+                       }
                }
 
                offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
        if (skb->len != offset) {
                netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
                           skb->len, offset);
+               reset_asix_rx_fixup_info(rx);
                return 0;
        }
 
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
        return asix_rx_fixup_internal(dev, skb, rx);
 }
 
+void asix_rx_fixup_common_free(struct asix_common_private *dp)
+{
+       struct asix_rx_fixup_info *rx;
+
+       if (!dp)
+               return;
+
+       rx = &dp->rx_fixup_info;
+
+       if (rx->ax_skb) {
+               kfree_skb(rx->ax_skb);
+               rx->ax_skb = NULL;
+       }
+}
+
 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                              gfp_t flags)
 {
index a3aa0a27dfe56b22121a0571cc4eaca1b2bbee03..b2ff88e69a819cc3098a720ece238d8847d6be57 100644 (file)
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 
 static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
+       asix_rx_fixup_common_free(dev->driver_priv);
        kfree(dev->driver_priv);
 }
 
index 5833f7e2a127811aa2298ded2bc62b1d06ae1e9d..b99a7fb09f8e31827a725151b415967699cdfa27 100644 (file)
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        /* Init LTM */
        lan78xx_init_ltm(dev);
 
-       dev->net->hard_header_len += TX_OVERHEAD;
-       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
        if (dev->udev->speed == USB_SPEED_SUPER) {
                buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
                dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
                return ret;
        }
 
+       dev->net->hard_header_len += TX_OVERHEAD;
+       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
        /* Init all registers */
        ret = lan78xx_reset(dev);
 
-       lan78xx_mdio_init(dev);
+       ret = lan78xx_mdio_init(dev);
 
        dev->net->flags |= IFF_MULTICAST;
 
        pdata->wol = WAKE_MAGIC;
 
-       return 0;
+       return ret;
 }
 
 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
        udev = interface_to_usbdev(intf);
        udev = usb_get_dev(udev);
 
-       ret = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct lan78xx_net));
        if (!netdev) {
-                       dev_err(&intf->dev, "Error: OOM\n");
-                       goto out1;
+               dev_err(&intf->dev, "Error: OOM\n");
+               ret = -ENOMEM;
+               goto out1;
        }
 
        /* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
        ret = register_netdev(netdev);
        if (ret != 0) {
                netif_err(dev, probe, netdev, "couldn't register the device\n");
-               goto out2;
+               goto out3;
        }
 
        usb_set_intfdata(intf, dev);
index 5894e3c9468f590e6b50144901b3f3b606e4ee69..8c373360827108855717f6d139034ff9f264bf0b 100644 (file)
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
+       {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 static void qmi_wwan_disconnect(struct usb_interface *intf)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
-       struct qmi_wwan_state *info = (void *)&dev->data;
+       struct qmi_wwan_state *info;
        struct list_head *iter;
        struct net_device *ldev;
 
+       /* called twice if separate control and data intf */
+       if (!dev)
+               return;
+       info = (void *)&dev->data;
        if (info->flags & QMI_WWAN_FLAG_MUX) {
                if (!rtnl_trylock()) {
                        restart_syscall();
index 96aa7e6cf214cc332eba6d54fcd626bee917c633..e17baac70f439f86c723732cc0298eaa5eee15de 100644 (file)
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 
 out:
        skb_gro_remcsum_cleanup(skb, &grc);
+       skb->remcsum_offload = 0;
        NAPI_GRO_CB(skb)->flush |= flush;
 
        return pp;
index af0cc3456dc1b48b1325c06c5edd2ca8cc22a640..b4b7eab2940024024c46ead23d6b1c415fa146f7 100644 (file)
@@ -4259,6 +4259,41 @@ int pci_reset_function(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
+/**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device.  This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset.  It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+       int rc;
+
+       rc = pci_probe_reset_function(dev);
+       if (rc)
+               return rc;
+
+       pci_dev_save_and_disable(dev);
+
+       rc = __pci_reset_function_locked(dev);
+
+       pci_dev_restore(dev);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
 /**
  * pci_try_reset_function - quiesce and reset a PCI device function
  * @dev: PCI device to reset
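
pci_reset_function_locked() is the variant for callers that already hold the PCI device lock; per the kerneldoc it still saves and restores config state around the reset. Assuming device_lock() on the embedded struct device is the lock in question, usage would look roughly like:

#include <linux/device.h>
#include <linux/pci.h>

/* Hypothetical caller that takes the device lock itself; code that is
 * already inside a locked region would just call the function. */
static int foo_reset_fn(struct pci_dev *pdev)
{
	int rc;

	device_lock(&pdev->dev);
	rc = pci_reset_function_locked(pdev);
	device_unlock(&pdev->dev);
	return rc;
}
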
index 20f1b44939944614ff270c757fc7152f901e9f09..04e929fd0ffee494cc744cf495e5acd9e437ea6b 100644 (file)
@@ -1547,6 +1547,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
                },
        },
+       {
+               .ident = "HP Chromebook 11 G5 (Setzer)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+               },
+       },
        {
                .ident = "Acer Chromebook R11 (Cyan)",
                .matches = {
index 4d4ef42a39b5faaa1969d20a5aeeedffef90074c..86c4b3fab7b0ea8f0abfdf36e5a2b035e024ec66 100644 (file)
@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
 
 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
 static const unsigned int mrfld_pwm0_pins[] = { 144 };
 static const unsigned int mrfld_pwm1_pins[] = { 145 };
 static const unsigned int mrfld_pwm2_pins[] = { 132 };
index f024e25787fc603c3469ea75de53452743d6af16..0c6d7812d6fd981b95f9d526cb9d6645e4b7855d 100644 (file)
@@ -37,7 +37,7 @@
 #define IRQ_STATUS     0x10
 #define IRQ_WKUP       0x18
 
-#define NB_FUNCS 2
+#define NB_FUNCS 3
 #define GPIO_PER_REG   32
 
 /**
@@ -126,6 +126,16 @@ struct armada_37xx_pinctrl {
                .funcs = {_func1, "gpio"}       \
        }
 
+#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \
+       {                                       \
+               .name = _name,                  \
+               .start_pin = _start,            \
+               .npins = _nr,                   \
+               .reg_mask = _mask,              \
+               .val = {_v1, _v2, _v3}, \
+               .funcs = {_f1, _f2, "gpio"}     \
+       }
+
 #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \
                      _f1, _f2)                         \
        {                                               \
@@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
        PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
        PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
        PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
-       PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
+       PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
        PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
        PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
        PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
        PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
-       PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"),
+       PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
+                      "mii", "mii_err"),
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_nb = {
@@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_sb = {
-       .nr_pins = 29,
+       .nr_pins = 30,
        .name = "GPIO2",
        .groups = armada_37xx_sb_groups,
        .ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
@@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
 {
        int f;
 
-       for (f = 0; f < NB_FUNCS; f++)
+       for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
                if (!strcmp(grp->funcs[f], func))
                        return f;
 
@@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
                for (j = 0; j < grp->extra_npins; j++)
                        grp->pins[i+j] = grp->extra_pin + j;
 
-               for (f = 0; f < NB_FUNCS; f++) {
+               for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) {
                        int ret;
                        /* check for unique functions and count groups */
                        ret = armada_37xx_add_function(info->funcs, &funcsize,
@@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
                        struct armada_37xx_pin_group *gp = &info->groups[g];
                        int f;
 
-                       for (f = 0; f < NB_FUNCS; f++) {
+                       for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
                                if (strcmp(gp->funcs[f], name) == 0) {
                                        *groups = gp->name;
                                        groups++;
index 159580c04b14b138c5ec78b6768db2224d63fb58..47a392bc73c821203abe1c7cb1e966db9a40a3ba 100644 (file)
@@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
                  SUNXI_FUNCTION_VARIANT(0x3, "emac",   /* ETXD1 */
                                         PINCTRL_SUN7I_A20),
                  SUNXI_FUNCTION(0x4, "keypad"),        /* IN6 */
+                 SUNXI_FUNCTION(0x5, "sim"),           /* DET */
                  SUNXI_FUNCTION_IRQ(0x6, 16),          /* EINT16 */
                  SUNXI_FUNCTION(0x7, "csi1")),         /* D16 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
index a433a306a2d06ae11dd2a8c61830e0c76c4e7862..c75e094b2d90779f92570a534fd1c8d53e6a9e97 100644 (file)
@@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183};
 static const int usb1_muxvals[] = {0, 0};
 static const unsigned usb2_pins[] = {184, 185};
 static const int usb2_muxvals[] = {0, 0};
-static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_pins[] = {187, 188};
 static const int usb3_muxvals[] = {0, 0};
 static const unsigned port_range0_pins[] = {
        300, 301, 302, 303, 304, 305, 306, 307,         /* PORT0x */
index 787e3967bd5c5741aeb7a2cb96c18e38901092ed..f828ee340a98238052448d15a5f7604c286926dd 100644 (file)
@@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
        struct zx_pinctrl_soc_info *info = zpctl->info;
        const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
        struct zx_pin_data *data = pindesc->drv_data;
-       struct zx_mux_desc *mux = data->muxes;
-       u32 mask = (1 << data->width) - 1;
-       u32 offset = data->offset;
-       u32 bitpos = data->bitpos;
+       struct zx_mux_desc *mux;
+       u32 mask, offset, bitpos;
        struct function_desc *func;
        unsigned long flags;
        u32 val, mval;
@@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
        if (!data)
                return -EINVAL;
 
+       mux = data->muxes;
+       mask = (1 << data->width) - 1;
+       offset = data->offset;
+       bitpos = data->bitpos;
+
        func = pinmux_generic_get_function(pctldev, func_selector);
        if (!func)
                return -EINVAL;
index b77435783ef332c30963f84e280cefbedbdb8e3b..7eacc1c4b3b10e1103e6e9c895112eb176245faa 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
 
 #include "ptp_private.h"
 
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
        kfree(ptp);
 }
 
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+       struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+                                            aux_work.work);
+       struct ptp_clock_info *info = ptp->info;
+       long delay;
+
+       delay = info->do_aux_work(info);
+
+       if (delay >= 0)
+               kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
 /* public interface */
 
 struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        mutex_init(&ptp->pincfg_mux);
        init_waitqueue_head(&ptp->tsev_wq);
 
+       if (ptp->info->do_aux_work) {
+               char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
+
+               kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+               ptp->kworker = kthread_create_worker(0, worker_name ?
+                                                    worker_name : info->name);
+               kfree(worker_name);
+               if (IS_ERR(ptp->kworker)) {
+                       err = PTR_ERR(ptp->kworker);
+                       pr_err("failed to create ptp aux_worker %d\n", err);
+                       goto kworker_err;
+               }
+       }
+
        err = ptp_populate_pin_groups(ptp);
        if (err)
                goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
 no_device:
        ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
+       if (ptp->kworker)
+               kthread_destroy_worker(ptp->kworker);
+kworker_err:
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);
 
+       if (ptp->kworker) {
+               kthread_cancel_delayed_work_sync(&ptp->aux_work);
+               kthread_destroy_worker(ptp->kworker);
+       }
+
        /* Release the clock's resources. */
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
 }
 EXPORT_SYMBOL(ptp_find_pin);
 
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+       return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
index d95888974d0c67f1e4cf4d3c2229ba4e901a2d87..b86f1bfecd6f2329cdd19c16da49c562e9e86fc6 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
        struct attribute_group pin_attr_group;
        /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
        const struct attribute_group *pin_attr_groups[2];
+       struct kthread_worker *kworker;
+       struct kthread_delayed_work aux_work;
 };
 
 /*
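
Under the hood the aux work rides on a dedicated kthread_worker: ptp_schedule_worker(), exported above, is a thin wrapper over kthread_mod_delayed_work(). The bare kthread delayed-work lifecycle, sketched outside the PTP context with placeholder names:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>

static struct kthread_worker *foo_worker;
static struct kthread_delayed_work foo_dwork;

static void foo_work_fn(struct kthread_work *work)
{
	/* periodic processing; requeue ourselves in one second */
	kthread_queue_delayed_work(foo_worker, &foo_dwork, HZ);
}

static int foo_worker_start(void)
{
	kthread_init_delayed_work(&foo_dwork, foo_work_fn);
	foo_worker = kthread_create_worker(0, "foo-worker");
	if (IS_ERR(foo_worker))
		return PTR_ERR(foo_worker);
	kthread_mod_delayed_work(foo_worker, &foo_dwork, 0); /* arm or re-arm */
	return 0;
}

static void foo_worker_stop(void)
{
	kthread_cancel_delayed_work_sync(&foo_dwork);
	kthread_destroy_worker(foo_worker);
}
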
index 8975cd32139047cf03afe9a93544cc919eaed661..d42e758518ed92e33dfd6c6aaf2a4ee2ec198ff0 100644 (file)
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                struct rtable *rt = (struct rtable *) dst;
                __be32 *pkey = &ip_hdr(skb)->daddr;
 
-               if (rt->rt_gateway)
+               if (rt && rt->rt_gateway)
                        pkey = &rt->rt_gateway;
 
                /* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                struct rt6_info *rt = (struct rt6_info *) dst;
                struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
 
-               if (!ipv6_addr_any(&rt->rt6i_gateway))
+               if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
                        pkey = &rt->rt6i_gateway;
 
                /* IPv6 */
index 707ee2f5954d0ac0890c6f05967f7acd24157704..4591113c49de3af951908ed2257f6f5e88663b96 100644 (file)
@@ -3198,10 +3198,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
                return -EBUSY;
        if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
                return -EFAULT;
-       if (qd.cnum == -1)
+       if (qd.cnum == -1) {
+               if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+                       return -EINVAL;
                qd.cnum = qd.id;
-       else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
-       {
+       } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
                if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
                        return -EINVAL;
                qd.instance = dev->scsi_host_ptr->host_no;
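
The aacraid change closes a hole where a user-supplied qd.id was copied into qd.cnum without a range check before indexing container state. The general rule, validate any user-controlled index before it touches a kernel array, in miniature (types here are hypothetical):

#include <linux/errno.h>
#include <linux/uaccess.h>

struct foo_query { int id; };		/* hypothetical ioctl payload */

static int foo_lookup(const void __user *arg, int max_containers)
{
	struct foo_query q;

	if (copy_from_user(&q, arg, sizeof(q)))
		return -EFAULT;
	/* reject out-of-range ids before any array access */
	if (q.id < 0 || q.id >= max_containers)
		return -EINVAL;
	return q.id;	/* safe to use as an index from here on */
}
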
index 7dfe709a713837b075cd2021c00ee6a5fb44d978..6844ba36161638d995f3d5d5135c160fc54bc9ed 100644 (file)
@@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = {
 };
 
 /**
- * bnx2fc_percpu_thread_create - Create a receive thread for an
- *                              online CPU
+ * bnx2fc_cpu_online - Create a receive thread for an online CPU
  *
  * @cpu: cpu index for the online cpu
  */
-static void bnx2fc_percpu_thread_create(unsigned int cpu)
+static int bnx2fc_cpu_online(unsigned int cpu)
 {
        struct bnx2fc_percpu_s *p;
        struct task_struct *thread;
@@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
        thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
                                        (void *)p, cpu_to_node(cpu),
                                        "bnx2fc_thread/%d", cpu);
+       if (IS_ERR(thread))
+               return PTR_ERR(thread);
+
        /* bind thread to the cpu */
-       if (likely(!IS_ERR(thread))) {
-               kthread_bind(thread, cpu);
-               p->iothread = thread;
-               wake_up_process(thread);
-       }
+       kthread_bind(thread, cpu);
+       p->iothread = thread;
+       wake_up_process(thread);
+       return 0;
 }
 
-static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+static int bnx2fc_cpu_offline(unsigned int cpu)
 {
        struct bnx2fc_percpu_s *p;
        struct task_struct *thread;
@@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
        thread = p->iothread;
        p->iothread = NULL;
 
-
        /* Free all work in the list */
        list_for_each_entry_safe(work, tmp, &p->work_list, list) {
                list_del_init(&work->list);
@@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
 
        if (thread)
                kthread_stop(thread);
-}
-
-
-static int bnx2fc_cpu_online(unsigned int cpu)
-{
-       printk(PFX "CPU %x online: Create Rx thread\n", cpu);
-       bnx2fc_percpu_thread_create(cpu);
-       return 0;
-}
-
-static int bnx2fc_cpu_dead(unsigned int cpu)
-{
-       printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
-       bnx2fc_percpu_thread_destroy(cpu);
        return 0;
 }
 
@@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void)
                spin_lock_init(&p->fp_work_lock);
        }
 
-       get_online_cpus();
-
-       for_each_online_cpu(cpu)
-               bnx2fc_percpu_thread_create(cpu);
-
-       rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                      "scsi/bnx2fc:online",
-                                      bnx2fc_cpu_online, NULL);
+       rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
+                              bnx2fc_cpu_online, bnx2fc_cpu_offline);
        if (rc < 0)
-               goto stop_threads;
+               goto stop_thread;
        bnx2fc_online_state = rc;
 
-       cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
-                                 NULL, bnx2fc_cpu_dead);
-       put_online_cpus();
-
        cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
-
        return 0;
 
-stop_threads:
-       for_each_online_cpu(cpu)
-               bnx2fc_percpu_thread_destroy(cpu);
-       put_online_cpus();
+stop_thread:
        kthread_stop(l2_thread);
 free_wq:
        destroy_workqueue(bnx2fc_wq);
@@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void)
        struct fcoe_percpu_s *bg;
        struct task_struct *l2_thread;
        struct sk_buff *skb;
-       unsigned int cpu = 0;
 
        /*
         * NOTE: Since cnic calls register_driver routine rtnl_lock,
@@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void)
        if (l2_thread)
                kthread_stop(l2_thread);
 
-       get_online_cpus();
-       /* Destroy per cpu threads */
-       for_each_online_cpu(cpu) {
-               bnx2fc_percpu_thread_destroy(cpu);
-       }
-
-       cpuhp_remove_state_nocalls(bnx2fc_online_state);
-       cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
-
-       put_online_cpus();
+       cpuhp_remove_state(bnx2fc_online_state);
 
        destroy_workqueue(bnx2fc_wq);
        /*
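
Both this bnx2fc conversion and the bnx2i one further down collapse the open-coded get_online_cpus()/for_each_online_cpu() bring-up into a single cpuhp_setup_state() call: unlike the _nocalls variant, it invokes the online callback for every CPU that is already up, and cpuhp_remove_state() symmetrically runs the offline callback on teardown. Sketched with placeholder names:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int foo_online_state;

static int foo_cpu_online(unsigned int cpu)
{
	/* create and bind the per-cpu receive thread for @cpu */
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	/* drain queued work, then stop the per-cpu thread for @cpu */
	return 0;
}

static int __init foo_init(void)
{
	int rc;

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
			       foo_cpu_online, foo_cpu_offline);
	if (rc < 0)
		return rc;
	foo_online_state = rc;	/* dynamic state id for removal */
	return 0;
}

static void __exit foo_exit(void)
{
	/* runs foo_cpu_offline() on all online CPUs, then frees the state */
	cpuhp_remove_state(foo_online_state);
}
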
index 913c750205ce2a3f1a90f5d255d98d3b39f90145..26de61d65a4d259fa41e7e070648f775031a9662 100644 (file)
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
        return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+       unsigned int cpu = wqe % num_possible_cpus();
+       struct bnx2fc_percpu_s *fps;
+       struct bnx2fc_work *work;
+
+       fps = &per_cpu(bnx2fc_percpu, cpu);
+       spin_lock_bh(&fps->fp_work_lock);
+       if (fps->iothread) {
+               work = bnx2fc_alloc_work(tgt, wqe);
+               if (work) {
+                       list_add_tail(&work->list, &fps->work_list);
+                       wake_up_process(fps->iothread);
+                       spin_unlock_bh(&fps->fp_work_lock);
+                       return;
+               }
+       }
+       spin_unlock_bh(&fps->fp_work_lock);
+       bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
        struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
                        /* Unsolicited event notification */
                        bnx2fc_process_unsol_compl(tgt, wqe);
                } else {
-                       /* Pending work request completion */
-                       struct bnx2fc_work *work = NULL;
-                       struct bnx2fc_percpu_s *fps = NULL;
-                       unsigned int cpu = wqe % num_possible_cpus();
-
-                       fps = &per_cpu(bnx2fc_percpu, cpu);
-                       spin_lock_bh(&fps->fp_work_lock);
-                       if (unlikely(!fps->iothread))
-                               goto unlock;
-
-                       work = bnx2fc_alloc_work(tgt, wqe);
-                       if (work)
-                               list_add_tail(&work->list,
-                                             &fps->work_list);
-unlock:
-                       spin_unlock_bh(&fps->fp_work_lock);
-
-                       /* Pending work request completion */
-                       if (fps->iothread && work)
-                               wake_up_process(fps->iothread);
-                       else
-                               bnx2fc_process_cq_compl(tgt, wqe);
+                       bnx2fc_pending_work(tgt, wqe);
                        num_free_sqes++;
                }
                cqe++;
index 86afc002814cd07bbc3b55b64ca31201208a514b..4ebcda8d9500439941bd630f8b1187875a7beac5 100644 (file)
@@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle)
 
 
 /**
- * bnx2i_percpu_thread_create - Create a receive thread for an
- *                             online CPU
+ * bnx2i_cpu_online - Create a receive thread for an online CPU
  *
  * @cpu:       cpu index for the online cpu
  */
-static void bnx2i_percpu_thread_create(unsigned int cpu)
+static int bnx2i_cpu_online(unsigned int cpu)
 {
        struct bnx2i_percpu_s *p;
        struct task_struct *thread;
@@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
        thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
                                        cpu_to_node(cpu),
                                        "bnx2i_thread/%d", cpu);
+       if (IS_ERR(thread))
+               return PTR_ERR(thread);
+
        /* bind thread to the cpu */
-       if (likely(!IS_ERR(thread))) {
-               kthread_bind(thread, cpu);
-               p->iothread = thread;
-               wake_up_process(thread);
-       }
+       kthread_bind(thread, cpu);
+       p->iothread = thread;
+       wake_up_process(thread);
+       return 0;
 }
 
-
-static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+static int bnx2i_cpu_offline(unsigned int cpu)
 {
        struct bnx2i_percpu_s *p;
        struct task_struct *thread;
@@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
        spin_unlock_bh(&p->p_work_lock);
        if (thread)
                kthread_stop(thread);
-}
-
-static int bnx2i_cpu_online(unsigned int cpu)
-{
-       pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
-       bnx2i_percpu_thread_create(cpu);
-       return 0;
-}
-
-static int bnx2i_cpu_dead(unsigned int cpu)
-{
-       pr_info("CPU %x offline: Remove Rx thread\n", cpu);
-       bnx2i_percpu_thread_destroy(cpu);
        return 0;
 }
 
@@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void)
                p->iothread = NULL;
        }
 
-       get_online_cpus();
-
-       for_each_online_cpu(cpu)
-               bnx2i_percpu_thread_create(cpu);
-
-       err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                      "scsi/bnx2i:online",
-                                      bnx2i_cpu_online, NULL);
+       err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
+                               bnx2i_cpu_online, bnx2i_cpu_offline);
        if (err < 0)
-               goto remove_threads;
+               goto unreg_driver;
        bnx2i_online_state = err;
-
-       cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
-                                 NULL, bnx2i_cpu_dead);
-       put_online_cpus();
        return 0;
 
-remove_threads:
-       for_each_online_cpu(cpu)
-               bnx2i_percpu_thread_destroy(cpu);
-       put_online_cpus();
+unreg_driver:
        cnic_unregister_driver(CNIC_ULP_ISCSI);
 unreg_xport:
        iscsi_unregister_transport(&bnx2i_iscsi_transport);
@@ -551,7 +525,6 @@ out:
 static void __exit bnx2i_mod_exit(void)
 {
        struct bnx2i_hba *hba;
-       unsigned cpu = 0;
 
        mutex_lock(&bnx2i_dev_lock);
        while (!list_empty(&adapter_list)) {
@@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void)
        }
        mutex_unlock(&bnx2i_dev_lock);
 
-       get_online_cpus();
-
-       for_each_online_cpu(cpu)
-               bnx2i_percpu_thread_destroy(cpu);
-
-       cpuhp_remove_state_nocalls(bnx2i_online_state);
-       cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
-       put_online_cpus();
+       cpuhp_remove_state(bnx2i_online_state);
 
        iscsi_unregister_transport(&bnx2i_iscsi_transport);
        cnic_unregister_driver(CNIC_ULP_ISCSI);
index 4d038926a4558c45b8685ca08c4976233a7ed5e7..351f06dfc5a0dac7bf4f7606166611848876d477 100644 (file)
@@ -528,7 +528,8 @@ struct fip_vlan {
 #define QEDF_WRITE                    (1 << 0)
 #define MAX_FIBRE_LUNS                 0xffffffff
 
-#define QEDF_MAX_NUM_CQS               8
+#define MIN_NUM_CPUS_MSIX(x)   min_t(u32, x->dev_info.num_cqs, \
+                                       num_online_cpus())
 
 /*
  * PCI function probe defines
index 7786c97e033fdcdd9643a41dd1d0bb928036d65b..1d13c9ca517de7e2cec033bdf050498fafbfeccc 100644
@@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
         * we allocate is the minimum of:
         *
         * Number of CPUs
-        * Number of MSI-X vectors
-        * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+        * Number allocated by qed for our PCI function
         */
-       qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
-           num_online_cpus());
+       qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
                   qedf->num_queues);
@@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
                goto err1;
        }
 
+       /* Learn information crucial for qedf to progress */
+       rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+       if (rc) {
+               QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
+               goto err1;
+       }
+
        /* queue allocation code should come here
         * order should be
         *      slowpath_start
@@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
        }
        qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
 
-       /* Learn information crucial for qedf to progress */
-       rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
-       if (rc) {
-               QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
-               goto err1;
-       }
-
        /* Record BDQ producer doorbell addresses */
        qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
        qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
index 4fe606b000b4461c05e441cbb787edcfec51e72b..d7ff71e0c85c6ecd525d0d59d3f3f0da63952b47 100644
@@ -751,35 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
        return count;
 }
 
-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
-{
-       switch (hp->dxfer_direction) {
-       case SG_DXFER_NONE:
-               if (hp->dxferp || hp->dxfer_len > 0)
-                       return false;
-               return true;
-       case SG_DXFER_FROM_DEV:
-               /*
-                * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
-                * can either be NULL or != NULL so there's no point in checking
-                * it either. So just return true.
-                */
-               return true;
-       case SG_DXFER_TO_DEV:
-       case SG_DXFER_TO_FROM_DEV:
-               if (!hp->dxferp || hp->dxfer_len == 0)
-                       return false;
-               return true;
-       case SG_DXFER_UNKNOWN:
-               if ((!hp->dxferp && hp->dxfer_len) ||
-                   (hp->dxferp && hp->dxfer_len == 0))
-                       return false;
-               return true;
-       default:
-               return false;
-       }
-}
-
 static int
 sg_common_write(Sg_fd * sfp, Sg_request * srp,
                unsigned char *cmnd, int timeout, int blocking)
@@ -800,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
                        "sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
                        (int) cmnd[0], (int) hp->cmd_len));
 
-       if (!sg_is_valid_dxfer(hp))
+       if (hp->dxfer_len >= SZ_256M)
                return -EINVAL;
 
        k = sg_start_req(srp, cmnd);
index c8989c62a2621b88cf8b9d0c3001a37a31d5e151..858fefd67ebed72dbe59cc4e07806be1b8895577 100644
@@ -1150,3 +1150,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
                        PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+       /*
+        * Our dear uPD72020{1,2} friend only partially resets when
+        * asked to via the XHCI interface, and may end up doing DMA
+        * at the wrong addresses, as it keeps the top 32bit of some
+        * addresses from its previous programming under obscure
+        * circumstances.
+        * Give it a good whack at probe time. Unfortunately, this
+        * needs to happen before we've had a chance to discover any
+        * quirk, or the system will be in a rather bad state.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+           (pdev->device == 0x0014 || pdev->device == 0x0015))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
index 6559944801987728a1db6ba31f09db51b92362e3..5582cbafecd4c1a3ddc5443d6cc9182a9b9bc89f 100644
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
 #else
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
index 5b0fa553c8bc940e88a6db731cf6dfeb0c9fb971..8071c8fdd15e741b008af64075cda3c87072bfb4 100644
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        driver = (struct hc_driver *)id->driver_data;
 
+       /* For some HW implementations, an XHCI reset is just not enough... */
+       if (usb_xhci_needs_pci_reset(dev)) {
+               dev_info(&dev->dev, "Resetting\n");
+               if (pci_reset_function_locked(dev))
+                       dev_warn(&dev->dev, "Reset failed");
+       }
+
        /* Prevent runtime suspending between USB-2 and USB-3 initialization */
        pm_runtime_get_noresume(&dev->dev);
 
index 8a428498d6b21f08c8c26ef184ff9f4332b5cdd0..509a61668d902b84f6756e2ed1bcb22a6d7020a5 100644
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_FILE_MAPPED));
        show_val_kb(m, "Shmem:          ", i.sharedram);
        show_val_kb(m, "Slab:           ",
-                   global_page_state(NR_SLAB_RECLAIMABLE) +
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE) +
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 
        show_val_kb(m, "SReclaimable:   ",
-                   global_page_state(NR_SLAB_RECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE));
        show_val_kb(m, "SUnreclaim:     ",
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
        seq_printf(m, "KernelStack:    %8lu kB\n",
                   global_page_state(NR_KERNEL_STACK_KB));
        show_val_kb(m, "PageTables:     ",
index b836fd61ed878a38d25d5ffe44bb86e30066955c..fe8f3265e8779ac18a5694ef600c024f9e88f281 100644
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
+#include <linux/uaccess.h>
 
 #include <asm/elf.h>
-#include <linux/uaccess.h>
+#include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
 
@@ -1008,6 +1009,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
+       struct mmu_gather tlb;
        int itype;
        int rv;
 
@@ -1054,6 +1056,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                }
 
                down_read(&mm->mmap_sem);
+               tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1075,7 +1078,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
-               flush_tlb_mm(mm);
+               tlb_finish_mmu(&tlb, 0, -1);
                up_read(&mm->mmap_sem);
 out_mm:
                mmput(mm);
index 06ea26b8c996f3cc7a9d6fd177260f89394fb325..b0d5897bc4e6d0e019c79f65b6d41df1d3b0d050 100644
@@ -1600,7 +1600,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                                   uffdio_copy.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
                return -EFAULT;
@@ -1647,7 +1647,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                                     uffdio_zeropage.range.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
                return -EFAULT;
index ceef77c0416ad5833c2b513006496c8d57a4c62f..ff48f00968100df0de830892f0c9aed7bca6d74d 100644
@@ -874,7 +874,6 @@ xfs_ialloc(
        case S_IFREG:
        case S_IFDIR:
                if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
-                       uint64_t        di_flags2 = 0;
                        uint            di_flags = 0;
 
                        if (S_ISDIR(mode)) {
@@ -911,20 +910,23 @@ xfs_ialloc(
                                di_flags |= XFS_DIFLAG_NODEFRAG;
                        if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
                                di_flags |= XFS_DIFLAG_FILESTREAM;
-                       if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
-                               di_flags2 |= XFS_DIFLAG2_DAX;
 
                        ip->i_d.di_flags |= di_flags;
-                       ip->i_d.di_flags2 |= di_flags2;
                }
                if (pip &&
                    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
                    pip->i_d.di_version == 3 &&
                    ip->i_d.di_version == 3) {
+                       uint64_t        di_flags2 = 0;
+
                        if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
-                               ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+                               di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
                                ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
                        }
+                       if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
+                               di_flags2 |= XFS_DIFLAG2_DAX;
+
+                       ip->i_d.di_flags2 |= di_flags2;
                }
                /* FALLTHROUGH */
        case S_IFLNK:
index fbe72b134bef219e80420063bfe2db2abc594e49..43aa42a3a5d319fffff98417bff98055083f1a75 100644
@@ -539,6 +539,7 @@ xlog_discard_endio(
 
        INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
        queue_work(xfs_discard_wq, &ctx->discard_endio_work);
+       bio_put(bio);
 }
 
 static void
index 8afa4335e5b2bfd0c42c00e1b1506d4e1f7377ac..faddde44de8c902e6884e64eeb8b22bd0d11b75a 100644
@@ -112,10 +112,11 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+       struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
-                                                       unsigned long end);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                        unsigned long start, unsigned long end, bool force);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                   int page_size);
 
index b56573bf440db4b85f8f11f678edb9e28d4e6cd8..82b30e638430fa6e9b3b410feb84bc79a4bf2b13 100644
@@ -39,8 +39,6 @@ enum cpuhp_state {
        CPUHP_PCI_XGENE_DEAD,
        CPUHP_IOMMU_INTEL_DEAD,
        CPUHP_LUSTRE_CFS_DEAD,
-       CPUHP_SCSI_BNX2FC_DEAD,
-       CPUHP_SCSI_BNX2I_DEAD,
        CPUHP_WORKQUEUE_PREP,
        CPUHP_POWER_NUMA_PREPARE,
        CPUHP_HRTIMERS_PREPARE,
index 723cd54b94da84f95cd18934d14198b6d21040dd..beabdbc0842059b36a2c6b22b882e04ddc968f75 100644
@@ -843,7 +843,7 @@ struct dev_links_info {
  *             hibernation, system resume and during runtime PM transitions
  *             along with subsystem-level and driver-level callbacks.
  * @pins:      For device pin management.
- *             See Documentation/pinctrl.txt for details.
+ *             See Documentation/driver-api/pinctl.rst for details.
  * @msi_list:  Hosts MSI descriptors
  * @msi_domain: The generic MSI domain this device is using.
  * @numa_node: NUMA node this device is close to.
index 00ca5b86a753f8023cad87ce02cbe90748255383..d501d3956f13f041864dc25f0d7e8724ea2b5210 100644
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
 #define I2C_CLASS_HWMON                (1<<0)  /* lm_sensors, ... */
 #define I2C_CLASS_DDC          (1<<3)  /* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD          (1<<7)  /* Memory modules */
-#define I2C_CLASS_DEPRECATED   (1<<8)  /* Warn users that adapter will stop using classes */
+/* Warn users that the adapter doesn't support classes anymore */
+#define I2C_CLASS_DEPRECATED   (1<<8)
 
 /* Internal numbers to terminate lists */
 #define I2C_CLIENT_END         0xfffeU
index aad5d81dfb444aeb0dcb4b92aef475023aa897c1..b54517c05e9ab20fff33e3526fe0ea8de1e702bb 100644
@@ -620,6 +620,7 @@ struct mlx4_caps {
        u32                     dmfs_high_rate_qpn_base;
        u32                     dmfs_high_rate_qpn_range;
        u32                     vf_caps;
+       bool                    wol_port[MLX4_MAX_PORTS + 1];
        struct mlx4_rate_limit_caps rl_caps;
 };
 
index 6f41270d80c03128bdeb60e5c6fc1b6ca2b5fe54..f378dc0e7eaf4db75eab8606f03df4e9269602e4 100644
@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
-#define MLX5_WQE_AV_EXT 0x80000000
 
 enum {
        MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
index 7f384bb62d8ec6bc7eafa25828b0716be63c7ccb..3cadee0a350889f748e7b1a999b449ae003e9c3f 100644
@@ -487,14 +487,12 @@ struct mm_struct {
        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
        /*
         * An operation with batched TLB flushing is going on. Anything that
         * can move process memory needs to flush the TLB when moving a
         * PROT_NONE or PROT_NUMA mapped page.
         */
-       bool tlb_flush_pending;
-#endif
+       atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
        /* See flush_tlb_batched_pending() */
        bool tlb_flush_batched;
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
        return mm->cpu_vm_mask_var;
 }
 
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+                               unsigned long start, unsigned long end);
+
 /*
  * Memory barriers to keep this state in sync are graciously provided by
  * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers ensure that tlb_flush_pending updates, which happen while the
+ * lock is not taken, and the PTE updates, which happen while the lock is
+ * taken, are serialized.
  */
 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 {
-       barrier();
-       return mm->tlb_flush_pending;
+       return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if at least two TLB batching threads are running in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+       return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+       atomic_set(&mm->tlb_flush_pending, 0);
 }
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
 {
-       mm->tlb_flush_pending = true;
+       atomic_inc(&mm->tlb_flush_pending);
 
        /*
-        * Guarantee that the tlb_flush_pending store does not leak into the
+        * Guarantee that the tlb_flush_pending increase does not leak into the
         * critical section updating the page tables
         */
        smp_mb__before_spinlock();
 }
+
 /* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
-       barrier();
-       mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
-       return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 {
+       /*
+        * Guarantee that the tlb_flush_pending does not leak into the
+        * critical section, since we must order the PTE change and changes to
+        * the pending TLB flush indication. We could have relied on TLB flush
+        * as a memory barrier, but this behavior is not clearly documented.
+        */
+       smp_mb__before_atomic();
+       atomic_dec(&mm->tlb_flush_pending);
 }
-#endif
 
 struct vm_fault;
 
index 892148c448cce2c9c9e9e59b2d44941283635cbd..5216d2eb22891010187b86a5911a10b17aedf273 100644
@@ -681,10 +681,10 @@ struct nand_buffers {
  * @tWW_min: WP# transition to WE# low
  */
 struct nand_sdr_timings {
-       u32 tBERS_max;
+       u64 tBERS_max;
        u32 tCCS_min;
-       u32 tPROG_max;
-       u32 tR_max;
+       u64 tPROG_max;
+       u64 tR_max;
        u32 tALH_min;
        u32 tADL_min;
        u32 tALS_min;
index 4869e66dd659a6bc8fe4ad90df2ed9d3ff98ccac..a75c136738529db410baf870f3baafc6e178a5a0 100644
@@ -1067,6 +1067,7 @@ void pcie_flr(struct pci_dev *dev);
 int __pci_reset_function(struct pci_dev *dev);
 int __pci_reset_function_locked(struct pci_dev *dev);
 int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
 int pci_try_reset_function(struct pci_dev *dev);
 int pci_probe_reset_slot(struct pci_slot *slot);
 int pci_reset_slot(struct pci_slot *slot);
index 231d3075815adfa63d462a236e66f214c3216117..e91d1b6a260d5996583a4365d4020524ad57700a 100644
@@ -81,8 +81,8 @@
  *     it.
  * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
  *     value on the line. Use argument 1 to indicate high level, argument 0 to
- *     indicate low level. (Please see Documentation/pinctrl.txt, section
- *     "GPIO mode pitfalls" for a discussion around this parameter.)
+ *     indicate low level. (Please see Documentation/driver-api/pinctl.rst,
+ *     section "GPIO mode pitfalls" for a discussion around this parameter.)
  * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
  *     supplies, the argument to this parameter (on a custom format) tells
  *     the driver which alternative power source to use.
index a026bfd089db81191b5a322979d4bf58c2777dc5..51349d124ee5d47ded05a433028d765858c61f00 100644
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
  *            parameter func: the desired function to use.
  *            parameter chan: the function channel index to use.
  *
+ * @do_aux_work:  Request driver to perform auxiliary (periodic) operations.
+ *               Driver should return the delay of the next auxiliary work
+ *               scheduling time (>= 0), or a negative value if further
+ *               scheduling is not required.
+ *
  * Drivers should embed their ptp_clock_info within a private
  * structure, obtaining a reference to it using container_of().
  *
@@ -126,6 +131,7 @@ struct ptp_clock_info {
                      struct ptp_clock_request *request, int on);
        int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
                      enum ptp_pin_function func, unsigned int chan);
+       long (*do_aux_work)(struct ptp_clock_info *ptp);
 };
 
 struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
 int ptp_find_pin(struct ptp_clock *ptp,
                 enum ptp_pin_function func, unsigned int chan);
 
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ * @delay:  number of jiffies to wait before queuing
+ *          See kthread_queue_delayed_work() for more info.
+ */
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
 #else
 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                                                   struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
 static inline int ptp_find_pin(struct ptp_clock *ptp,
                               enum ptp_pin_function func, unsigned int chan)
 { return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+                                     unsigned long delay)
+{ return -EOPNOTSUPP; }
+
 #endif
 
 #endif
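The do_aux_work hook and ptp_schedule_worker() added above give PTP drivers a self-rearming worker: the callback returns the delay, in jiffies, until it should run again, or a negative value to stop rescheduling. A hedged driver-side sketch (the example_* names are illustrative, not part of this series):

#include <linux/ptp_clock_kernel.h>

static long example_do_aux_work(struct ptp_clock_info *info)
{
        /* Poll or refresh hardware timestamping state here. */

        /* Ask the PTP core to run this worker again in about one second. */
        return HZ;
}

static struct ptp_clock_info example_ptp_info = {
        .owner       = THIS_MODULE,
        .name        = "example",
        .do_aux_work = example_do_aux_work,
};

Once ptp_clock_register() has succeeded, such a driver would kick off the first run with ptp_schedule_worker(ptp, 0).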
index 70483296157f87acdf5acd5e96eaa910119ba220..ada65e767b28dfcabb662a7b08f65c6fc04f5b73 100644
@@ -1916,6 +1916,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                             u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
 
+/* At how many usecs into the future should the RTO fire? */
+static inline s64 tcp_rto_delta_us(const struct sock *sk)
+{
+       const struct sk_buff *skb = tcp_write_queue_head(sk);
+       u32 rto = inet_csk(sk)->icsk_rto;
+       u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+
+       return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+}
+
 /*
  * Save and compile IPv4 options, return a pointer to it
  */
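tcp_rto_delta_us() centralizes the "how far in the future should the RTO fire" arithmetic for callers that re-arm the retransmit timer. A sketch of the intended caller shape (an illustration under that assumption, not a hunk from this series):

static void example_rearm_rto(struct sock *sk)
{
        s64 delta_us = tcp_rto_delta_us(sk);
        u32 rto = inet_csk(sk)->icsk_rto;

        /*
         * delta_us is the time remaining until the RTO should fire; when
         * the head skb has already waited, shorten the timeout accordingly.
         */
        if (delta_us > 0)
                rto = usecs_to_jiffies(delta_us);

        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX);
}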
index 17921b0390b4f91113bcf8c9ccac5c1225751460..e075b7780421dee1d8243b9dc178248398c5f189 100644
@@ -807,7 +807,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_init_aio(mm);
        mm_init_owner(mm, p);
        mmu_notifier_mm_init(mm);
-       clear_tlb_flush_pending(mm);
+       init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        mm->pmd_huge_pte = NULL;
 #endif
index 16dbe4c938953a70a49faf0a5264af8c19a9491f..f50b434756c18eb0c200ec6e3b4db16231062f24 100644
@@ -670,13 +670,14 @@ again:
                 * this reference was taken by ihold under the page lock
                 * pinning the inode in place so i_lock was unnecessary. The
                 * only way for this check to fail is if the inode was
-                * truncated in parallel so warn for now if this happens.
+                * truncated in parallel which is almost certainly an
+                * application bug. In such a case, just retry.
                 *
                 * We are not calling into get_futex_key_refs() in file-backed
                 * cases, therefore a successful atomic_inc return below will
                 * guarantee that get_futex_key() will still imply smp_mb(); (B).
                 */
-               if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+               if (!atomic_inc_not_zero(&inode->i_count)) {
                        rcu_read_unlock();
                        put_page(page);
 
index 222317721c5a09291c6b78fc839e722b2196b177..0972a8e09d082d99c7f197cbe6bd4fdb6475ba33 100644
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 {
        unsigned long size;
 
-       size = global_page_state(NR_SLAB_RECLAIMABLE)
+       size = global_node_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
index 7d315fdb9f13d9b17d8a2aa129c75790c7599bdb..cf7b129b0b2b08adcc1aae98f990c384761532dc 100644
@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
        if (in_task()) {
                unsigned int fail_nth = READ_ONCE(current->fail_nth);
 
-               if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
-                       goto fail;
+               if (fail_nth) {
+                       if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+                               goto fail;
 
-               return false;
+                       return false;
+               }
        }
 
        /* No need to check any other properties if the probability is 0 */
index 6c1d678bcf8b00ff7b2d2fc70747045e6c14327a..ff9148969b9233ba7502b992b026d31e100be497 100644
@@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_driver);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "driver:\tEMTPY\n");
+                               "driver:\tEMPTY\n");
 
        if (config->test_fs)
                len += snprintf(buf+len, PAGE_SIZE - len,
@@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_fs);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "fs:\tEMTPY\n");
+                               "fs:\tEMPTY\n");
 
        mutex_unlock(&test_dev->config_mutex);
 
@@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
                                                      strlen(test_str));
                break;
        case TEST_KMOD_FS_TYPE:
-               break;
                kfree_const(config->test_fs);
                config->test_driver = NULL;
                copied = config_copy_test_fs(config, test_str,
                                             strlen(test_str));
+               break;
        default:
                mutex_unlock(&test_dev->config_mutex);
                return -EINVAL;
@@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
                                            int (*test_sync)(struct kmod_test_device *test_dev))
 {
        int ret;
-       long new;
+       unsigned long new;
        unsigned int old_val;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
                                             unsigned int max)
 {
        int ret;
-       long new;
+       unsigned long new;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
        struct kmod_test_device *test_dev = NULL;
        int ret;
 
-       mutex_unlock(&reg_dev_mutex);
+       mutex_lock(&reg_dev_mutex);
 
        /* int should suffice for number of devices, test for wrap */
        if (unlikely(num_test_devs + 1 < 0)) {
index 9075aa54e95517cdbb1094f04e72c36357401e52..b06d9fe23a28c14f71c3263daaa84965dadeee45 100644
@@ -24,7 +24,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
 {
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
-                               __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO);
+                                      __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;
 
index db1cd26d8752022b7f8b576cdff78f5412209d39..5715448ab0b53db5d8bd4b64d47706f7deaaf7a6 100644
@@ -124,9 +124,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
                "tlb_flush_pending %d\n"
-#endif
                "def_flags: %#lx(%pGv)\n",
 
                mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
@@ -158,9 +156,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
-               mm->tlb_flush_pending,
-#endif
+               atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
 }
index 86975dec0ba160feadfb8aa0d13b8f2be943638d..216114f6ef0b7f8c09378edd3615d6a39527ead0 100644
@@ -1495,6 +1495,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                goto clear_pmdnuma;
        }
 
+       /*
+        * The page_table_lock above provides a memory barrier
+        * with change_protection_range.
+        */
+       if (mm_tlb_flush_pending(vma->vm_mm))
+               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and access rights restored.
index a1a0ac0ad6f67ad479916fcbc43036973ddca824..31e207cb399bebd11371e46eb26f625a5b74487c 100644
@@ -4062,9 +4062,9 @@ out:
        return ret;
 out_release_unlock:
        spin_unlock(ptl);
-out_release_nounlock:
        if (vm_shared)
                unlock_page(page);
+out_release_nounlock:
        put_page(page);
        goto out;
 }
index 4dc92f138786988c4ef0f9d371ff8a48b2e6e905..db20f8436bc3c15bf05f86ccec5e7b1f80d807cc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1038,7 +1038,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                goto out_unlock;
 
        if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
-           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
+           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
+                                               mm_tlb_flush_pending(mm)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
index f65beaad319be4c597f9a071771e5f376234d753..e158f7ac67300b10b8827fe6825667506095f550 100644
@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
        return true;
 }
 
-/* tlb_gather_mmu
- *     Called to initialize an (on-stack) mmu_gather structure for page-table
- *     tear-down from @mm. The @fullmm argument is used when @mm is without
- *     users and we're going to destroy the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
@@ -275,10 +271,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
        struct mmu_gather_batch *batch, *next;
 
+       if (force)
+               __tlb_adjust_range(tlb, start, end - start);
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -398,6 +398,34 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+/* tlb_gather_mmu
+ *     Called to initialize an (on-stack) mmu_gather structure for page-table
+ *     tear-down from @mm. A full-mm tear-down (@mm has no users and the whole
+ *     address space is going away, as on exit/execve) uses @start = 0, @end = -1.
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
+{
+       arch_tlb_gather_mmu(tlb, mm, start, end);
+       inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end)
+{
+       /*
+        * If parallel threads are doing PTE changes on the same range under a
+        * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
+        * flush by batching, a thread that has a stable TLB entry can fail to
+        * flush it by observing pte_none|!pte_dirty (for example), so flush
+        * the TLB forcefully if we detect parallel PTE batching threads.
+        */
+       bool force = mm_tlb_flush_nested(tlb->mm);
+
+       arch_tlb_finish_mmu(tlb, start, end, force);
+       dec_tlb_flush_pending(tlb->mm);
+}
+
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
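Caller-wise the wrapped API keeps its old shape; the fs/proc/task_mmu.c hunk earlier in this diff is a real user, and the general bracketing looks like this (a minimal sketch):

static void example_full_mm_batch(struct mm_struct *mm)
{
        struct mmu_gather tlb;

        down_read(&mm->mmap_sem);
        tlb_gather_mmu(&tlb, mm, 0, -1);  /* increments mm->tlb_flush_pending */
        /* ... batched PTE tear-down or protection changes ... */
        tlb_finish_mmu(&tlb, 0, -1);      /* flushes forcefully if another batcher ran */
        up_read(&mm->mmap_sem);
}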
index 62767155187356d54d1fa7333ad402e76183ca0b..d68a41da6abb0743d6b09cc49c5c9524463715c3 100644
@@ -1937,12 +1937,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-       /*
-        * We are not sure a pending tlb flush here is for a huge page
-        * mapping or not. Hence use the tlb range variant
-        */
-       if (mm_tlb_flush_pending(mm))
-               flush_tlb_range(vma, mmun_start, mmun_end);
 
        /* Prepare a page as a migration target */
        __SetPageLocked(new_page);
index 4180ad8cc9c5e70c661efc8f30416af40e9c0066..bd0f409922cb2fc133f9fecba64a839380d4f937 100644
@@ -244,7 +244,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
-       set_tlb_flush_pending(mm);
+       inc_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
@@ -256,7 +256,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
-       clear_tlb_flush_pending(mm);
+       dec_tlb_flush_pending(mm);
 
        return pages;
 }
index fc32aa81f3593537cc2b11d5f63b5c5f517097a4..6d00f746c2fd96452661fde3f704289eed7f1f70 100644
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
         * Part of the reclaimable slab consists of items that are in use,
         * and cannot be freed. Cap this estimate at the low watermark.
         */
-       available += global_page_state(NR_SLAB_RECLAIMABLE) -
-                    min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+       available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+                    min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+                        wmark_low);
 
        if (available < 0)
                available = 0;
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                global_node_page_state(NR_FILE_DIRTY),
                global_node_page_state(NR_WRITEBACK),
                global_node_page_state(NR_UNSTABLE_NFS),
-               global_page_state(NR_SLAB_RECLAIMABLE),
-               global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_node_page_state(NR_SLAB_RECLAIMABLE),
+               global_node_page_state(NR_SLAB_UNRECLAIMABLE),
                global_node_page_state(NR_FILE_MAPPED),
                global_node_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
@@ -7668,7 +7669,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_info("%s: [%lx, %lx) PFNs busy\n",
+               pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
index c8993c63eb259b3a5302a058ce231d1290fc9b66..c1286d47aa1fad7fee7ea5bb865a2dc7efd672f2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                .flags = PVMW_SYNC,
        };
        int *cleaned = arg;
+       bool invalidation_needed = false;
 
        while (page_vma_mapped_walk(&pvmw)) {
                int ret = 0;
-               address = pvmw.address;
                if (pvmw.pte) {
                        pte_t entry;
                        pte_t *pte = pvmw.pte;
@@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pte_dirty(*pte) && !pte_write(*pte))
                                continue;
 
-                       flush_cache_page(vma, address, pte_pfn(*pte));
-                       entry = ptep_clear_flush(vma, address, pte);
+                       flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
+                       entry = ptep_clear_flush(vma, pvmw.address, pte);
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
-                       set_pte_at(vma->vm_mm, address, pte, entry);
+                       set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
                        ret = 1;
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
                                continue;
 
-                       flush_cache_page(vma, address, page_to_pfn(page));
-                       entry = pmdp_huge_clear_flush(vma, address, pmd);
+                       flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+                       entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
                        entry = pmd_wrprotect(entry);
                        entry = pmd_mkclean(entry);
-                       set_pmd_at(vma->vm_mm, address, pmd, entry);
+                       set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                }
 
                if (ret) {
-                       mmu_notifier_invalidate_page(vma->vm_mm, address);
                        (*cleaned)++;
+                       invalidation_needed = true;
                }
        }
 
+       if (invalidation_needed) {
+               mmu_notifier_invalidate_range(vma->vm_mm, address,
+                               address + (1UL << compound_order(page)));
+       }
+
        return true;
 }
 
@@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        };
        pte_t pteval;
        struct page *subpage;
-       bool ret = true;
+       bool ret = true, invalidation_needed = false;
        enum ttu_flags flags = (enum ttu_flags)arg;
 
        /* munlock has nothing to gain from examining un-locked vmas */
@@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                VM_BUG_ON_PAGE(!pvmw.pte, page);
 
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-               address = pvmw.address;
-
 
                if (!(flags & TTU_IGNORE_ACCESS)) {
-                       if (ptep_clear_flush_young_notify(vma, address,
+                       if (ptep_clear_flush_young_notify(vma, pvmw.address,
                                                pvmw.pte)) {
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                }
 
                /* Nuke the page table entry. */
-               flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+               flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
                if (should_defer_flush(mm, flags)) {
                        /*
                         * We clear the PTE but do not flush so potentially
@@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * transition on a cached TLB entry is written through
                         * and traps if the PTE is unmapped.
                         */
-                       pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+                       pteval = ptep_get_and_clear(mm, pvmw.address,
+                                                   pvmw.pte);
 
                        set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
                } else {
-                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                       pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
                }
 
                /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (PageHuge(page)) {
                                int nr = 1 << compound_order(page);
                                hugetlb_count_sub(nr, mm);
-                               set_huge_swap_pte_at(mm, address,
+                               set_huge_swap_pte_at(mm, pvmw.address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
                        } else {
                                dec_mm_counter(mm, mm_counter(page));
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                        }
 
                } else if (pte_unused(pteval)) {
@@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else if (PageAnon(page)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1460,7 +1464,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 * If the page was redirtied, it cannot be
                                 * discarded. Remap the page to page table.
                                 */
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                SetPageSwapBacked(page);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1468,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        }
 
                        if (swap_duplicate(entry) < 0) {
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
                                break;
@@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else
                        dec_mm_counter(mm, mm_counter_file(page));
 discard:
                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
-               mmu_notifier_invalidate_page(mm, address);
+               invalidation_needed = true;
        }
+
+       if (invalidation_needed)
+               mmu_notifier_invalidate_range(mm, address,
+                               address + (1UL << compound_order(page)));
        return ret;
 }
 
index b0aa6075d164df9ae4766876cc823394abaebc6d..6540e598244412023db650412062604b704b58b3 100644
@@ -1022,7 +1022,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
-                               if (list_empty(&info->shrinklist)) {
+                               /*
+                                * _careful to defend against unlocked access to
+                                * ->shrink_list in shmem_unused_huge_shrink()
+                                */
+                               if (list_empty_careful(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
@@ -1817,7 +1821,11 @@ alloc_nohuge:            page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
                         * to shrink under memory pressure.
                         */
                        spin_lock(&sbinfo->shrinklist_lock);
-                       if (list_empty(&info->shrinklist)) {
+                       /*
+                        * _careful to defend against unlocked access to
+                        * ->shrink_list in shmem_unused_huge_shrink()
+                        */
+                       if (list_empty_careful(&info->shrinklist)) {
                                list_add_tail(&info->shrinklist,
                                                &sbinfo->shrinklist);
                                sbinfo->shrinklist_len++;
index 7b07ec852e01fa931b2b302e8df5cff9f17f62d6..9ecddf568fe30e5cf1fba6db8eda3b7abe96d379 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += global_page_state(NR_SLAB_RECLAIMABLE);
+               free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
index e1133bc634b5e8ed9a4639677e577a0d52e7c1d5..8a3ce79b1307b7f260ce2f64e96bdacfb9a322f0 100644
@@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
        return found;
 }
 
+/**
+ * batadv_tt_global_sync_flags - update TT sync flags
+ * @tt_global: the TT global entry to update sync flags in
+ *
+ * Updates the sync flag bits in the tt_global flag attribute with a logical
+ * OR of all sync flags from any of its TT orig entries.
+ */
+static void
+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
+{
+       struct batadv_tt_orig_list_entry *orig_entry;
+       const struct hlist_head *head;
+       u16 flags = BATADV_NO_FLAGS;
+
+       rcu_read_lock();
+       head = &tt_global->orig_list;
+       hlist_for_each_entry_rcu(orig_entry, head, list)
+               flags |= orig_entry->flags;
+       rcu_read_unlock();
+
+       flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
+       tt_global->common.flags = flags;
+}
+
+/**
+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * @tt_global: the TT global entry to add an orig entry in
+ * @orig_node: the originator to add an orig entry for
+ * @ttvn: translation table version number of this changeset
+ * @flags: TT sync flags
+ */
 static void
 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
-                               struct batadv_orig_node *orig_node, int ttvn)
+                               struct batadv_orig_node *orig_node, int ttvn,
+                               u8 flags)
 {
        struct batadv_tt_orig_list_entry *orig_entry;
 
@@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
                 * was added during a "temporary client detection"
                 */
                orig_entry->ttvn = ttvn;
-               goto out;
+               orig_entry->flags = flags;
+               goto sync_flags;
        }
 
        orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
@@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
        batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;
+       orig_entry->flags = flags;
        kref_init(&orig_entry->refcount);
 
        spin_lock_bh(&tt_global->list_lock);
@@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
        spin_unlock_bh(&tt_global->list_lock);
        atomic_inc(&tt_global->orig_list_count);
 
+sync_flags:
+       batadv_tt_global_sync_flags(tt_global);
 out:
        if (orig_entry)
                batadv_tt_orig_list_entry_put(orig_entry);
@@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                }
 
                /* the change can carry possible "attribute" flags like the
-                * TT_CLIENT_WIFI, therefore they have to be copied in the
+                * TT_CLIENT_TEMP, therefore they have to be copied in the
                 * client entry
                 */
-               common->flags |= flags;
+               common->flags |= flags & (~BATADV_TT_SYNC_MASK);
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
@@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
        }
 add_orig_entry:
        /* add the new orig_entry (if needed) or update it */
-       batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
+       batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
+                                       flags & BATADV_TT_SYNC_MASK);
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
@@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
                               struct batadv_tt_orig_list_entry *orig,
                               bool best)
 {
+       u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
        void *hdr;
        struct batadv_orig_node_vlan *vlan;
        u8 last_ttvn;
@@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
            nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
            nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
            nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
-           nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+           nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
                goto nla_put_failure;
 
        if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
@@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                unsigned short vid)
 {
        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+       struct batadv_tt_orig_list_entry *tt_orig;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_head *head;
@@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                        /* find out if this global entry is announced by this
                         * originator
                         */
-                       if (!batadv_tt_global_entry_has_orig(tt_global,
-                                                            orig_node))
+                       tt_orig = batadv_tt_global_orig_entry_find(tt_global,
+                                                                  orig_node);
+                       if (!tt_orig)
                                continue;
 
                        /* use network order to read the VID: this ensures that
@@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                        /* compute the CRC on flags that have to be kept in sync
                         * among nodes
                         */
-                       flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+                       flags = tt_orig->flags;
                        crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
 
                        crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+
+                       batadv_tt_orig_list_entry_put(tt_orig);
                }
                rcu_read_unlock();
        }
index ea43a64492479809fe6bdf95b436792078f50e9f..a62795868794103d7e712ba91def5997dc3a5779 100644
@@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry {
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
  * @orig_node: pointer to orig node announcing this non-mesh client
  * @ttvn: translation table version number which added the non-mesh client
+ * @flags: per orig entry TT sync flags
  * @list: list node for batadv_tt_global_entry::orig_list
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry {
 struct batadv_tt_orig_list_entry {
        struct batadv_orig_node *orig_node;
        u8 ttvn;
+       u8 flags;
        struct hlist_node list;
        struct kref refcount;
        struct rcu_head rcu;
index 8515f8fe0460ae08e08e269a47524e0738714626..ce15a06d5558af0292cc739b42a7dc3c1d89428d 100644
@@ -2739,7 +2739,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
        if (tx_path)
                return skb->ip_summed != CHECKSUM_PARTIAL &&
-                      skb->ip_summed != CHECKSUM_NONE;
+                      skb->ip_summed != CHECKSUM_UNNECESSARY;
 
        return skb->ip_summed == CHECKSUM_NONE;
 }
index 76c2077c3f5b697bf8e0d4b030b70dde8fc70345..2e548eca34898f51316275c918bb1f0f4a63526e 100644
@@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net)
        net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
 #endif
 
+       /* Some igmp sysctl, whose values are always used */
+       net->ipv4.sysctl_igmp_max_memberships = 20;
+       net->ipv4.sysctl_igmp_max_msf = 10;
+       /* IGMP reports for link-local multicast groups are enabled by default */
+       net->ipv4.sysctl_igmp_llm_reports = 1;
+       net->ipv4.sysctl_igmp_qrv = 2;
+
        return 0;
 }
 
index c4c6e1969ed0606ff9fb4ea46609f75b249e589b..2ae8f54cb32148f2499f78ecbf29259db36bd207 100644
@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
        int taglen;
 
        for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
-               if (optptr[0] == IPOPT_CIPSO)
+               switch (optptr[0]) {
+               case IPOPT_CIPSO:
                        return optptr;
-               taglen = optptr[1];
+               case IPOPT_END:
+                       return NULL;
+               case IPOPT_NOOP:
+                       taglen = 1;
+                       break;
+               default:
+                       taglen = optptr[1];
+               }
                optlen -= taglen;
                optptr += taglen;
        }
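
The switch above hardens what appears to be the CIPSO option scan against malformed IPv4 options: IPOPT_END (0) must terminate the walk, and IPOPT_NOOP (1) is a single byte with no length octet, so blindly reading optptr[1] could produce a zero taglen and spin forever, or step past the header. A standalone sketch of the corrected walk, with the standard option codes hardcoded:

    #include <stdio.h>

    #define IPOPT_END   0
    #define IPOPT_NOOP  1
    #define IPOPT_CIPSO 134   /* commercial IP security option */

    /* scan an IPv4 options area of optlen bytes for a CIPSO option */
    static const unsigned char *find_cipso(const unsigned char *optptr,
                                           int optlen)
    {
        while (optlen > 0) {
            int taglen;

            switch (optptr[0]) {
            case IPOPT_CIPSO:
                return optptr;
            case IPOPT_END:          /* end of options: nothing follows */
                return NULL;
            case IPOPT_NOOP:         /* one byte, no length octet */
                taglen = 1;
                break;
            default:
                taglen = optptr[1];  /* TLV: length covers type+len+data */
            }
            if (taglen <= 0 || taglen > optlen)  /* extra guard, sketch only */
                return NULL;
            optlen -= taglen;
            optptr += taglen;
        }
        return NULL;
    }

    int main(void)
    {
        unsigned char opts[] = { IPOPT_NOOP, IPOPT_NOOP,
                                 IPOPT_CIPSO, 6, 0, 0, 0, 0 };
        const unsigned char *hit = find_cipso(opts, sizeof(opts));

        printf("found at offset %d\n", hit ? (int)(hit - opts) : -1);
        return 0;
    }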
index 8e0257d0120097770e37017439684a2345619f5e..1540db65241a6fd4d96b00546f13a3e3d3cd1815 100644 (file)
@@ -450,6 +450,7 @@ out_unlock:
 out:
        NAPI_GRO_CB(skb)->flush |= flush;
        skb_gro_remcsum_cleanup(skb, &grc);
+       skb->remcsum_offload = 0;
 
        return pp;
 }
index 28f14afd0dd3a392da3b84c5e791fffaf46ad254..498706b072fb70e1ffe6b5dba817816db5a4cfa7 100644 (file)
@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
                goto out_sock;
        }
 
-       /* Sysctl initialization */
-       net->ipv4.sysctl_igmp_max_memberships = 20;
-       net->ipv4.sysctl_igmp_max_msf = 10;
-       /* IGMP reports for link-local multicast groups are enabled by default */
-       net->ipv4.sysctl_igmp_llm_reports = 1;
-       net->ipv4.sysctl_igmp_qrv = 2;
        return 0;
 
 out_sock:
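
Taken together with the af_inet.c hunk above, this moves the IGMP sysctl defaults out of igmp_net_init() and into inet_init_net(), which runs for every new network namespace; the values therefore exist even on configurations where the IGMP pernet init never runs, yet other code still reads them. A small runnable sketch of the failure mode being avoided, with hypothetical names loosely modeled on the two hunks:

    #include <stdio.h>

    struct net_ctx {                 /* hypothetical per-namespace state */
        int igmp_qrv;
        int igmp_max_memberships;
    };

    /* Always runs at namespace creation (cf. inet_init_net). */
    static void inet_init(struct net_ctx *n)
    {
        n->igmp_qrv = 2;             /* defaults now live here ... */
        n->igmp_max_memberships = 20;
    }

    /* Optional subsystem init (cf. igmp_net_init); may never run. */
    static void igmp_init(struct net_ctx *n)
    {
        (void)n;                     /* ... and no longer here */
    }

    int main(void)
    {
        struct net_ctx n = { 0 };

        inet_init(&n);
        /* readers of the sysctls see sane values even if igmp_init()
         * was never called for this configuration */
        printf("qrv=%d max=%d\n", n.igmp_qrv, n.igmp_max_memberships);
        return 0;
    }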
index 50c74cd890bc79ed6c85c958c5397d833e9aa74a..e153c40c2436109d4bca4a9caf34b90cbf000cd9 100644 (file)
@@ -965,11 +965,12 @@ static int __ip_append_data(struct sock *sk,
                csummode = CHECKSUM_PARTIAL;
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
@@ -1288,6 +1289,7 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                return -EINVAL;
 
        if ((size + skb->len > mtu) &&
+           (skb_queue_len(&sk->sk_write_queue) == 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                if (skb->ip_summed != CHECKSUM_PARTIAL)
index 2920e0cb09f8d3e743eb4f49c16060ba1af48ed4..53de1424c13cda5d1fec826b97cacf4f95adc99a 100644 (file)
@@ -107,6 +107,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_ORIG_SACK_ACKED   0x200 /* Never retransmitted data are (s)acked  */
 #define FLAG_SND_UNA_ADVANCED  0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK      0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER    0x1000 /* Set TLP or RTO timer */
 #define FLAG_SACK_RENEGING     0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT  0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK  0x8000 /* do not call tcp_send_challenge_ack()  */
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
                return;
 
        /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-       if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-           (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+       if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+           (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
                tp->snd_cwnd = tp->snd_ssthresh;
                tp->snd_cwnd_stamp = tcp_jiffies32;
        }
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
                /* Offset the time elapsed after installing regular RTO */
                if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
                    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
-                       struct sk_buff *skb = tcp_write_queue_head(sk);
-                       u64 rto_time_stamp = skb->skb_mstamp +
-                                            jiffies_to_usecs(rto);
-                       s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+                       s64 delta_us = tcp_rto_delta_us(sk);
                        /* delta_us may not be positive if the socket is locked
                         * when the retrans timer fires and is rescheduled.
                         */
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
        }
 }
 
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+       if (!tcp_schedule_loss_probe(sk))
+               tcp_rearm_rto(sk);
+}
+
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                        ca_rtt_us, sack->rate);
 
        if (flag & FLAG_ACKED) {
-               tcp_rearm_rto(sk);
+               flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
                if (unlikely(icsk->icsk_mtup.probe_size &&
                             !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
                        tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                 * after when the head was last (re)transmitted. Otherwise the
                 * timeout may continue to extend in loss recovery.
                 */
-               tcp_rearm_rto(sk);
+               flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
        }
 
        if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3580,9 +3585,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (after(ack, tp->snd_nxt))
                goto invalid_ack;
 
-       if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
-               tcp_rearm_rto(sk);
-
        if (after(ack, prior_snd_una)) {
                flag |= FLAG_SND_UNA_ADVANCED;
                icsk->icsk_retransmits = 0;
@@ -3647,18 +3649,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
                                    &sack_state);
 
+       if (tp->tlp_high_seq)
+               tcp_process_tlp_ack(sk, ack, flag);
+       /* If needed, reset TLP/RTO timer; RACK may later override this. */
+       if (flag & FLAG_SET_XMIT_TIMER)
+               tcp_set_xmit_timer(sk);
+
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
                tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
        }
-       if (tp->tlp_high_seq)
-               tcp_process_tlp_ack(sk, ack, flag);
 
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
                sk_dst_confirm(sk);
 
-       if (icsk->icsk_pending == ICSK_TIME_RETRANS)
-               tcp_schedule_loss_probe(sk);
        delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
        lost = tp->lost - lost;                 /* freshly marked lost */
        tcp_rate_gen(sk, delivered, lost, sack_state.rate);
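
Both tcp_rearm_rto() above and tcp_schedule_loss_probe() in the next file now lean on a tcp_rto_delta_us() helper that is added in a header hunk not shown here. Reconstructed from the open-coded computation this series removes, it presumably looks roughly like the following (a sketch, not the verbatim header change):

    /* Microseconds until the RTO would fire, measured from the last
     * (re)transmit time of the head of the write queue; can be negative
     * if that moment has already passed. */
    static inline s64 tcp_rto_delta_us(const struct sock *sk)
    {
        const struct sk_buff *skb = tcp_write_queue_head(sk);
        u32 rto = inet_csk(sk)->icsk_rto;
        u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);

        return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
    }

Centralizing this lets the RTO rearm path and the TLP scheduler agree on one definition of "how far away is the RTO", which is what the FLAG_SET_XMIT_TIMER rework above depends on.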
index 2f1588bf73dad9b34aebee45ce738a7e9a4515ae..b7661a68d4984c485a4853441d21abe8da9e325a 100644 (file)
@@ -2377,24 +2377,15 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 timeout, tlp_time_stamp, rto_time_stamp;
        u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
+       u32 timeout, rto_delta_us;
 
-       /* No consecutive loss probes. */
-       if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
-               tcp_rearm_rto(sk);
-               return false;
-       }
        /* Don't do any loss probe on a Fast Open connection before 3WHS
         * finishes.
         */
        if (tp->fastopen_rsk)
                return false;
 
-       /* TLP is only scheduled when next timer event is RTO. */
-       if (icsk->icsk_pending != ICSK_TIME_RETRANS)
-               return false;
-
        /* Schedule a loss probe in 2*RTT for SACK capable connections
         * in Open state, that are either limited by cwnd or application.
         */
@@ -2417,14 +2408,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
                                (rtt + (rtt >> 1) + TCP_DELACK_MAX));
        timeout = max_t(u32, timeout, msecs_to_jiffies(10));
 
-       /* If RTO is shorter, just schedule TLP in its place. */
-       tlp_time_stamp = tcp_jiffies32 + timeout;
-       rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
-       if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-               s32 delta = rto_time_stamp - tcp_jiffies32;
-               if (delta > 0)
-                       timeout = delta;
-       }
+       /* If the RTO formula yields an earlier time, then use that time. */
+       rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+       if (rto_delta_us > 0)
+               timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
                                  TCP_RTO_MAX);
@@ -3449,6 +3436,10 @@ int tcp_connect(struct sock *sk)
        int err;
 
        tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+
+       if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+               return -EHOSTUNREACH; /* Routing failure or similar. */
+
        tcp_connect_init(sk);
 
        if (unlikely(tp->repair)) {
index c0feeeef962aa31401ee90f8bd015c2aae2ef932..e906014890b64ef6a2bfe022e17358bd9659d204 100644 (file)
@@ -652,7 +652,8 @@ static void tcp_keepalive_timer (unsigned long data)
                goto death;
        }
 
-       if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+       if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+           ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
                goto out;
 
        elapsed = keepalive_time_when(tp);
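
The keepalive fix relies on the TCPF_* convention: every state TCP_FOO has a companion bit TCPF_FOO == 1 << TCP_FOO, so membership in a set of states is a single mask test instead of chained comparisons. A standalone illustration with the relevant state numbers (values as in the kernel's enum, assumed here):

    #include <stdio.h>

    enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_CLOSE = 7 }; /* subset */
    #define TCPF_SYN_SENT (1 << TCP_SYN_SENT)
    #define TCPF_CLOSE    (1 << TCP_CLOSE)

    /* keepalives are pointless in CLOSE and SYN_SENT: one AND covers both */
    static int keepalive_irrelevant(int sk_state)
    {
        return (1 << sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT);
    }

    int main(void)
    {
        printf("SYN_SENT:    %d\n", !!keepalive_irrelevant(TCP_SYN_SENT));
        printf("ESTABLISHED: %d\n", !!keepalive_irrelevant(TCP_ESTABLISHED));
        return 0;
    }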
index e6276fa3750b909615668fddf84495369bd7d369..a7c804f73990a0610bc85c02fc2dd76858973c22 100644 (file)
@@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
        if (is_udplite)                                  /*     UDP-Lite      */
                csum = udplite_csum(skb);
 
-       else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+       else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 
                skb->ip_summed = CHECKSUM_NONE;
                goto send;
index 781250151d40ee4559f7b90d15dccad8ffaeafd0..0932c85b42af0bc868badd1771b5cb9353c969a9 100644 (file)
@@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;
 
-       skb->ip_summed = CHECKSUM_NONE;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
 
        /* If there is no outer header we can fake a checksum offload
         * due to the fact that we have already done the checksum in
index 162efba0d0cd851848363588318cf6ade4a5a62c..2dfe50d8d609a7a623edacbe40e93022dfac685e 100644 (file)
@@ -1381,11 +1381,12 @@ emsgsize:
         */
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : headersize)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen, exthdrlen,
                                          transhdrlen, mtu, flags, fl6);
index 4d30c96a819dee548ec34704a34b22774fac5da1..a640fbcba15dbf246e419d3e03da8eca0fa6901a 100644 (file)
@@ -2351,6 +2351,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
        if (on_link)
                nrt->rt6i_flags &= ~RTF_GATEWAY;
 
+       nrt->rt6i_protocol = RTPROT_REDIRECT;
        nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
        if (ip6_ins_rt(nrt))
@@ -2461,6 +2462,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
+               .fc_protocol = RTPROT_RA,
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = net,
@@ -2513,6 +2515,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
                                  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
+               .fc_protocol = RTPROT_RA,
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = dev_net(dev),
@@ -3424,14 +3427,6 @@ static int rt6_fill_node(struct net *net,
        rtm->rtm_flags = 0;
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
-       if (rt->rt6i_flags & RTF_DYNAMIC)
-               rtm->rtm_protocol = RTPROT_REDIRECT;
-       else if (rt->rt6i_flags & RTF_ADDRCONF) {
-               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
-                       rtm->rtm_protocol = RTPROT_RA;
-               else
-                       rtm->rtm_protocol = RTPROT_KERNEL;
-       }
 
        if (rt->rt6i_flags & RTF_CACHE)
                rtm->rtm_flags |= RTM_F_CLONED;
index a2267f80febbb6f31459097f27bd89d51d0f2b11..e7d378c032cb6ebe80323db987ca201e5ae2d845 100644 (file)
@@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
 
-               skb->ip_summed = CHECKSUM_NONE;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                /* If there is no outer header we can fake a checksum offload
                 * due to the fact that we have already done the checksum in
index 0615c2a950fab992134d0071707b5b336f6fb231..008a45ca31124ed5fa54d666fce61c7982b12a2f 100644 (file)
@@ -3700,14 +3700,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-                       return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
                if (val > INT_MAX)
                        return -EINVAL;
-               po->tp_reserve = val;
-               return 0;
+               lock_sock(sk);
+               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+                       ret = -EBUSY;
+               } else {
+                       po->tp_reserve = val;
+                       ret = 0;
+               }
+               release_sock(sk);
+               return ret;
        }
        case PACKET_LOSS:
        {
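
The PACKET_RESERVE change above closes a time-of-check/time-of-use race: checking for a mapped ring and storing tp_reserve now happen inside one lock_sock()/release_sock() section, so a concurrent ring setup can no longer slip in between them. A userspace analogue of the pattern, with a pthread mutex standing in for the socket lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
    static int ring_active;        /* stands in for po->rx_ring.pg_vec */
    static unsigned int reserve;   /* stands in for po->tp_reserve */

    /* check-then-set as one critical section, as in the fixed hunk */
    static int set_reserve(unsigned int val)
    {
        int ret;

        pthread_mutex_lock(&sk_lock);
        if (ring_active) {
            ret = -1;              /* -EBUSY in the kernel */
        } else {
            reserve = val;
            ret = 0;
        }
        pthread_mutex_unlock(&sk_lock);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", set_reserve(128)); /* 0: no ring mapped yet */
        ring_active = 1;
        printf("%d\n", set_reserve(256)); /* -1: ring exists */
        return 0;
    }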
index e10624aa6959b596a2629a9f18bb25504428545f..9722bf839d9dec7fc7c7bed5cca0818389b245ba 100644 (file)
@@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-       if (rds_ib_ring_low(&ic->i_recv_ring))
+       if (rds_ib_ring_low(&ic->i_recv_ring)) {
                rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
+               rds_ib_stats_inc(s_ib_rx_refill_from_cq);
+       }
 }
 
 int rds_ib_recv_path(struct rds_conn_path *cp)
@@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
        if (rds_conn_up(conn)) {
                rds_ib_attempt_ack(ic);
                rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+               rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        }
 
        return ret;
index 36f0ced9e60c03297e195135ca5a8a53d1a3a27b..d516ba8178b8099f5e8e180f2e60e7a61de37811 100644 (file)
@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
 static unsigned int xt_net_id;
 static struct tc_action_ops act_xt_ops;
 
-static int ipt_init_target(struct xt_entry_target *t, char *table,
-                          unsigned int hook)
+static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+                          char *table, unsigned int hook)
 {
        struct xt_tgchk_param par;
        struct xt_target *target;
@@ -49,8 +49,9 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
                return PTR_ERR(target);
 
        t->u.kernel.target = target;
+       memset(&par, 0, sizeof(par));
+       par.net       = net;
        par.table     = table;
-       par.entryinfo = NULL;
        par.target    = target;
        par.targinfo  = t->data;
        par.hook_mask = hook;
@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
        [TCA_IPT_TARG]  = { .len = sizeof(struct xt_entry_target) },
 };
 
-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          const struct tc_action_ops *ops, int ovr, int bind)
 {
+       struct tc_action_net *tn = net_generic(net, id);
        struct nlattr *tb[TCA_IPT_MAX + 1];
        struct tcf_ipt *ipt;
        struct xt_entry_target *td, *t;
@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        if (unlikely(!t))
                goto err2;
 
-       err = ipt_init_target(t, tname, hook);
+       err = ipt_init_target(net, t, tname, hook);
        if (err < 0)
                goto err3;
 
@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a, int ovr,
                        int bind)
 {
-       struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-       return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
+       return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
+                             bind);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a, int ovr,
                       int bind)
 {
-       struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-       return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
+       return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
+                             bind);
 }
 
 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
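
Besides threading struct net through to ipt_init_target(), the hunk above replaces a single NULL assignment (par.entryinfo) with a memset() of the whole parameter block, so fields that were never mentioned, and fields added later such as par.net, start from zero instead of stack garbage. The general pattern, as a small sketch with a hypothetical param struct:

    #include <stdio.h>
    #include <string.h>

    struct check_param {        /* hypothetical stand-in for xt_tgchk_param */
        void *net;
        const char *table;
        unsigned int hook_mask;
        /* fields appended in later versions are zeroed for free */
    };

    int main(void)
    {
        struct check_param par;

        /* zero everything, then set only what this caller means to set */
        memset(&par, 0, sizeof(par));
        par.table = "mangle";
        par.hook_mask = 1u << 3;

        printf("net=%p table=%s\n", par.net, par.table);
        return 0;
    }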
index aeef8011ac7d82d828289f4085efe3acaa8a3945..9b4dcb6a16b50eefc04167dfdd1e509546b71bf6 100644 (file)
@@ -1455,10 +1455,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        /* Initiate synch mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
-               if (!tipc_link_is_up(l)) {
-                       tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+               if (!tipc_link_is_up(l))
                        __tipc_node_link_up(n, bearer_id, xmitq);
-               }
                if (n->state == SELF_UP_PEER_UP) {
                        n->sync_point = syncpt;
                        tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
index 3bd5f4f302354cf8cecd78ae0ab13b9b3bf130ef..bc443201d3ef00ac2b197da0896a186891cff188 100755 (executable)
@@ -18,6 +18,7 @@ my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 use Cwd;
+use File::Find;
 
 my $cur_path = fastgetcwd() . '/';
 my $lk_path = "./";
@@ -58,6 +59,7 @@ my $from_filename = 0;
 my $pattern_depth = 0;
 my $version = 0;
 my $help = 0;
+my $find_maintainer_files = 0;
 
 my $vcs_used = 0;
 
@@ -249,6 +251,7 @@ if (!GetOptions(
                'sections!' => \$sections,
                'fe|file-emails!' => \$file_emails,
                'f|file' => \$from_filename,
+               'find-maintainer-files' => \$find_maintainer_files,
                'v|version' => \$version,
                'h|help|usage' => \$help,
                )) {
@@ -307,36 +310,74 @@ if (!top_of_kernel_tree($lk_path)) {
 
 my @typevalue = ();
 my %keyword_hash;
+my @mfiles = ();
 
-open (my $maint, '<', "${lk_path}MAINTAINERS")
-    or die "$P: Can't open MAINTAINERS: $!\n";
-while (<$maint>) {
-    my $line = $_;
-
-    if ($line =~ m/^([A-Z]):\s*(.*)/) {
-       my $type = $1;
-       my $value = $2;
-
-       ##Filename pattern matching
-       if ($type eq "F" || $type eq "X") {
-           $value =~ s@\.@\\\.@g;       ##Convert . to \.
-           $value =~ s/\*/\.\*/g;       ##Convert * to .*
-           $value =~ s/\?/\./g;         ##Convert ? to .
-           ##if pattern is a directory and it lacks a trailing slash, add one
-           if ((-d $value)) {
-               $value =~ s@([^/])$@$1/@;
+sub read_maintainer_file {
+    my ($file) = @_;
+
+    open (my $maint, '<', "$file")
+       or die "$P: Can't open MAINTAINERS file '$file': $!\n";
+    while (<$maint>) {
+       my $line = $_;
+
+       if ($line =~ m/^([A-Z]):\s*(.*)/) {
+           my $type = $1;
+           my $value = $2;
+
+           ##Filename pattern matching
+           if ($type eq "F" || $type eq "X") {
+               $value =~ s@\.@\\\.@g;       ##Convert . to \.
+               $value =~ s/\*/\.\*/g;       ##Convert * to .*
+               $value =~ s/\?/\./g;         ##Convert ? to .
+               ##if pattern is a directory and it lacks a trailing slash, add one
+               if ((-d $value)) {
+                   $value =~ s@([^/])$@$1/@;
+               }
+           } elsif ($type eq "K") {
+               $keyword_hash{@typevalue} = $value;
            }
-       } elsif ($type eq "K") {
-           $keyword_hash{@typevalue} = $value;
+           push(@typevalue, "$type:$value");
+       } elsif (!(/^\s*$/ || /^\s*\#/)) {
+           $line =~ s/\n$//g;
+           push(@typevalue, $line);
        }
-       push(@typevalue, "$type:$value");
-    } elsif (!/^(\s)*$/) {
-       $line =~ s/\n$//g;
-       push(@typevalue, $line);
     }
+    close($maint);
+}
+
+sub find_is_maintainer_file {
+    my ($file) = $_;
+    return if ($file !~ m@/MAINTAINERS$@);
+    $file = $File::Find::name;
+    return if (! -f $file);
+    push(@mfiles, $file);
 }
-close($maint);
 
+sub find_ignore_git {
+    return grep { $_ !~ /^\.git$/; } @_;
+}
+
+if (-d "${lk_path}MAINTAINERS") {
+    opendir(DIR, "${lk_path}MAINTAINERS") or die $!;
+    my @files = readdir(DIR);
+    closedir(DIR);
+    foreach my $file (@files) {
+       push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);
+    }
+}
+
+if ($find_maintainer_files) {
+    find( { wanted => \&find_is_maintainer_file,
+           preprocess => \&find_ignore_git,
+           no_chdir => 1,
+       }, "${lk_path}");
+} else {
+    push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";
+}
+
+foreach my $file (@mfiles) {
+    read_maintainer_file("$file");
+}
 
 #
 # Read mail address map
@@ -873,7 +914,7 @@ sub top_of_kernel_tree {
     if (   (-f "${lk_path}COPYING")
        && (-f "${lk_path}CREDITS")
        && (-f "${lk_path}Kbuild")
-       && (-f "${lk_path}MAINTAINERS")
+       && (-e "${lk_path}MAINTAINERS")
        && (-f "${lk_path}Makefile")
        && (-f "${lk_path}README")
        && (-d "${lk_path}Documentation")
index a0fe34349b24fccd9fb9b8834ea51d3e2d9e155a..e40b53db7f9fdc7c8e3f7094f8b862d66ef7651f 100644 (file)
@@ -2,9 +2,9 @@
 
 use strict;
 
-my %map;
+my $P = $0;
 
-# sort comparison function
+# sort comparison functions
 sub by_category($$) {
     my ($a, $b) = @_;
 
@@ -15,20 +15,33 @@ sub by_category($$) {
     $a =~ s/THE REST/ZZZZZZ/g;
     $b =~ s/THE REST/ZZZZZZ/g;
 
-    $a cmp $b;
+    return $a cmp $b;
 }
 
-sub alpha_output {
-    my $key;
-    my $sort_method = \&by_category;
-    my $sep = "";
-
-    foreach $key (sort $sort_method keys %map) {
-        if ($key ne " ") {
-            print $sep . $key . "\n";
-            $sep = "\n";
-        }
-        print $map{$key};
+sub by_pattern($$) {
+    my ($a, $b) = @_;
+    my $preferred_order = 'MRPLSWTQBCFXNK';
+
+    my $a1 = uc(substr($a, 0, 1));
+    my $b1 = uc(substr($b, 0, 1));
+
+    my $a_index = index($preferred_order, $a1);
+    my $b_index = index($preferred_order, $b1);
+
+    $a_index = 1000 if ($a_index == -1);
+    $b_index = 1000 if ($b_index == -1);
+
+    if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||
+       ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
+       return $a cmp $b;
+    }
+
+    if ($a_index < $b_index) {
+       return -1;
+    } elsif ($a_index == $b_index) {
+       return 0;
+    } else {
+       return 1;
     }
 }
 
@@ -39,39 +52,77 @@ sub trim {
     return $s;
 }
 
+sub alpha_output {
+    my ($hashref, $filename) = (@_);
+
+    open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n";
+    foreach my $key (sort by_category keys %$hashref) {
+       if ($key eq " ") {
+           chomp $$hashref{$key};
+           print $file $$hashref{$key};
+       } else {
+           print $file "\n" . $key . "\n";
+           foreach my $pattern (sort by_pattern split('\n', $$hashref{$key})) {
+               print $file ($pattern . "\n");
+           }
+       }
+    }
+    close($file);
+}
+
 sub file_input {
+    my ($hashref, $filename) = (@_);
+
     my $lastline = "";
     my $case = " ";
-    $map{$case} = "";
+    $$hashref{$case} = "";
+
+    open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n";
 
-    while (<>) {
+    while (<$file>) {
         my $line = $_;
 
         # Pattern line?
         if ($line =~ m/^([A-Z]):\s*(.*)/) {
             $line = $1 . ":\t" . trim($2) . "\n";
             if ($lastline eq "") {
-                $map{$case} = $map{$case} . $line;
+                $$hashref{$case} = $$hashref{$case} . $line;
                 next;
             }
             $case = trim($lastline);
-            exists $map{$case} and die "Header '$case' already exists";
-            $map{$case} = $line;
+            exists $$hashref{$case} and die "Header '$case' already exists";
+            $$hashref{$case} = $line;
             $lastline = "";
             next;
         }
 
         if ($case eq " ") {
-            $map{$case} = $map{$case} . $lastline;
+            $$hashref{$case} = $$hashref{$case} . $lastline;
             $lastline = $line;
             next;
         }
         trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'");
         $lastline = $line;
     }
-    $map{$case} = $map{$case} . $lastline;
+    $$hashref{$case} = $$hashref{$case} . $lastline;
+    close($file);
 }
 
-&file_input;
-&alpha_output;
+my %hash;
+my %new_hash;
+
+file_input(\%hash, "MAINTAINERS");
+
+foreach my $type (@ARGV) {
+    foreach my $key (keys %hash) {
+       if ($key =~ /$type/ || $hash{$key} =~ /$type/) {
+           $new_hash{$key} = $hash{$key};
+           delete $hash{$key};
+       }
+    }
+}
+
+alpha_output(\%hash, "MAINTAINERS.new");
+alpha_output(\%new_hash, "SECTION.new");
+
 exit(0);
index 7598361ef1f10898ea73d944ae0b0c02c4725d99..da2172ff9662d0e86ffd132b6041b30ea232eb87 100644 (file)
@@ -11,6 +11,8 @@
 #  define __NR_bpf 280
 # elif defined(__sparc__)
 #  define __NR_bpf 349
+# elif defined(__s390__)
+#  define __NR_bpf 351
 # else
 #  error __NR_bpf not defined. libbpf does not support your arch.
 # endif
index 256f571f2ab525700c121c94b6a8e866acd79b73..e5bbb090bf88549111f939dcd7c1f500e6f84a4f 100644 (file)
@@ -39,6 +39,8 @@
 #  define __NR_bpf 280
 # elif defined(__sparc__)
 #  define __NR_bpf 349
+# elif defined(__s390__)
+#  define __NR_bpf 351
 # else
 #  error __NR_bpf not defined. libbpf does not support your arch.
 # endif
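
These two hunks add the s390 syscall number (351) to the fallback tables the BPF tooling uses when libc headers predate bpf(2). The number is all the wrapper needs: the tools invoke the syscall directly. A minimal sketch of that invocation (the x86_64 fallback number 321 is assumed for the example; the map create either succeeds or fails cleanly, e.g. with EPERM):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    #ifndef __NR_bpf
    # if defined(__x86_64__)
    #  define __NR_bpf 321
    # elif defined(__s390__)
    #  define __NR_bpf 351
    # else
    #  error __NR_bpf not defined
    # endif
    #endif

    int main(void)
    {
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));   /* unused fields must be zero */
        attr.map_type    = BPF_MAP_TYPE_ARRAY;
        attr.key_size    = 4;
        attr.value_size  = 4;
        attr.max_entries = 1;

        fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
        printf("BPF_MAP_CREATE -> %d\n", fd); /* map fd, or -1 with errno */
        return 0;
    }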
index 71729d47eb8552bca1350fc923b91ec189e2ef2f..7956302ecdf2ace692ba49927fe72c97ebf0cca3 100644 (file)
 
 int _version SEC("version") = 1;
 
+#if  __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define TEST_FIELD(TYPE, FIELD, MASK)                                  \
        {                                                               \
                TYPE tmp = *(volatile TYPE *)&skb->FIELD;               \
                if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))   \
                        return TC_ACT_SHOT;                             \
        }
+#else
+#define TEST_FIELD_OFFSET(a, b)        ((sizeof(a) - sizeof(b)) / sizeof(b))
+#define TEST_FIELD(TYPE, FIELD, MASK)                                  \
+       {                                                               \
+               TYPE tmp = *((volatile TYPE *)&skb->FIELD +             \
+                             TEST_FIELD_OFFSET(skb->FIELD, TYPE));     \
+               if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))   \
+                       return TC_ACT_SHOT;                             \
+       }
+#endif
 
 SEC("test1")
 int process(struct __sk_buff *skb)
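
The new big-endian branch of TEST_FIELD reads the least-significant part of a __u32 field: on big-endian machines the low-order bytes sit at the end of the word, so the pointer is advanced by (sizeof(field) - sizeof(TYPE)) / sizeof(TYPE) elements before dereferencing. The same arithmetic on the host, as a runnable illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_OFFSET(big, small) ((sizeof(big) - sizeof(small)) / sizeof(small))

    int main(void)
    {
        uint32_t word = 0x11223344;
        uint8_t low;

        /* low byte of a u32 lives at index 0 (LE) or index 3 (BE) */
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        low = *(uint8_t *)&word;
    #else
        low = *((uint8_t *)&word + FIELD_OFFSET(word, uint8_t));
    #endif
        printf("low byte = 0x%02x (expect 0x44)\n", low);
        return 0;
    }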
index addea82f76c943edb42ebbb6075abe849b5b7f32..d3ed7324105e4eeeb281c4c90922c27541b2b58f 100644 (file)
@@ -8,6 +8,7 @@
  * License as published by the Free Software Foundation.
  */
 
+#include <endian.h>
 #include <asm/types.h>
 #include <linux/types.h>
 #include <stdint.h>
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
                "check skb->hash byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
                "check skb->hash byte load not permitted 3",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
                "check skb->hash half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
                "check skb->hash half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period word load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
                "check skb->data half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
 #else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
                "check skb->tc_classid half load not permitted for lwt prog",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, tc_classid)),
 #else