git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 29 Apr 2017 21:00:42 +0000 (14:00 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 29 Apr 2017 21:00:42 +0000 (14:00 -0700)
Pull iov iter fix from Al Viro.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  fix a braino in ITER_PIPE iov_iter_revert()

180 files changed:
Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/ptrace.h
arch/arc/kernel/setup.c
arch/mips/Makefile
arch/mips/include/asm/asm-prototypes.h
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/elf.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/relocate.c
arch/mips/kernel/smp-cps.c
arch/mips/mti-malta/malta-int.c
arch/mips/pci/pci-legacy.c
arch/parisc/include/asm/uaccess.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/s390/include/asm/pgtable.h
arch/sparc/Kconfig
arch/sparc/include/asm/ptrace.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/ptrace_64.c
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/mm/hugetlbpage.c
arch/x86/kernel/cpu/mcheck/mce-genpool.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce.c
block/blk-mq.c
block/elevator.c
crypto/ahash.c
crypto/algif_aead.c
crypto/lrw.c
crypto/xts.c
drivers/acpi/power.c
drivers/block/mtip32xx/mtip32xx.c
drivers/clk/clk-stm32f4.c
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/ccu-sun8i-a33.c
drivers/clk/sunxi-ng/ccu_common.c
drivers/clk/sunxi-ng/ccu_common.h
drivers/hid/wacom_wac.c
drivers/input/mouse/elantech.c
drivers/input/serio/i8042-x86ia64io.h
drivers/mmc/core/sdio_bus.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mtd/ubi/upd.c
drivers/net/bonding/bond_main.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_core.h
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_regs.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ethernet/sfc/workarounds.h
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/phy/dp83640.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/usb/Kconfig
drivers/net/usb/ch9200.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/hso.c
drivers/net/usb/kaweth.c
drivers/net/usb/lan78xx.c
drivers/net/usb/plusb.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9700.c
drivers/net/vrf.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/pci/dwc/pcie-hisi.c
drivers/scsi/scsi_lib.c
drivers/video/backlight/pwm_bl.c
fs/btrfs/qgroup.c
fs/ceph/inode.c
fs/cifs/smb1ops.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfssvc.c
fs/nfsd/nfsxdr.c
fs/nsfs.c
fs/stat.c
fs/ubifs/debug.c
fs/ubifs/dir.c
include/crypto/internal/hash.h
include/linux/mmc/sdio_func.h
include/linux/phy.h
include/uapi/linux/ipv6_route.h
kernel/bpf/syscall.c
kernel/irq/affinity.c
kernel/locking/lockdep_internals.h
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
lib/Kconfig.debug
mm/migrate.c
mm/page_alloc.c
mm/vmstat.c
net/bridge/br_device.c
net/bridge/br_if.c
net/core/dev.c
net/core/netpoll.c
net/core/skbuff.c
net/ipv4/af_inet.c
net/ipv4/ip_sockglue.c
net/ipv4/ipmr.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_output.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/ip6_input.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/seg6.c
net/key/af_key.c
net/mac80211/rx.c
net/packet/af_packet.c
net/qrtr/qrtr.c
net/sched/act_api.c
net/tipc/socket.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
security/keys/gc.c
security/keys/keyctl.c
security/keys/process_keys.c
sound/core/seq/seq_lock.c
sound/firewire/lib.h
sound/firewire/oxfw/oxfw.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/boards/bytcr_rt5651.c
sound/soc/soc-topology.c
sound/soc/sti/uniperif.h
sound/soc/sti/uniperif_player.c
sound/soc/sti/uniperif_reader.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc [new file with mode: 0644]
tools/testing/selftests/net/psock_fanout.c
tools/testing/selftests/net/psock_lib.h

index b7fa3b97986d5fff391bf4ff1a01bdd25ce096e9..a339dbb154933282ee06aaeec0571e0d95a72b42 100644 (file)
@@ -44,13 +44,19 @@ Hip05 Example (note that Hip06 is the same except compatible):
        };
 
 HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
+
+Some BIOSes place the host controller in a mode where it is ECAM
+compliant for all devices other than the root complex. In such cases,
+the host controller should be described as below.
+
 The properties and their meanings are identical to those described in
 host-generic-pci.txt except as listed below.
 
 Properties of the host controller node that differ from
 host-generic-pci.txt:
 
-- compatible     : Must be "hisilicon,pcie-almost-ecam"
+- compatible     : Must be "hisilicon,hip06-pcie-ecam", or
+                  "hisilicon,hip07-pcie-ecam"
 
 - reg            : Two entries: First the ECAM configuration space for any
                   other bus underneath the root bus. Second, the base
@@ -59,7 +65,7 @@ host-generic-pci.txt:
 
 Example:
        pcie0: pcie@a0090000 {
-               compatible = "hisilicon,pcie-almost-ecam";
+               compatible = "hisilicon,hip06-pcie-ecam";
                reg = <0 0xb0000000 0 0x2000000>,  /*  ECAM configuration space */
                      <0 0xa0090000 0 0x10000>; /* host bridge registers */
                bus-range = <0  31>;
index 676c139bc883ef7906e854174b8177ef282e0c51..38d3e4ed7208bb3969abcbfc76c4e68fd90d12f5 100644 (file)
@@ -2585,12 +2585,26 @@ F:      include/uapi/linux/if_bonding.h
 
 BPF (Safe dynamic programs and tools)
 M:     Alexei Starovoitov <ast@kernel.org>
+M:     Daniel Borkmann <daniel@iogearbox.net>
 L:     netdev@vger.kernel.org
 L:     linux-kernel@vger.kernel.org
 S:     Supported
+F:     arch/x86/net/bpf_jit*
+F:     Documentation/networking/filter.txt
+F:     include/linux/bpf*
+F:     include/linux/filter.h
+F:     include/uapi/linux/bpf*
+F:     include/uapi/linux/filter.h
 F:     kernel/bpf/
-F:     tools/testing/selftests/bpf/
+F:     kernel/trace/bpf_trace.c
 F:     lib/test_bpf.c
+F:     net/bpf/
+F:     net/core/filter.c
+F:     net/sched/act_bpf.c
+F:     net/sched/cls_bpf.c
+F:     samples/bpf/
+F:     tools/net/bpf*
+F:     tools/testing/selftests/bpf/
 
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
@@ -8761,6 +8775,7 @@ W:        http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+B:     mailto:netdev@vger.kernel.org
 S:     Maintained
 F:     net/
 F:     include/net/
@@ -12464,7 +12479,6 @@ F:      drivers/clk/ti/
 F:     include/linux/clk/ti.h
 
 TI ETHERNET SWITCH DRIVER (CPSW)
-M:     Mugunthan V N <mugunthanvnm@ti.com>
 R:     Grygorii Strashko <grygorii.strashko@ti.com>
 L:     linux-omap@vger.kernel.org
 L:     netdev@vger.kernel.org
index 5039b9148d15de5762a2a33147535569d5be5746..77930269545306e2a2e8e3b0d69900ab1dfec9c1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index c9f30f4763abce5ca1ffda1817e87bb5de410861..5d7fb3e7cb97159012d49e6cee745ada293949a6 100644 (file)
@@ -406,6 +406,14 @@ config ARC_HAS_DIV_REM
        bool "Insn: div, divu, rem, remu"
        default y
 
+config ARC_HAS_ACCL_REGS
+       bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
+       default n
+       help
+         Depending on the configuration, CPU can contain accumulator reg-pair
+         (also referred to as r58:r59). These can also be used by gcc as GPR so
+         kernel needs to save/restore per process
+
 endif  # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
index b65930a4958959fb65891ac375a7637a0e5cb146..54b54da6384c197a93848e05ef292c224f5c0f91 100644 (file)
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
+#define ATOMIC_INIT(i) { (i) }
+
 #ifndef CONFIG_ARC_PLAT_EZNPS
 
 #define atomic_read(v)  READ_ONCE((v)->counter)
-#define ATOMIC_INIT(i) { (i) }
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
index aee1a77934cf694e37ae579347a37bc167e43762..ac85380d14a4bb364bb9077e8c8b329f862845b8 100644 (file)
        ;
        ; Now manually save: r12, sp, fp, gp, r25
 
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       PUSH    r59
+       PUSH    r58
+#endif
+
        PUSH    r30
        PUSH    r12
 
        POP     r12
        POP     r30
 
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       POP     r58
+       POP     r59
+#endif
+
 .endm
 
 /*------------------------------------------------------------------------*/
index 47111d565a959d117ab9e2c7c9eea3b852137971..5297faa8a37803fd702da4270a7e23f5d6ba4f2b 100644 (file)
@@ -86,6 +86,10 @@ struct pt_regs {
 
        unsigned long r12, r30;
 
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
+#endif
+
        /*------- Below list auto saved by h/w -----------*/
        unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
 
index fa62404ba58f77ab9fc24489b35c15132d03d720..fc8211f338ad33cab4036a09693cb77e1ea62a98 100644 (file)
@@ -319,7 +319,8 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 static void arc_chk_core_config(void)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
-       int fpu_enabled;
+       int saved = 0, present = 0;
+       char *opt_nm = NULL;
 
        if (!cpu->extn.timer0)
                panic("Timer0 is not present!\n");
@@ -346,17 +347,28 @@ static void arc_chk_core_config(void)
 
        /*
         * FP hardware/software config sanity
-        * -If hardware contains DPFP, kernel needs to save/restore FPU state
+        * -If hardware present, kernel needs to save/restore FPU state
         * -If not, it will crash trying to save/restore the non-existant regs
-        *
-        * (only DPDP checked since SP has no arch visible regs)
         */
-       fpu_enabled = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
 
-       if (cpu->extn.fpu_dp && !fpu_enabled)
-               pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
-       else if (!cpu->extn.fpu_dp && fpu_enabled)
-               panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+       if (is_isa_arcompact()) {
+               opt_nm = "CONFIG_ARC_FPU_SAVE_RESTORE";
+               saved = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
+
+               /* only DPDP checked since SP has no arch visible regs */
+               present = cpu->extn.fpu_dp;
+       } else {
+               opt_nm = "CONFIG_ARC_HAS_ACCL_REGS";
+               saved = IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS);
+
+               /* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
+               present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
+       }
+
+       if (present && !saved)
+               pr_warn("Enable %s for working apps\n", opt_nm);
+       else if (!present && saved)
+               panic("Disable %s, hardware NOT present\n", opt_nm);
 }
 
 /*
index 8ef9c02747fa95a753ea79b77ed2a177a60ac6ad..02a1787c888c09b0d36f0b9ea6bea0988c3134c2 100644 (file)
@@ -489,7 +489,7 @@ $(generic_defconfigs):
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
                -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ \
                $(foreach board,$(BOARDS),$(generic_config_dir)/board-$(board).config)
-       $(Q)$(MAKE) olddefconfig
+       $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
 
 #
 # Prevent generic merge_config rules attempting to merge single fragments
@@ -503,8 +503,8 @@ $(generic_config_dir)/%.config: ;
 #
 .PHONY: sead3_defconfig
 sead3_defconfig:
-       $(Q)$(MAKE) 32r2el_defconfig BOARDS=sead-3
+       $(Q)$(MAKE) -f $(srctree)/Makefile 32r2el_defconfig BOARDS=sead-3
 
 .PHONY: sead3micro_defconfig
 sead3micro_defconfig:
-       $(Q)$(MAKE) micro32r2el_defconfig BOARDS=sead-3
+       $(Q)$(MAKE) -f $(srctree)/Makefile micro32r2el_defconfig BOARDS=sead-3
index a160cf69bb92d3ec35ec20236015b8405f29e0a3..6e28971fe73ad7e8ec0ab07c5e6e87b68d76f857 100644 (file)
@@ -3,3 +3,4 @@
 #include <asm/fpu.h>
 #include <asm-generic/asm-prototypes.h>
 #include <asm/uaccess.h>
+#include <asm/ftrace.h>
index 804d2a2a19fe03175aa6ad6fa56f91238591f493..dd6a18bc10abd0c34a6717fe12bb7169a4589231 100644 (file)
@@ -80,7 +80,7 @@ static unsigned int calculate_min_delta(void)
                }
 
                /* Sorted insert of 75th percentile into buf2 */
-               for (k = 0; k < i; ++k) {
+               for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
                        if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
                                l = min_t(unsigned int,
                                          i, ARRAY_SIZE(buf2) - 1);
index 6430bff21fff80a7bb6647c6af0198364cac7bdd..5c429d70e17f6f24cbcfd0fa912c67eccd11f28f 100644 (file)
@@ -257,7 +257,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
        else if ((prog_req.fr1 && prog_req.frdefault) ||
                 (prog_req.single && !prog_req.frdefault))
                /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
-               state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+               state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
                                          cpu_has_mips_r2_r6) ?
                                          FP_FR1 : FP_FR0;
        else if (prog_req.fr1)
index 1f4bd222ba765788fe8e9c1f14e382c59b60df62..eb6c0d582626b114fcb8d30f9fb28ee3472e8cc9 100644 (file)
@@ -244,9 +244,6 @@ static int compute_signal(int tt)
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
        int reg;
-       struct thread_info *ti = task_thread_info(p);
-       unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
-       struct pt_regs *regs = (struct pt_regs *)ksp - 1;
 #if (KGDB_GDB_REG_SIZE == 32)
        u32 *ptr = (u32 *)gdb_regs;
 #else
@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 #endif
 
        for (reg = 0; reg < 16; reg++)
-               *(ptr++) = regs->regs[reg];
+               *(ptr++) = 0;
 
        /* S0 - S7 */
-       for (reg = 16; reg < 24; reg++)
-               *(ptr++) = regs->regs[reg];
+       *(ptr++) = p->thread.reg16;
+       *(ptr++) = p->thread.reg17;
+       *(ptr++) = p->thread.reg18;
+       *(ptr++) = p->thread.reg19;
+       *(ptr++) = p->thread.reg20;
+       *(ptr++) = p->thread.reg21;
+       *(ptr++) = p->thread.reg22;
+       *(ptr++) = p->thread.reg23;
 
        for (reg = 24; reg < 28; reg++)
                *(ptr++) = 0;
 
        /* GP, SP, FP, RA */
-       for (reg = 28; reg < 32; reg++)
-               *(ptr++) = regs->regs[reg];
-
-       *(ptr++) = regs->cp0_status;
-       *(ptr++) = regs->lo;
-       *(ptr++) = regs->hi;
-       *(ptr++) = regs->cp0_badvaddr;
-       *(ptr++) = regs->cp0_cause;
-       *(ptr++) = regs->cp0_epc;
+       *(ptr++) = (long)p;
+       *(ptr++) = p->thread.reg29;
+       *(ptr++) = p->thread.reg30;
+       *(ptr++) = p->thread.reg31;
+
+       *(ptr++) = p->thread.cp0_status;
+
+       /* lo, hi */
+       *(ptr++) = 0;
+       *(ptr++) = 0;
+
+       /*
+        * BadVAddr, Cause
+        * Ideally these would come from the last exception frame up the stack
+        * but that requires unwinding, otherwise we can't know much for sure.
+        */
+       *(ptr++) = 0;
+       *(ptr++) = 0;
+
+       /*
+        * PC
+        * use return address (RA), i.e. the moment after return from resume()
+        */
+       *(ptr++) = p->thread.reg31;
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
index 8c35b3152e1eb35cab7f8f605fe48163310b5024..9452b02ce0797e7ac890d1785f50907d9e747b5d 100644 (file)
@@ -1446,6 +1446,11 @@ static int mipsxx_pmu_handle_shared_irq(void)
        HANDLE_COUNTER(0)
        }
 
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+       read_unlock(&pmuint_rwlock);
+#endif
+       resume_local_counters();
+
        /*
         * Do all the work for the pending perf events. We can do this
         * in here because the performance counter interrupt is a regular
@@ -1454,10 +1459,6 @@ static int mipsxx_pmu_handle_shared_irq(void)
        if (handled == IRQ_HANDLED)
                irq_work_run();
 
-#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
-       read_unlock(&pmuint_rwlock);
-#endif
-       resume_local_counters();
        return handled;
 }
 
index 9103bebc9a8eef76e3d524b63b74aa91936f3663..2d1a0c4387713c7862e06bbf79e8c904a07b2279 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/libfdt.h>
 #include <linux/of_fdt.h>
-#include <linux/sched.h>
+#include <linux/sched/task.h>
 #include <linux/start_kernel.h>
 #include <linux/string.h>
 #include <linux/printk.h>
index 6d45f05538c8b37db5fff707d50daa42ef8b097a..795b4aaf89277be344e2e3e512a3642fb3085f5a 100644 (file)
@@ -422,13 +422,12 @@ void play_dead(void)
        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
+       core = cpu_data[cpu].core;
        cpu_death = CPU_DEATH_POWER;
 
        pr_debug("CPU%d going offline\n", cpu);
 
        if (cpu_has_mipsmt || cpu_has_vp) {
-               core = cpu_data[cpu].core;
-
                /* Look for another online VPE within the core */
                for_each_online_cpu(cpu_death_sibling) {
                        if (cpu_data[cpu_death_sibling].core != core)
index cb675ec6f283ee9d08071e9b112845bddd5c1594..54f56d5a96c46ec8c3b2b1a6e8bcdc06dbf5e02a 100644 (file)
@@ -232,6 +232,17 @@ void __init arch_init_irq(void)
 {
        int corehi_irq;
 
+       /*
+        * Preallocate the i8259's expected virq's here. Since irqchip_init()
+        * will probe the irqchips in hierarchial order, i8259 is probed last.
+        * If anything allocates a virq before the i8259 is probed, it will
+        * be given one of the i8259's expected range and consequently setup
+        * of the i8259 will fail.
+        */
+       WARN(irq_alloc_descs(I8259A_IRQ_BASE, I8259A_IRQ_BASE,
+                           16, numa_node_id()) < 0,
+               "Cannot reserve i8259 virqs at IRQ%d\n", I8259A_IRQ_BASE);
+
        i8259_set_poll(mips_pcibios_iack);
        irqchip_init();
 
index 014649be158d95f0cce86493ac7186b7ceec73d8..3a84f6c0c840569aa4f9db2d15b31c8079cfd39e 100644 (file)
@@ -190,7 +190,7 @@ void register_pci_controller(struct pci_controller *hose)
        }
 
        INIT_LIST_HEAD(&hose->list);
-       list_add(&hose->list, &controllers);
+       list_add_tail(&hose->list, &controllers);
 
        /*
         * Do not panic here but later - this might happen before console init.
index 8442727f28d2732eecb583d46bdcc88258a590dd..cbd4f4af8108bc8fa9ec36504589eac7107e6b1c 100644 (file)
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)          __get_user_asm64(ptr)
+#define LDD_USER(val, ptr)     __get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)       __put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)          __get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)     __get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)       __put_user_asm("std", x, ptr)
 #endif
 
@@ -97,63 +97,87 @@ struct exception_data {
                " mtsp %0,%%sr2\n\t"            \
                : : "r"(get_fs()) : )
 
-#define __get_user(x, ptr)                               \
-({                                                       \
-       register long __gu_err __asm__ ("r8") = 0;       \
-       register long __gu_val;                          \
-                                                        \
-       load_sr2();                                      \
-       switch (sizeof(*(ptr))) {                        \
-           case 1: __get_user_asm("ldb", ptr); break;   \
-           case 2: __get_user_asm("ldh", ptr); break;   \
-           case 4: __get_user_asm("ldw", ptr); break;   \
-           case 8: LDD_USER(ptr);  break;               \
-           default: BUILD_BUG(); break;                 \
-       }                                                \
-                                                        \
-       (x) = (__force __typeof__(*(ptr))) __gu_val;     \
-       __gu_err;                                        \
+#define __get_user_internal(val, ptr)                  \
+({                                                     \
+       register long __gu_err __asm__ ("r8") = 0;      \
+                                                       \
+       switch (sizeof(*(ptr))) {                       \
+       case 1: __get_user_asm(val, "ldb", ptr); break; \
+       case 2: __get_user_asm(val, "ldh", ptr); break; \
+       case 4: __get_user_asm(val, "ldw", ptr); break; \
+       case 8: LDD_USER(val, ptr); break;              \
+       default: BUILD_BUG();                           \
+       }                                               \
+                                                       \
+       __gu_err;                                       \
 })
 
-#define __get_user_asm(ldx, ptr)                        \
+#define __get_user(val, ptr)                           \
+({                                                     \
+       load_sr2();                                     \
+       __get_user_internal(val, ptr);                  \
+})
+
+#define __get_user_asm(val, ldx, ptr)                  \
+{                                                      \
+       register long __gu_val;                         \
+                                                       \
        __asm__("1: " ldx " 0(%%sr2,%2),%0\n"           \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = (__force __typeof__(*(ptr))) __gu_val;  \
+}
 
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(ptr)                          \
+#define __get_user_asm64(val, ptr)                     \
+{                                                      \
+       union {                                         \
+               unsigned long long      l;              \
+               __typeof__(*(ptr))      t;              \
+       } __gu_tmp;                                     \
+                                                       \
        __asm__("   copy %%r0,%R0\n"                    \
                "1: ldw 0(%%sr2,%2),%0\n"               \
                "2: ldw 4(%%sr2,%2),%R0\n"              \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
-               : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "=&r"(__gu_tmp.l), "=r"(__gu_err)     \
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = __gu_tmp.t;                             \
+}
 
 #endif /* !defined(CONFIG_64BIT) */
 
 
-#define __put_user(x, ptr)                                      \
+#define __put_user_internal(x, ptr)                            \
 ({                                                             \
        register long __pu_err __asm__ ("r8") = 0;              \
         __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);      \
                                                                \
-       load_sr2();                                             \
        switch (sizeof(*(ptr))) {                               \
-           case 1: __put_user_asm("stb", __x, ptr); break;     \
-           case 2: __put_user_asm("sth", __x, ptr); break;     \
-           case 4: __put_user_asm("stw", __x, ptr); break;     \
-           case 8: STD_USER(__x, ptr); break;                  \
-           default: BUILD_BUG(); break;                        \
-       }                                                       \
+       case 1: __put_user_asm("stb", __x, ptr); break;         \
+       case 2: __put_user_asm("sth", __x, ptr); break;         \
+       case 4: __put_user_asm("stw", __x, ptr); break;         \
+       case 8: STD_USER(__x, ptr); break;                      \
+       default: BUILD_BUG();                                   \
+       }                                                       \
                                                                \
        __pu_err;                                               \
 })
 
+#define __put_user(x, ptr)                                     \
+({                                                             \
+       load_sr2();                                             \
+       __put_user_internal(x, ptr);                            \
+})
+
+
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
index 14752eee3d0c44816a61b395970186c1e56d4a21..ed3beadd2cc515d1b8a99b4836b3f06c1a97a87c 100644 (file)
@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
        mtctr   reg;                                                    \
        bctr
 
-#define BRANCH_LINK_TO_FAR(reg, label)                                 \
-       __LOAD_FAR_HANDLER(reg, label);                                 \
-       mtctr   reg;                                                    \
+#define BRANCH_LINK_TO_FAR(label)                                      \
+       __LOAD_FAR_HANDLER(r12, label);                                 \
+       mtctr   r12;                                                    \
        bctrl
 
 /*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define BRANCH_TO_COMMON(reg, label)                                   \
        b       label
 
-#define BRANCH_LINK_TO_FAR(reg, label)                                 \
+#define BRANCH_LINK_TO_FAR(label)                                      \
        bl      label
 
 #define BRANCH_TO_KVM(reg, label)                                      \
index 6432d4bf08c889c128803cc5505546122118b964..767ef6d68c9ebf379dad6f065bcc0279a3021bb9 100644 (file)
@@ -689,7 +689,7 @@ resume_kernel:
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
 
-       lwz     r3,GPR1(r1)
+       ld      r3,GPR1(r1)
        subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception frame */
        mr      r4,r1                   /* src:  current exception frame */
        mr      r1,r3                   /* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
        addi    r6,r6,8
        bdnz    2b
 
-       /* Do real store operation to complete stwu */
-       lwz     r5,GPR1(r1)
+       /* Do real store operation to complete stdu */
+       ld      r5,GPR1(r1)
        std     r8,0(r5)
 
        /* Clear _TIF_EMULATE_STACK_STORE flag */
index 857bf7c5b9465aff743b3e62094cc47d808c8c92..6353019966e6a3bd15afa32d7318c47810a1d8e9 100644 (file)
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
        EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
        EXCEPTION_PROLOG_COMMON_3(0xe60)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
+       BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
        /* Windup the stack. */
        /* Move original HSRR0 and HSRR1 into the respective regs */
        ld      r9,_MSR(r1)
index 93e37b12e88237766821369e19827e5e2d844a1b..ecec682bb5166a41d9c86025fd3f4e9461f6ec71 100644 (file)
@@ -1051,6 +1051,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
        if (!MACHINE_HAS_NX)
                pte_val(entry) &= ~_PAGE_NOEXEC;
+       if (pte_present(entry))
+               pte_val(entry) &= ~_PAGE_UNUSED;
        if (mm_has_pgste(mm))
                ptep_set_pte_at(mm, addr, ptep, entry);
        else
index 68ac5c7cd982619581dfa65b75ac89aa8e359554..3db2543733a5874f7a8a5e76275eb505cc1c3db3 100644 (file)
@@ -43,7 +43,7 @@ config SPARC
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
        select HAVE_ARCH_HARDENED_USERCOPY
-       select PROVE_LOCKING_SMALL if PROVE_LOCKING
+       select LOCKDEP_SMALL if LOCKDEP
        select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
@@ -82,6 +82,7 @@ config SPARC64
        select HAVE_ARCH_AUDITSYSCALL
        select ARCH_SUPPORTS_ATOMIC_RMW
        select HAVE_NMI
+       select HAVE_REGS_AND_STACK_ACCESS_API
 
 config ARCH_DEFCONFIG
        string
index ca57f08bd3dba57e5e06e17de20eeee856e97bed..d73428e4333c980486561799aba8fc31ccc123b9 100644 (file)
@@ -83,7 +83,8 @@ unsigned long profile_pc(struct pt_regs *);
 
 #define MAX_REG_OFFSET (offsetof(struct pt_regs, magic))
 
-extern int regs_query_register_offset(const char *name);
+int regs_query_register_offset(const char *name);
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
 
 /**
  * regs_get_register() - get register value from its offset
index 36eee8132c22bac329e99fb7284211e11310ff26..ae77df75bffadd1e376a1baec7cb26286497ffb6 100644 (file)
 #define __NR_copy_file_range   357
 #define __NR_preadv2           358
 #define __NR_pwritev2          359
+#define __NR_statx             360
 
-#define NR_syscalls            360
+#define NR_syscalls            361
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
 #define __IGNORE_getresgid
 #endif
 
+/* Sparc doesn't have protection keys. */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
 #endif /* _UAPI_SPARC_UNISTD_H */
index fc5124ccdb53c7abc43e381ba098c453892a7640..e1d965e90e1697a8205ca505dbf428cb37b30312 100644 (file)
@@ -1162,3 +1162,39 @@ int regs_query_register_offset(const char *name)
                        return roff->offset;
        return -EINVAL;
 }
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @addr:      address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+                                          unsigned long addr)
+{
+       unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
+       return ((addr & ~(THREAD_SIZE - 1))  ==
+               (ksp & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @n:         stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+       unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
+       unsigned long *addr = (unsigned long *)ksp;
+       addr += n;
+       if (regs_within_kernel_stack(regs, (unsigned long)addr))
+               return *addr;
+       else
+               return 0;
+}
index eac7f0db5c8c6269a913152a11941f66f5f3e8d0..5253e895b81b7214626e1afaf0c6157ed00519aa 100644 (file)
@@ -89,3 +89,4 @@ sys_call_table:
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
 /*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
+/*360*/        .long sys_statx
index b0f17ff2ddba2daa75a8e8a646b5661dfe0d36e9..82339f6be0b2d56427a2b9e0f0e75ef5c8ab211a 100644 (file)
@@ -90,6 +90,7 @@ sys_call_table32:
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
        .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
+/*360*/        .word sys_statx
 
 #endif /* CONFIG_COMPAT */
 
@@ -171,3 +172,4 @@ sys_call_table:
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
        .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
+/*360*/        .word sys_statx
index ee5273ad918de6302cb0f6644b2a4179f8c26351..7c29d38e6b99c68c5ac746eb3d3a3fe1da8394a5 100644 (file)
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
        pgd_t *pgd;
        unsigned long next;
 
+       addr &= PMD_MASK;
+       if (addr < floor) {
+               addr += PMD_SIZE;
+               if (!addr)
+                       return;
+       }
+       if (ceiling) {
+               ceiling &= PMD_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               end -= PMD_SIZE;
+       if (addr > end - 1)
+               return;
+
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
index 1e5a50c11d3c3546a59d286cbb84c402682cfb2b..217cd4449bc9db3aedfd6367529bc9ca99fb8443 100644 (file)
@@ -85,7 +85,7 @@ void mce_gen_pool_process(struct work_struct *__unused)
        head = llist_reverse_order(head);
        llist_for_each_entry_safe(node, tmp, head, llnode) {
                mce = &node->mce;
-               atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+               blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
                gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
        }
 }
index 903043e6a62b36a2c395c7aab52da6f85e34fc11..19592ba1a320030a4cbdc59aa8194f14bcefae69 100644 (file)
@@ -13,7 +13,7 @@ enum severity_level {
        MCE_PANIC_SEVERITY,
 };
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern struct blocking_notifier_head x86_mce_decoder_chain;
 
 #define ATTR_LEN               16
 #define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */
index 5accfbdee3f06fac48eaf4423d66d7bee1e3a0a3..af44ebeb593fb60f70bb497de3e8f09b4b988333 100644 (file)
@@ -123,7 +123,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
  */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
@@ -220,7 +220,7 @@ void mce_register_decode_chain(struct notifier_block *nb)
 
        WARN_ON(nb->priority > MCE_PRIO_LOWEST && nb->priority < MCE_PRIO_EDAC);
 
-       atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+       blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 
@@ -228,7 +228,7 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
 {
        atomic_dec(&num_notifiers);
 
-       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+       blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 
@@ -321,18 +321,7 @@ static void __print_mce(struct mce *m)
 
 static void print_mce(struct mce *m)
 {
-       int ret = 0;
-
        __print_mce(m);
-
-       /*
-        * Print out human-readable details about the MCE error,
-        * (if the CPU has an implementation for that)
-        */
-       ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
-       if (ret == NOTIFY_STOP)
-               return;
-
        pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
index 572966f495966b7fe8ce486ad4e5912a0d044eb5..c7836a1ded973e23026143edb95e36a4e86ad294 100644 (file)
@@ -2928,8 +2928,17 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
        hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
        if (!blk_qc_t_is_internal(cookie))
                rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-       else
+       else {
                rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+               /*
+                * With scheduling, if the request has completed, we'll
+                * get a NULL return here, as we clear the sched tag when
+                * that happens. The request still remains valid, like always,
+                * so we should be safe with just the NULL check.
+                */
+               if (!rq)
+                       return false;
+       }
 
        return __blk_mq_poll(hctx, rq);
 }
index dbeecf7be719eaada4457ed3c7ac799fd0bbacec..4d9084a14c1093b1c00d79f079663a643c5d05fe 100644 (file)
@@ -1098,12 +1098,20 @@ int elevator_change(struct request_queue *q, const char *name)
 }
 EXPORT_SYMBOL(elevator_change);
 
+static inline bool elv_support_iosched(struct request_queue *q)
+{
+       if (q->mq_ops && q->tag_set && (q->tag_set->flags &
+                               BLK_MQ_F_NO_SCHED))
+               return false;
+       return true;
+}
+
 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
 {
        int ret;
 
-       if (!(q->mq_ops || q->request_fn))
+       if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
                return count;
 
        ret = __elevator_change(q, name);
@@ -1135,7 +1143,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
-               if (__e->uses_mq && q->mq_ops)
+               if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
                        len += sprintf(name+len, "%s ", __e->elevator_name);
                else if (!__e->uses_mq && !q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
index e58c4970c22b7cc1cdb5e8f08875d9c1e7714568..826cd7ab4d4a2ec830438b40e987e230bd03f32f 100644 (file)
@@ -32,6 +32,7 @@ struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
+       u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
+       priv->flags = req->base.flags;
+
        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
        struct ahash_request_priv *priv = req->priv;
 
+       if (!err)
+               memcpy(priv->result, req->result,
+                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
        /* Restore the original crypto request. */
        req->result = priv->result;
-       req->base.complete = priv->complete;
-       req->base.data = priv->data;
+
+       ahash_request_set_callback(req, priv->flags,
+                                  priv->complete, priv->data);
        req->priv = NULL;
 
        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
        struct ahash_request_priv *priv = req->priv;
+       struct crypto_async_request oreq;
 
-       if (err == -EINPROGRESS)
-               return;
-
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+       oreq.data = priv->data;
 
-       ahash_restore_req(req);
+       priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
         */
 
        /* First copy req->result into req->priv.result */
-       ahash_op_unaligned_finish(areq, err);
+       ahash_restore_req(areq, err);
 
        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
                return err;
 
        err = op(req);
-       ahash_op_unaligned_finish(req, err);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
+       ahash_restore_req(req, err);
 
        return err;
 }
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
-       struct ahash_request_priv *priv = req->priv;
+       struct ahash_request *areq = req->data;
 
        if (err == -EINPROGRESS)
                return;
 
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-       ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
-       struct ahash_request *areq = req->data;
-
-       ahash_def_finup_finish2(areq, err);
+       ahash_restore_req(areq, err);
 
        areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
                goto out;
 
        req->base.complete = ahash_def_finup_done2;
-       req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = crypto_ahash_reqtfm(req)->final(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
 
 out:
-       ahash_def_finup_finish2(req, err);
+       ahash_restore_req(req, err);
        return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
+       areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = ahash_def_finup_finish1(areq, err);
+       if (areq->priv)
+               return;
 
        areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
                return err;
 
        err = tfm->update(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
        return ahash_def_finup_finish1(req, err);
 }
 
index 5a805375865731f4cc7e94790362c208dd58d05b..ef59d9926ee99bccfbdc6077451cb95008d07651 100644 (file)
@@ -40,6 +40,7 @@ struct aead_async_req {
        struct aead_async_rsgl first_rsgl;
        struct list_head list;
        struct kiocb *iocb;
+       struct sock *sk;
        unsigned int tsgls;
        char iv[];
 };
@@ -379,12 +380,10 @@ unlock:
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-       struct sock *sk = _req->data;
-       struct alg_sock *ask = alg_sk(sk);
-       struct aead_ctx *ctx = ask->private;
-       struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-       struct aead_request *req = aead_request_cast(_req);
+       struct aead_request *req = _req->data;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+       struct sock *sk = areq->sk;
        struct scatterlist *sg = areq->tsgl;
        struct aead_async_rsgl *rsgl;
        struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
        memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
        INIT_LIST_HEAD(&areq->list);
        areq->iocb = msg->msg_iocb;
+       areq->sk = sk;
        memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
        aead_request_set_tfm(req, tfm);
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                 aead_async_cb, sk);
+                                 aead_async_cb, req);
        used -= ctx->aead_assoclen;
 
        /* take over all tx sgls from ctx */
index 3ea095adafd9af0067f695c4c70e2af702f70923..a8bfae4451bfcbfd26e25bbb39280818dcc139d4 100644 (file)
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
index c976bfac29da526844f6a5578fea1455945c26f3..89ace5ebc2da88df3e049e89e1d2640b2e361e3d 100644 (file)
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
index fcd4ce6f78d5d387080343d96738c3b7275d0324..1c2b846c577604d0b471e87d19cecea6caa286f9 100644 (file)
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
                return -EINVAL;
 
        /* The state of the list is 'on' IFF all resources are 'on'. */
+       cur_state = 0;
        list_for_each_entry(entry, list, node) {
                struct acpi_power_resource *resource = entry->resource;
                acpi_handle handle = resource->device.handle;
index f96ab717534c4c8fe60e830c4020c6bd0517cf07..1d1dc11aa5faef6a9422faeb0fa7bd3a2732e860 100644 (file)
@@ -3969,7 +3969,7 @@ static int mtip_block_initialize(struct driver_data *dd)
        dd->tags.reserved_tags = 1;
        dd->tags.cmd_size = sizeof(struct mtip_cmd);
        dd->tags.numa_node = dd->numa_node;
-       dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+       dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
        dd->tags.driver_data = dd;
        dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
 
index ab609a76706f7bb0258ce47dda61366a0602d5cc..cf9449b3dbd9742bd8a3559c9939af9e057d9b5f 100644 (file)
@@ -429,6 +429,13 @@ static const struct clk_div_table pll_divp_table[] = {
        { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 }
 };
 
+static const struct clk_div_table pll_divq_table[] = {
+       { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
+       { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 },
+       { 14, 14 }, { 15, 15 },
+       { 0 }
+};
+
 static const struct clk_div_table pll_divr_table[] = {
        { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
 };
@@ -496,9 +503,9 @@ struct stm32f4_div_data {
 
 #define MAX_PLL_DIV 3
 static const struct stm32f4_div_data  div_data[MAX_PLL_DIV] = {
-       { 16, 2, 0,                     pll_divp_table  },
-       { 24, 4, CLK_DIVIDER_ONE_BASED, NULL            },
-       { 28, 3, 0,                     pll_divr_table  },
+       { 16, 2, 0, pll_divp_table },
+       { 24, 4, 0, pll_divq_table },
+       { 28, 3, 0, pll_divr_table },
 };
 
 struct stm32f4_pll_data {
index 72109d2cf41b29da83dd88dbdb0e820364c893dd..a077ab6edffae759564362b85fd596887ea7e89a 100644 (file)
@@ -1,6 +1,7 @@
 config SUNXI_CCU
        bool "Clock support for Allwinner SoCs"
        depends on ARCH_SUNXI || COMPILE_TEST
+       select RESET_CONTROLLER
        default ARCH_SUNXI
 
 if SUNXI_CCU
@@ -15,7 +16,7 @@ config SUNXI_CCU_FRAC
        bool
 
 config SUNXI_CCU_GATE
-       bool
+       def_bool y
 
 config SUNXI_CCU_MUX
        bool
@@ -135,6 +136,7 @@ config SUN8I_V3S_CCU
 config SUN9I_A80_CCU
        bool "Support for the Allwinner A80 CCU"
        select SUNXI_CCU_DIV
+       select SUNXI_CCU_MULT
        select SUNXI_CCU_GATE
        select SUNXI_CCU_NKMP
        select SUNXI_CCU_NM
index a7b3c08ed0e232c0cf41ae419f4980bf1614590e..2c69b631967aea3ae81389d29b20e8014c3030e1 100644 (file)
@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
        .num_resets     = ARRAY_SIZE(sun8i_a33_ccu_resets),
 };
 
+static struct ccu_pll_nb sun8i_a33_pll_cpu_nb = {
+       .common = &pll_cpux_clk.common,
+       /* copy from pll_cpux_clk */
+       .enable = BIT(31),
+       .lock   = BIT(28),
+};
+
 static struct ccu_mux_nb sun8i_a33_cpu_nb = {
        .common         = &cpux_clk.common,
        .cm             = &cpux_clk.mux,
@@ -783,6 +790,10 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
 
        sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
 
+       /* Gate then ungate PLL CPU after any rate changes */
+       ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
+
+       /* Reparent CPU during PLL CPU rate changes */
        ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
                                  &sun8i_a33_cpu_nb);
 }
index 8a47bafd78905bce849235d791f1469d448afcc9..9d8724715a4352ddd07a411945bc1cd58367053d 100644 (file)
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 #include <linux/slab.h>
 
 #include "ccu_common.h"
+#include "ccu_gate.h"
 #include "ccu_reset.h"
 
 static DEFINE_SPINLOCK(ccu_lock);
@@ -39,6 +41,53 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
        WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
 }
 
+/*
+ * This clock notifier is called when the frequency of a PLL clock is
+ * changed. In common PLL designs, changes to the dividers take effect
+ * almost immediately, while changes to the multipliers (implemented
+ * as dividers in the feedback loop) take a few cycles to work into
+ * the feedback loop for the PLL to stablize.
+ *
+ * Sometimes when the PLL clock rate is changed, the decrease in the
+ * divider is too much for the decrease in the multiplier to catch up.
+ * The PLL clock rate will spike, and in some cases, might lock up
+ * completely.
+ *
+ * This notifier callback will gate and then ungate the clock,
+ * effectively resetting it, so it proceeds to work. Care must be
+ * taken to reparent consumers to other temporary clocks during the
+ * rate change, and that this notifier callback must be the first
+ * to be registered.
+ */
+static int ccu_pll_notifier_cb(struct notifier_block *nb,
+                              unsigned long event, void *data)
+{
+       struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
+       int ret = 0;
+
+       if (event != POST_RATE_CHANGE)
+               goto out;
+
+       ccu_gate_helper_disable(pll->common, pll->enable);
+
+       ret = ccu_gate_helper_enable(pll->common, pll->enable);
+       if (ret)
+               goto out;
+
+       ccu_helper_wait_for_lock(pll->common, pll->lock);
+
+out:
+       return notifier_from_errno(ret);
+}
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
+{
+       pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
+
+       return clk_notifier_register(pll_nb->common->hw.clk,
+                                    &pll_nb->clk_nb);
+}
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
                    const struct sunxi_ccu_desc *desc)
 {
index 73d81dc58fc5ad91f8a293530aa89e2b22fcbdb3..d6fdd7a789aa746a72f939fb51c552004941fd37 100644 (file)
@@ -83,6 +83,18 @@ struct sunxi_ccu_desc {
 
 void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock);
 
+struct ccu_pll_nb {
+       struct notifier_block   clk_nb;
+       struct ccu_common       *common;
+
+       u32     enable;
+       u32     lock;
+};
+
+#define to_ccu_pll_nb(_nb) container_of(_nb, struct ccu_pll_nb, clk_nb)
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
                    const struct sunxi_ccu_desc *desc);
 
index 94250c293be2a18b247e2be006a0e7e4faf4f6f8..c68ac65db7ffec361169c326ede5bbf263a00b1b 100644 (file)
@@ -2006,7 +2006,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                return;
        case HID_DG_TOOLSERIALNUMBER:
                wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-               wacom_wac->serial[0] |= value;
+               wacom_wac->serial[0] |= (__u32)value;
                return;
        case WACOM_HID_WD_SENSE:
                wacom_wac->hid_data.sense_state = value;
@@ -2176,6 +2176,16 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
                wacom_wac->hid_data.cc_index = field->index;
                wacom_wac->hid_data.cc_value_index = usage->usage_index;
                break;
+       case HID_DG_CONTACTID:
+               if ((field->logical_maximum - field->logical_minimum) < touch_max) {
+                       /*
+                        * The HID descriptor for G11 sensors leaves logical
+                        * maximum set to '1' despite it being a multitouch
+                        * device. Override to a sensible number.
+                        */
+                       field->logical_maximum = 255;
+               }
+               break;
        }
 }
 
index efc8ec3423514ad33dd2ebaf0c861d41bb960140..e73d968023f7ce7de418dcf1315b7c554773d604 100644 (file)
@@ -1118,6 +1118,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
@@ -1523,6 +1524,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+               },
+       },
        {
                /* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
                .matches = {
index 312bd6ca919806f2593e12814e5037469d609c0f..09720d950686c844b49f1d7f32710e160d21624a 100644 (file)
@@ -620,6 +620,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
                },
        },
+       {
+               /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+               },
+       },
        { }
 };
 
index e992a7f8a16fc3019016aa1f2844cfbfb437ad97..2b32b88949ba40dfb3901cf9588f670b7aad8441 100644 (file)
@@ -267,7 +267,7 @@ static void sdio_release_func(struct device *dev)
        sdio_free_func_cis(func);
 
        kfree(func->info);
-
+       kfree(func->tmpbuf);
        kfree(func);
 }
 
@@ -282,6 +282,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
        if (!func)
                return ERR_PTR(-ENOMEM);
 
+       /*
+        * allocate buffer separately to make sure it's properly aligned for
+        * DMA usage (incl. 64 bit DMA)
+        */
+       func->tmpbuf = kmalloc(4, GFP_KERNEL);
+       if (!func->tmpbuf) {
+               kfree(func);
+               return ERR_PTR(-ENOMEM);
+       }
+
        func->card = card;
 
        device_initialize(&func->dev);
index a9ac0b4573131f48cad46044e018b5de479cf695..8718432751c50c6d5a955b104c250b151430d19c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
@@ -1621,10 +1622,16 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 
                if (card->type == MMC_TYPE_SDIO ||
                    card->type == MMC_TYPE_SD_COMBO) {
-                       set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+                               pm_runtime_get_noresume(mmc->parent);
+                               set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       }
                        clk_en_a = clk_en_a_old & ~clken_low_pwr;
                } else {
-                       clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+                               pm_runtime_put_noidle(mmc->parent);
+                               clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       }
                        clk_en_a = clk_en_a_old | clken_low_pwr;
                }
 
index 7123ef96ed18523c88553146035103f3517bd372..445fc47dc3e77e39b17e7531eb7dbed58ddd3279 100644 (file)
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
 
        switch (uhs) {
        case MMC_TIMING_UHS_SDR50:
+       case MMC_TIMING_UHS_DDR50:
                pinctrl = imx_data->pins_100mhz;
                break;
        case MMC_TIMING_UHS_SDR104:
index 0134ba32a05784b65d1a0e6d470eee7a857df74c..39712560b4c1b55aa847f0ac69f6815edb11e678 100644 (file)
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
                        return err;
        }
 
-       if (bytes == 0) {
-               err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
-               if (err)
-                       return err;
+       err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+       if (err)
+               return err;
 
+       if (bytes == 0) {
                err = clear_update_marker(ubi, vol, 0);
                if (err)
                        return err;
index 8a4ba8b88e52f9d5b1ba318e5dbfb53344f6ebca..34481c9be1d192137e2dc7e3c8475184dfa3bec0 100644 (file)
@@ -1104,11 +1104,11 @@ static void bond_compute_features(struct bonding *bond)
                gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
                gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
        }
+       bond_dev->hard_header_len = max_hard_header_len;
 
 done:
        bond_dev->vlan_features = vlan_features;
        bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
-       bond_dev->hard_header_len = max_hard_header_len;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
 
index 8483a40e7e9ef52327e15e03fa4100c73f68a53f..5f9e0e6301d06ecbe466e2f4eb4e8b7fee8ece27 100644 (file)
@@ -72,6 +72,8 @@ config CAN_PEAK_USB
          PCAN-USB Pro         dual CAN 2.0b channels USB adapter
          PCAN-USB FD          single CAN-FD channel USB adapter
          PCAN-USB Pro FD      dual CAN-FD channels USB adapter
+         PCAN-Chip USB        CAN-FD to USB stamp module
+         PCAN-USB X6          6 CAN-FD channels USB adapter
 
          (see also http://www.peak-system.com).
 
index 300349fe8dc04945d956ec0dd17a470aa7ddb426..eecee7f8dfb70763aa0e7c3f90f85ae66ff85385 100644 (file)
@@ -739,13 +739,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 {
        struct gs_can *dev = netdev_priv(netdev);
-       struct gs_identify_mode imode;
+       struct gs_identify_mode *imode;
        int rc;
 
+       imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+       if (!imode)
+               return -ENOMEM;
+
        if (do_identify)
-               imode.mode = GS_CAN_IDENTIFY_ON;
+               imode->mode = GS_CAN_IDENTIFY_ON;
        else
-               imode.mode = GS_CAN_IDENTIFY_OFF;
+               imode->mode = GS_CAN_IDENTIFY_OFF;
 
        rc = usb_control_msg(interface_to_usbdev(dev->iface),
                             usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -755,10 +760,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
                             USB_RECIP_INTERFACE,
                             dev->channel,
                             0,
-                            &imode,
-                            sizeof(imode),
+                            imode,
+                            sizeof(*imode),
                             100);
 
+       kfree(imode);
+
        return (rc > 0) ? 0 : rc;
 }
 
index 0b0302af3bd2dc3da893ed120afa028d21b219da..57913dbbae0a970f5f28051d10f064792d69333c 100644 (file)
@@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = {
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+       {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)},
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
        {} /* Terminating entry */
 };
@@ -51,6 +52,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
        &pcan_usb_pro,
        &pcan_usb_fd,
        &pcan_usb_pro_fd,
+       &pcan_usb_chip,
        &pcan_usb_x6,
 };
 
index 3cbfb069893d5ce1d2da16969426433240ad4ce6..c01316cac354b364a422b3312efa7756771223b8 100644 (file)
@@ -27,6 +27,7 @@
 #define PCAN_USBPRO_PRODUCT_ID         0x000d
 #define PCAN_USBPROFD_PRODUCT_ID       0x0011
 #define PCAN_USBFD_PRODUCT_ID          0x0012
+#define PCAN_USBCHIP_PRODUCT_ID                0x0013
 #define PCAN_USBX6_PRODUCT_ID          0x0014
 
 #define PCAN_USB_DRIVER_NAME           "peak_usb"
@@ -90,6 +91,7 @@ struct peak_usb_adapter {
 extern const struct peak_usb_adapter pcan_usb;
 extern const struct peak_usb_adapter pcan_usb_pro;
 extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_chip;
 extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;
 
index 304732550f0a628a7fa9ae9c94e67113d9a869a0..528d3bb4917f1ecb5caf2ef67a71ae8adad51c44 100644 (file)
@@ -1061,6 +1061,78 @@ const struct peak_usb_adapter pcan_usb_fd = {
        .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
 };
 
+/* describes the PCAN-CHIP USB */
+static const struct can_bittiming_const pcan_usb_chip_const = {
+       .name = "pcan_chip_usb",
+       .tseg1_min = 1,
+       .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+       .tseg2_min = 1,
+       .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+       .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+       .brp_min = 1,
+       .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_chip_data_const = {
+       .name = "pcan_chip_usb",
+       .tseg1_min = 1,
+       .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+       .tseg2_min = 1,
+       .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+       .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+       .brp_min = 1,
+       .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+       .brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_chip = {
+       .name = "PCAN-Chip USB",
+       .device_id = PCAN_USBCHIP_PRODUCT_ID,
+       .ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+       .ctrlmode_supported = CAN_CTRLMODE_FD |
+               CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+       .clock = {
+               .freq = PCAN_UFD_CRYSTAL_HZ,
+       },
+       .bittiming_const = &pcan_usb_chip_const,
+       .data_bittiming_const = &pcan_usb_chip_data_const,
+
+       /* size of device private data */
+       .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+       /* timestamps usage */
+       .ts_used_bits = 32,
+       .ts_period = 1000000, /* calibration period in ts. */
+       .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+       .us_per_ts_shift = 0,
+
+       /* give here messages in/out endpoints */
+       .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+       .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+       /* size of rx/tx usb buffers */
+       .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+       .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+       /* device callbacks */
+       .intf_probe = pcan_usb_pro_probe,       /* same as PCAN-USB Pro */
+       .dev_init = pcan_usb_fd_init,
+
+       .dev_exit = pcan_usb_fd_exit,
+       .dev_free = pcan_usb_fd_free,
+       .dev_set_bus = pcan_usb_fd_set_bus,
+       .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+       .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+       .dev_decode_buf = pcan_usb_fd_decode_buf,
+       .dev_start = pcan_usb_fd_start,
+       .dev_stop = pcan_usb_fd_stop,
+       .dev_restart_async = pcan_usb_fd_restart_async,
+       .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+       .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
 /* describes the PCAN-USB Pro FD adapter */
 static const struct can_bittiming_const pcan_usb_pro_fd_const = {
        .name = "pcan_usb_pro_fd",
index 8cf4801994e883be64010934a0413cfbdb86ed16..fa0eece21eef9825e716dd7744c556211a0b41c3 100644 (file)
@@ -326,6 +326,7 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
 
 static void b53_set_forwarding(struct b53_device *dev, int enable)
 {
+       struct dsa_switch *ds = dev->ds;
        u8 mgmt;
 
        b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -336,6 +337,15 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
                mgmt &= ~SM_SW_FWD_EN;
 
        b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+       /* Include IMP port in dumb forwarding mode when no tagging protocol is
+        * set
+        */
+       if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
+               b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+               mgmt |= B53_MII_DUMB_FWDG_EN;
+               b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+       }
 }
 
 static void b53_enable_vlan(struct b53_device *dev, bool enable)
@@ -598,7 +608,8 @@ static void b53_switch_reset_gpio(struct b53_device *dev)
 
 static int b53_switch_reset(struct b53_device *dev)
 {
-       u8 mgmt;
+       unsigned int timeout = 1000;
+       u8 mgmt, reg;
 
        b53_switch_reset_gpio(dev);
 
@@ -607,6 +618,28 @@ static int b53_switch_reset(struct b53_device *dev)
                b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
        }
 
+       /* This is specific to 58xx devices here, do not use is58xx() which
+        * covers the larger Starfigther 2 family, including 7445/7278 which
+        * still use this driver as a library and need to perform the reset
+        * earlier.
+        */
+       if (dev->chip_id == BCM58XX_DEVICE_ID) {
+               b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+               reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+               b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+
+               do {
+                       b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+                       if (!(reg & SW_RST))
+                               break;
+
+                       usleep_range(1000, 2000);
+               } while (timeout-- > 0);
+
+               if (timeout == 0)
+                       return -ETIMEDOUT;
+       }
+
        b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
 
        if (!(mgmt & SM_SW_FWD_EN)) {
@@ -1731,7 +1764,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
                .vlans  = 4096,
                .enabled_ports = 0x1ff,
                .arl_entries = 4,
-               .cpu_port = B53_CPU_PORT_25,
+               .cpu_port = B53_CPU_PORT,
                .vta_regs = B53_VTA_REGS,
                .duplex_reg = B53_DUPLEX_STAT_GE,
                .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
index 9fd24c418fa4256e8517d5dec1dfa97681ba72db..e5c86d44667af1fd9f68a550f169a322989f39bd 100644 (file)
 #define  B53_UC_FWD_EN                 BIT(6)
 #define  B53_MC_FWD_EN                 BIT(7)
 
+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL                        0x22
+#define  B53_MII_DUMB_FWDG_EN          BIT(6)
+
 /* (16 bit) */
 #define B53_UC_FLOOD_MASK              0x32
 #define B53_MC_FLOOD_MASK              0x34
 /* Software reset register (8 bit) */
 #define B53_SOFTRESET                  0x79
 #define   SW_RST                       BIT(7)
+#define   EN_CH_RST                    BIT(6)
 #define   EN_SW_RST                    BIT(4)
 
 /* Fast Aging Control register (8 bit) */
index 64a1095e4d1495c1e32c3ff7008882789f6b6f6e..a0ca68ce3fbb164ea6e6a2c75a36a8c1ae54172d 100644 (file)
@@ -134,6 +134,7 @@ static void set_max_bgx_per_node(struct pci_dev *pdev)
        pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
        switch (sdevid) {
        case PCI_SUBSYS_DEVID_81XX_BGX:
+       case PCI_SUBSYS_DEVID_81XX_RGX:
                max_bgx_per_node = MAX_BGX_PER_CN81XX;
                break;
        case PCI_SUBSYS_DEVID_83XX_BGX:
index c5080f2cead5d0efc435fd827038eb7dbe4b5830..6b7fe6fdd13b9b27b4a1573e8c7b3869b17bcc19 100644 (file)
@@ -16,6 +16,7 @@
 /* Subsystem device IDs */
 #define PCI_SUBSYS_DEVID_88XX_BGX              0xA126
 #define PCI_SUBSYS_DEVID_81XX_BGX              0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX              0xA254
 #define PCI_SUBSYS_DEVID_83XX_BGX              0xA326
 
 #define    MAX_BGX_THUNDER                     8 /* Max 2 nodes, 4 per node */
index 9e757684816d48b903f62cdac2d6a1123e6c3305..93949139e62cf92a7e593b36d6a7399edbd1b6e6 100644 (file)
@@ -613,7 +613,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        struct mtk_tx_dma *itxd, *txd;
-       struct mtk_tx_buf *tx_buf;
+       struct mtk_tx_buf *itx_buf, *tx_buf;
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
@@ -627,8 +627,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
        txd4 |= fport;
 
-       tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-       memset(tx_buf, 0, sizeof(*tx_buf));
+       itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+       memset(itx_buf, 0, sizeof(*itx_buf));
 
        if (gso)
                txd4 |= TX_DMA_TSO;
@@ -647,9 +647,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                return -ENOMEM;
 
        WRITE_ONCE(itxd->txd1, mapped_addr);
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
+       itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+       itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+                         MTK_TX_FLAGS_FPORT1;
+       dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
+       dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
 
        /* TX SG offload */
        txd = itxd;
@@ -685,11 +687,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                                               last_frag * TX_DMA_LS0));
                        WRITE_ONCE(txd->txd4, fport);
 
-                       tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
                        memset(tx_buf, 0, sizeof(*tx_buf));
-
+                       tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+                       tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+                                        MTK_TX_FLAGS_FPORT1;
+
                        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
                        dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
                        frag_size -= frag_map_size;
@@ -698,7 +702,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* store skb to cleanup */
-       tx_buf->skb = skb;
+       itx_buf->skb = skb;
 
        WRITE_ONCE(itxd->txd4, txd4);
        WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
@@ -1012,17 +1016,16 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 
        while ((cpu != dma) && budget) {
                u32 next_cpu = desc->txd2;
-               int mac;
+               int mac = 0;
 
                desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
                if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
                        break;
 
-               mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-                      TX_DMA_FPORT_MASK;
-               mac--;
-
                tx_buf = mtk_desc_to_tx_buf(ring, desc);
+               if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+                       mac = 1;
+
                skb = tx_buf->skb;
                if (!skb) {
                        condition = 1;
index 99b1c8e9f16f981a0603f906280dcd98f7fa1b54..08285a96ff7077f83b5ac530e8f59266922819d1 100644 (file)
@@ -406,12 +406,18 @@ struct mtk_hw_stats {
        struct u64_stats_sync   syncp;
 };
 
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
 enum mtk_tx_flags {
+       /* PDMA descriptor can point at 1-2 segments. This enum allows us to
+        * track how memory was allocated so that it can be freed properly.
+        */
        MTK_TX_FLAGS_SINGLE0    = 0x01,
        MTK_TX_FLAGS_PAGE0      = 0x02,
+
+       /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
+        * SKB out instead of looking up through hardware TX descriptor.
+        */
+       MTK_TX_FLAGS_FPORT0     = 0x04,
+       MTK_TX_FLAGS_FPORT1     = 0x08,
 };
 
 /* This enum allows us to identify how the clock is defined on the array of the
index dc52053128bc752ccd398449330c24c0bdf8b3a1..3d9490cd2db19720d2b06f90d4cd322a3c87f4b0 100644 (file)
@@ -90,7 +90,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
 
 #define MLX5_UMR_ALIGN                         (2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (256)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT                       32
index d55fff0ba388f746809ac601fc3863e94309fc12..26fc77e80f7b38d45e52911233d5678da6334eb1 100644 (file)
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *i
        int idx = 0;
        int err = 0;
 
+       info->data = MAX_NUM_OF_ETHTOOL_RULES;
        while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
                err = mlx5e_ethtool_get_flow(priv, info, location);
                if (!err)
index 66c133757a5ee8daae122e93322306b1c5c44336..15cc7b469d2ed9c066ee11a94a17a8ea5c9709f5 100644 (file)
@@ -174,7 +174,7 @@ unlock:
 
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-       struct mlx5e_sw_stats *s = &priv->stats.sw;
+       struct mlx5e_sw_stats temp, *s = &temp;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u64 tx_offload_none = 0;
@@ -229,6 +229,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
+       memcpy(&priv->stats.sw, s, sizeof(*s));
 }
 
 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
@@ -243,7 +244,6 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 
-       memset(out, 0, outlen);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
 
index fade7233dac5256cb69d0fc67b0c3cb1a4444da5..5436866798f447eef43dac9af242decae4ed6017 100644 (file)
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 
        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
            rep->vport != FDB_UPLINK_VPORT) {
-               if (min_inline > esw->offloads.inline_mode) {
+               if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+                   esw->offloads.inline_mode < min_inline) {
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    min_inline, esw->offloads.inline_mode);
@@ -785,16 +786,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
        return 0;
 }
 
-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-                                char buf[],
-                                unsigned char h_dest[ETH_ALEN],
-                                int ttl,
-                                __be32 daddr,
-                                __be32 saddr,
-                                __be16 udp_dst_port,
-                                __be32 vx_vni)
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+                                 char buf[], int encap_size,
+                                 unsigned char h_dest[ETH_ALEN],
+                                 int ttl,
+                                 __be32 daddr,
+                                 __be32 saddr,
+                                 __be16 udp_dst_port,
+                                 __be32 vx_vni)
 {
-       int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,20 +817,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-       return encap_size;
 }
 
-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-                                char buf[],
-                                unsigned char h_dest[ETH_ALEN],
-                                int ttl,
-                                struct in6_addr *daddr,
-                                struct in6_addr *saddr,
-                                __be16 udp_dst_port,
-                                __be32 vx_vni)
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+                                 char buf[], int encap_size,
+                                 unsigned char h_dest[ETH_ALEN],
+                                 int ttl,
+                                 struct in6_addr *daddr,
+                                 struct in6_addr *saddr,
+                                 __be16 udp_dst_port,
+                                 __be32 vx_vni)
 {
-       int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-       return encap_size;
 }
 
 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device **out_dev)
 {
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
-       int encap_size, ttl, err;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        char *encap_header;
+       int ttl, err;
+
+       if (max_encap_size < ipv4_encap_size) {
+               mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+                              ipv4_encap_size, max_encap_size);
+               return -EOPNOTSUPP;
+       }
 
-       encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+       encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;
 
@@ -903,11 +905,11 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 
        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
-               encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-                                                  e->h_dest, ttl,
-                                                  fl4.daddr,
-                                                  fl4.saddr, tun_key->tp_dst,
-                                                  tunnel_id_to_key32(tun_key->tun_id));
+               gen_vxlan_header_ipv4(*out_dev, encap_header,
+                                     ipv4_encap_size, e->h_dest, ttl,
+                                     fl4.daddr,
+                                     fl4.saddr, tun_key->tp_dst,
+                                     tunnel_id_to_key32(tun_key->tun_id));
                break;
        default:
                err = -EOPNOTSUPP;
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        }
 
        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              encap_size, encap_header, &e->encap_id);
+                              ipv4_encap_size, encap_header, &e->encap_id);
 out:
        if (err && n)
                neigh_release(n);
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 {
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
-       int encap_size, err, ttl = 0;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        char *encap_header;
+       int err, ttl = 0;
+
+       if (max_encap_size < ipv6_encap_size) {
+               mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+                              ipv6_encap_size, max_encap_size);
+               return -EOPNOTSUPP;
+       }
 
-       encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+       encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;
 
@@ -972,11 +981,11 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
-               encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-                                                  e->h_dest, ttl,
-                                                  &fl6.daddr,
-                                                  &fl6.saddr, tun_key->tp_dst,
-                                                  tunnel_id_to_key32(tun_key->tun_id));
+               gen_vxlan_header_ipv6(*out_dev, encap_header,
+                                     ipv6_encap_size, e->h_dest, ttl,
+                                     &fl6.daddr,
+                                     &fl6.saddr, tun_key->tp_dst,
+                                     tunnel_id_to_key32(tun_key->tun_id));
                break;
        default:
                err = -EOPNOTSUPP;
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
        }
 
        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              encap_size, encap_header, &e->encap_id);
+                              ipv6_encap_size, encap_header, &e->encap_id);
 out:
        if (err && n)
                neigh_release(n);
index 307ec6c5fd3b62dffe6cfd5f2541dad9fc155fc3..d111cebca9f1ea57d57a70b108a436af1adcc6aa 100644 (file)
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int num_vports = esw->enabled_vports;
-       int err;
-       int vport;
+       int err, vport;
        u8 mlx5_mode;
 
        if (!MLX5_CAP_GEN(dev, vport_group_manager))
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-           MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+       switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+       case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+               if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+                       return 0;
+               /* fall through */
+       case MLX5_CAP_INLINE_MODE_L2:
+               esw_warn(dev, "Inline mode can't be set\n");
                return -EOPNOTSUPP;
+       case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+               break;
+       }
 
        if (esw->offloads.num_flows > 0) {
                esw_warn(dev, "Can't set inline mode when flows are configured\n");
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-           MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-               return -EOPNOTSUPP;
-
        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
+       u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
        struct mlx5_core_dev *dev = esw->dev;
        int vport;
-       u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 
        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-           MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-               return -EOPNOTSUPP;
+       switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+       case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+               mlx5_mode = MLX5_INLINE_MODE_NONE;
+               goto out;
+       case MLX5_CAP_INLINE_MODE_L2:
+               mlx5_mode = MLX5_INLINE_MODE_L2;
+               goto out;
+       case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+               goto query_vports;
+       }
 
+query_vports:
        for (vport = 1; vport <= nvfs; vport++) {
                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
                if (vport > 1 && prev_mlx5_mode != mlx5_mode)
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
                prev_mlx5_mode = mlx5_mode;
        }
 
+out:
        *mode = mlx5_mode;
        return 0;
 }
index 60154a175bd3866f2b461a357c60d86283afde12..0ad66324247f71c212f44f98679c80f05a2650d3 100644 (file)
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        if (err) {
                dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
                        FW_INIT_TIMEOUT_MILI);
-               goto out_err;
+               goto err_cmd_cleanup;
        }
 
        err = mlx5_core_enable_hca(dev, 0);
index 2e6b0f290ddc2cbf3beeb2f5c1fe813378691c69..222b25908d012614639b4e9ef2a8a7727a82cd18 100644 (file)
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref)
        struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
 
        list_del(&up->list);
+       iounmap(up->map);
        if (mlx5_cmd_free_uar(up->mdev, up->index))
                mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
        kfree(up->reg_bitmap);
index 5bd36a4a8fcdfd201b40321c7fefb82776cd347d..cfdadb658ade0fe73551f6f822ec8ae1805f83de 100644 (file)
        ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
 
 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
-       {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+       {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+       {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+       {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+       {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
+       {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH},
 };
 
 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
@@ -583,6 +583,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
                   p_params->ets_cbs,
                   p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
 
+       if (p_params->ets_enabled && !p_params->max_ets_tc) {
+               p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
+               DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                          "ETS params: max_ets_tc is forced to %d\n",
+               p_params->max_ets_tc);
+       }
+
        /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
         * encoded in a type u32 array of size 2.
         */
@@ -1001,6 +1008,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
        u8 pfc_map = 0;
        int i;
 
+       *pfc &= ~DCBX_PFC_ERROR_MASK;
+
        if (p_params->pfc.willing)
                *pfc |= DCBX_PFC_WILLING_MASK;
        else
@@ -1255,7 +1264,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
        struct qed_dcbx_get *dcbx_info;
 
-       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
        if (!dcbx_info)
                return NULL;
 
@@ -2073,6 +2082,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
                dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
 
+       dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
+
        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EINVAL;
index 8cfc4a54f2dc69240ae1fc42195ef854e0b1c2c9..3cd7989c007dfe46947e2ddb366a904f1af90198 100644 (file)
@@ -1516,11 +1516,12 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                spin_unlock_irqrestore(&priv->lock, flags);
                return NETDEV_TX_BUSY;
        }
-       entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-       priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
        if (skb_put_padto(skb, ETH_ZLEN))
-               goto drop;
+               goto exit;
+
+       entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+       priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
        buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
                 entry / NUM_TX_DESC * DPTR_ALIGN;
index 54248775f227b062addf85044f486ad4512039f5..f68c4db656eda84691b411cee940528a01a2bb62 100644 (file)
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
        .get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       struct sh_eth_txdesc *txdesc;
+       int free_num = 0;
+       int entry;
+       bool sent;
+
+       for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+               entry = mdp->dirty_tx % mdp->num_tx_ring;
+               txdesc = &mdp->tx_ring[entry];
+               sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+               if (sent_only && !sent)
+                       break;
+               /* TACT bit must be checked before all the following reads */
+               dma_rmb();
+               netif_info(mdp, tx_done, ndev,
+                          "tx entry %d status 0x%08x\n",
+                          entry, le32_to_cpu(txdesc->status));
+               /* Free the original skb. */
+               if (mdp->tx_skbuff[entry]) {
+                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+                                        le32_to_cpu(txdesc->len) >> 16,
+                                        DMA_TO_DEVICE);
+                       dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+                       mdp->tx_skbuff[entry] = NULL;
+                       free_num++;
+               }
+               txdesc->status = cpu_to_le32(TD_TFP);
+               if (entry >= mdp->num_tx_ring - 1)
+                       txdesc->status |= cpu_to_le32(TD_TDLE);
+
+               if (sent) {
+                       ndev->stats.tx_packets++;
+                       ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+               }
+       }
+       return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ringsize, i;
 
+       if (mdp->rx_ring) {
+               for (i = 0; i < mdp->num_rx_ring; i++) {
+                       if (mdp->rx_skbuff[i]) {
+                               struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+                               dma_unmap_single(&ndev->dev,
+                                                le32_to_cpu(rxdesc->addr),
+                                                ALIGN(mdp->rx_buf_sz, 32),
+                                                DMA_FROM_DEVICE);
+                       }
+               }
+               ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+                                 mdp->rx_desc_dma);
+               mdp->rx_ring = NULL;
+       }
+
        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;
 
-       /* Free Tx skb ringbuffer */
-       if (mdp->tx_skbuff) {
-               for (i = 0; i < mdp->num_tx_ring; i++)
-                       dev_kfree_skb(mdp->tx_skbuff[i]);
-       }
-       kfree(mdp->tx_skbuff);
-       mdp->tx_skbuff = NULL;
-
-       if (mdp->rx_ring) {
-               ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-                                 mdp->rx_desc_dma);
-               mdp->rx_ring = NULL;
-       }
-
        if (mdp->tx_ring) {
+               sh_eth_tx_free(ndev, false);
+
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
                dma_free_coherent(NULL, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
+
+       /* Free Tx skb ringbuffer */
+       kfree(mdp->tx_skbuff);
+       mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
        update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       struct sh_eth_txdesc *txdesc;
-       int free_num = 0;
-       int entry;
-
-       for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-               entry = mdp->dirty_tx % mdp->num_tx_ring;
-               txdesc = &mdp->tx_ring[entry];
-               if (txdesc->status & cpu_to_le32(TD_TACT))
-                       break;
-               /* TACT bit must be checked before all the following reads */
-               dma_rmb();
-               netif_info(mdp, tx_done, ndev,
-                          "tx entry %d status 0x%08x\n",
-                          entry, le32_to_cpu(txdesc->status));
-               /* Free the original skb. */
-               if (mdp->tx_skbuff[entry]) {
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-                                        le32_to_cpu(txdesc->len) >> 16,
-                                        DMA_TO_DEVICE);
-                       dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-                       mdp->tx_skbuff[entry] = NULL;
-                       free_num++;
-               }
-               txdesc->status = cpu_to_le32(TD_TFP);
-               if (entry >= mdp->num_tx_ring - 1)
-                       txdesc->status |= cpu_to_le32(TD_TDLE);
-
-               ndev->stats.tx_packets++;
-               ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-       }
-       return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
                           intr_status, mdp->cur_tx, mdp->dirty_tx,
                           (u32)ndev->state, edtrr);
                /* dirty buffer free */
-               sh_eth_txfree(ndev);
+               sh_eth_tx_free(ndev, true);
 
                /* SH7712 BUG */
                if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                /* Clear Tx interrupts */
                sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-               sh_eth_txfree(ndev);
+               sh_eth_tx_free(ndev, true);
                netif_wake_queue(ndev);
        }
 
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-               if (!sh_eth_txfree(ndev)) {
+               if (!sh_eth_tx_free(ndev, true)) {
                        netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
index 50d28261b6b9ea22f42c26be0e9f0e0bed194109..b9cb697b281847a83aa511c577a6c790516f8012 100644 (file)
@@ -1371,6 +1371,13 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
                free_cpumask_var(thread_mask);
        }
 
+       if (count > EFX_MAX_RX_QUEUES) {
+               netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+                              "Reducing number of rx queues from %u to %u.\n",
+                              count, EFX_MAX_RX_QUEUES);
+               count = EFX_MAX_RX_QUEUES;
+       }
+
        /* If RSS is requested for the PF *and* VFs then we can't write RSS
         * table entries that are inaccessible to VFs
         */
index ee14662415c5dfc827a02cb577794af495b0433c..a0c52e3281024b566dfe4b58e3014f26aadb0eaf 100644 (file)
@@ -74,7 +74,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_MIN_ENT                128U
 #define EFX_TXQ_MIN_ENT(efx)   (2 * efx_tx_max_skb_descs(efx))
 
-#define EFX_TXQ_MAX_ENT(efx)   (EFX_WORKAROUND_35388(efx) ? \
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx)   (EFX_WORKAROUND_EF10(efx) ? \
                                 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
 
 static inline bool efx_rss_enabled(struct efx_nic *efx)
index f5e5cd1659a148fb63ce2078ef13a5ae12d048bc..29614da91cbf919f91841d8e644ab4b246741ec7 100644 (file)
@@ -1354,6 +1354,13 @@ static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
                free_cpumask_var(thread_mask);
        }
 
+       if (count > EF4_MAX_RX_QUEUES) {
+               netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+                              "Reducing number of rx queues from %u to %u.\n",
+                              count, EF4_MAX_RX_QUEUES);
+               count = EF4_MAX_RX_QUEUES;
+       }
+
        return count;
 }
 
index 103f827a16231e058160ea8e283adc51823a1928..c67fa18b8121091de9396b29eea68f9134206ce2 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 #define EFX_WORKAROUND_10G(efx) 1
 
 /* Bit-bashed I2C reads cause performance drop */
index 9e631952b86f3d4ddd9108e1fe0db7c96d8363d2..48a541eb0af20f9a0db46c0b7a94cdcc709b0b04 100644 (file)
@@ -76,7 +76,7 @@ config TI_CPSW
 config TI_CPTS
        bool "TI Common Platform Time Sync (CPTS) Support"
        depends on TI_CPSW || TI_KEYSTONE_NETCP
-       depends on PTP_1588_CLOCK
+       depends on POSIX_TIMERS
        ---help---
          This driver supports the Common Platform Time Sync unit of
          the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
@@ -87,6 +87,8 @@ config TI_CPTS_MOD
        tristate
        depends on TI_CPTS
        default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+       select NET_PTP_CLASSIFY
+       imply PTP_1588_CLOCK
        default m
 
 config TI_KEYSTONE_NETCP
index a45f98fa4aa70a6ce0c693bef2fda248754a313b..3dadee1080b9e2e541d4b1a335671eb40d8c8205 100644 (file)
@@ -1017,8 +1017,8 @@ tc35815_free_queues(struct net_device *dev)
                        BUG_ON(lp->tx_skbs[i].skb != skb);
 #endif
                        if (skb) {
-                               dev_kfree_skb(skb);
                                pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+                               dev_kfree_skb(skb);
                                lp->tx_skbs[i].skb = NULL;
                                lp->tx_skbs[i].skb_dma = 0;
                        }
index f9f3dba7a58800d9288199b50bec0b85d18cb249..db23cb36ae5cb9ec50493b00329f276ecdd22ae9 100644 (file)
@@ -751,7 +751,6 @@ struct netvsc_device {
        u32 send_section_cnt;
        u32 send_section_size;
        unsigned long *send_section_map;
-       int map_words;
 
        /* Used for NetVSP initialization protocol */
        struct completion channel_init_wait;
index 8dd0b87703288ccc5f067e495f5b297cde00fba1..15ef713d96c0887ec7929ff8d4be3ec3a6cac291 100644 (file)
@@ -236,6 +236,7 @@ static int netvsc_init_buf(struct hv_device *device)
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;
+       size_t map_words;
        int node;
 
        net_device = get_outbound_net_device(device);
@@ -401,11 +402,9 @@ static int netvsc_init_buf(struct hv_device *device)
                   net_device->send_section_size, net_device->send_section_cnt);
 
        /* Setup state for managing the send buffer. */
-       net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
-                                            BITS_PER_LONG);
+       map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
 
-       net_device->send_section_map = kcalloc(net_device->map_words,
-                                              sizeof(ulong), GFP_KERNEL);
+       net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
        if (net_device->send_section_map == NULL) {
                ret = -ENOMEM;
                goto cleanup;
@@ -683,7 +682,7 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
        unsigned long *map_addr = net_device->send_section_map;
        unsigned int i;
 
-       for_each_clear_bit(i, map_addr, net_device->map_words) {
+       for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
                if (sync_test_and_set_bit(i, map_addr) == 0)
                        return i;
        }
index ff0a5ed3ca803551a0350303af44976d0f47dcfc..49ce4e9f4a0f387dac1792252ecd5044b6373cd4 100644 (file)
@@ -617,7 +617,8 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 
 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
                                             unsigned char **iv,
-                                            struct scatterlist **sg)
+                                            struct scatterlist **sg,
+                                            int num_frags)
 {
        size_t size, iv_offset, sg_offset;
        struct aead_request *req;
@@ -629,7 +630,7 @@ static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 
        size = ALIGN(size, __alignof__(struct scatterlist));
        sg_offset = size;
-       size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+       size += sizeof(struct scatterlist) * num_frags;
 
        tmp = kmalloc(size, GFP_ATOMIC);
        if (!tmp)
@@ -649,6 +650,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 {
        int ret;
        struct scatterlist *sg;
+       struct sk_buff *trailer;
        unsigned char *iv;
        struct ethhdr *eth;
        struct macsec_eth_header *hh;
@@ -723,7 +725,14 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                return ERR_PTR(-EINVAL);
        }
 
-       req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
+       ret = skb_cow_data(skb, 0, &trailer);
+       if (unlikely(ret < 0)) {
+               macsec_txsa_put(tx_sa);
+               kfree_skb(skb);
+               return ERR_PTR(ret);
+       }
+
+       req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
        if (!req) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
@@ -732,7 +741,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 
        macsec_fill_iv(iv, secy->sci, pn);
 
-       sg_init_table(sg, MAX_SKB_FRAGS + 1);
+       sg_init_table(sg, ret);
        skb_to_sgvec(skb, sg, 0, skb->len);
 
        if (tx_sc->encrypt) {
@@ -917,6 +926,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 {
        int ret;
        struct scatterlist *sg;
+       struct sk_buff *trailer;
        unsigned char *iv;
        struct aead_request *req;
        struct macsec_eth_header *hdr;
@@ -927,7 +937,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
+       ret = skb_cow_data(skb, 0, &trailer);
+       if (unlikely(ret < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(ret);
+       }
+       req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
        if (!req) {
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
@@ -936,7 +951,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        hdr = (struct macsec_eth_header *)skb->data;
        macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
 
-       sg_init_table(sg, MAX_SKB_FRAGS + 1);
+       sg_init_table(sg, ret);
        skb_to_sgvec(skb, sg, 0, skb->len);
 
        if (hdr->tci_an & MACSEC_TCI_E) {
index 9261722960a719a8e6d46f4ea5b39f500a0817e8..b34eaaae03fd3f289aab4a90b11c05c587858ad2 100644 (file)
@@ -1139,6 +1139,7 @@ static int macvlan_port_create(struct net_device *dev)
 static void macvlan_port_destroy(struct net_device *dev)
 {
        struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+       struct sk_buff *skb;
 
        dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
@@ -1147,7 +1148,15 @@ static void macvlan_port_destroy(struct net_device *dev)
         * but we need to cancel it and purge left skbs if any.
         */
        cancel_work_sync(&port->bc_work);
-       __skb_queue_purge(&port->bc_queue);
+
+       while ((skb = __skb_dequeue(&port->bc_queue))) {
+               const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+               if (src)
+                       dev_put(src->dev);
+
+               kfree_skb(skb);
+       }
 
        kfree(port);
 }
index e2460a57e4b1105ed398e207aa8cdfd84d03707d..ed0d10f54f2607533868dfd10e6bc9d0e09050de 100644 (file)
@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
                skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
                skb_queue_tail(&dp83640->rx_queue, skb);
                schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
-       } else {
-               netif_rx_ni(skb);
        }
 
        return true;
index 6742070ca676f57694a9a6cb11364941deb520a0..da5b392683703b9ece67a24ebcb59aadeba7cc8e 100644 (file)
@@ -297,17 +297,6 @@ static int kszphy_config_init(struct phy_device *phydev)
        if (priv->led_mode >= 0)
                kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
 
-       if (phy_interrupt_is_valid(phydev)) {
-               int ctl = phy_read(phydev, MII_BMCR);
-
-               if (ctl < 0)
-                       return ctl;
-
-               ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
-               if (ret < 0)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -798,9 +787,6 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -940,9 +926,6 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -952,6 +935,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz9021_type,
+       .probe          = kszphy_probe,
        .config_init    = ksz9021_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -971,6 +955,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz9021_type,
+       .probe          = kszphy_probe,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = ksz9031_read_status,
@@ -989,9 +974,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1003,9 +985,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1017,9 +996,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 } };
index a2bfc82e95d70ba645a1f0975ad835ca7c0991cf..97ff1278167bc455af890e9b6c16d7b9d659ea57 100644 (file)
@@ -591,16 +591,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 EXPORT_SYMBOL(phy_mii_ioctl);
 
 /**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * phy_start_aneg_priv - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: Sanitizes the settings (if we're not autonegotiating
  *   them), and then calls the driver's config_aneg function.
  *   If the PHYCONTROL Layer is operating, we change the state to
  *   reflect the beginning of Auto-negotiation or forcing.
  */
-int phy_start_aneg(struct phy_device *phydev)
+static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 {
+       bool trigger = 0;
        int err;
 
        if (!phydev->drv)
@@ -628,10 +630,40 @@ int phy_start_aneg(struct phy_device *phydev)
                }
        }
 
+       /* Re-schedule a PHY state machine to check PHY status because
+        * negotiation may already be done and aneg interrupt may not be
+        * generated.
+        */
+       if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+               err = phy_aneg_done(phydev);
+               if (err > 0) {
+                       trigger = true;
+                       err = 0;
+               }
+       }
+
 out_unlock:
        mutex_unlock(&phydev->lock);
+
+       if (trigger)
+               phy_trigger_machine(phydev, sync);
+
        return err;
 }
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ *   them), and then calls the driver's config_aneg function.
+ *   If the PHYCONTROL Layer is operating, we change the state to
+ *   reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+       return phy_start_aneg_priv(phydev, true);
+}
 EXPORT_SYMBOL(phy_start_aneg);
 
 /**
@@ -659,7 +691,7 @@ void phy_start_machine(struct phy_device *phydev)
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
        if (sync)
                cancel_delayed_work_sync(&phydev->state_queue);
@@ -1154,7 +1186,7 @@ void phy_state_machine(struct work_struct *work)
        mutex_unlock(&phydev->lock);
 
        if (needs_aneg)
-               err = phy_start_aneg(phydev);
+               err = phy_start_aneg_priv(phydev, false);
        else if (do_suspend)
                phy_suspend(phydev);
 
index f8c81f12d988907d42786eedb6cd9e6ca3ca0015..85c01247f2e3858f33e901de1a37d3eaae0941d7 100644 (file)
@@ -2361,8 +2361,10 @@ start_again:
 
        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                          TEAM_CMD_OPTIONS_GET);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
                goto nla_put_failure;
@@ -2634,8 +2636,10 @@ start_again:
 
        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                          TEAM_CMD_PORT_LIST_GET);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
                goto nla_put_failure;
index 3dd490f53e485a399a4531c6681c030b1f5dcde4..f28bd74ac275a039a000cc01b0448d8866f14d1f 100644 (file)
@@ -369,7 +369,7 @@ config USB_NET_NET1080
          optionally with LEDs that indicate traffic
 
 config USB_NET_PLUSB
-       tristate "Prolific PL-2301/2302/25A1 based cables"
+       tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
        # if the handshake/init/reset problems, from original 'plusb',
        # are ever resolved ... then remove "experimental"
        depends on USB_USBNET
index 8a40202c0a1732850da1b2eb64af21470b9c85b4..c4f1c363e24b89404c6834312074f8a4451ded50 100644 (file)
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        tx_overhead = 0x40;
 
        len = skb->len;
-       if (skb_headroom(skb) < tx_overhead) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+       if (skb_cow_head(skb, tx_overhead)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        __skb_push(skb, tx_overhead);
index e221bfcee76b40a3ad7ba60ec4d348f4b8f4cc73..947bea81d924124c3827e87f75e732e35adb2acd 100644 (file)
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 {
        int len = skb->len;
 
-       if (skb_headroom(skb) < 2) {
-               struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+       if (skb_cow_head(skb, 2)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
        skb_push(skb, 2);
 
index 4f2e8141dbe2e53eb23a2b60124e2821b2897fce..00067a0c51ca45b736920a3c56ee42cb24b08271 100644 (file)
@@ -2534,13 +2534,6 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        SET_NETDEV_DEV(net, &interface->dev);
        SET_NETDEV_DEVTYPE(net, &hso_type);
 
-       /* registering our net device */
-       result = register_netdev(net);
-       if (result) {
-               dev_err(&interface->dev, "Failed to register device\n");
-               goto exit;
-       }
-
        /* start allocating */
        for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
                hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
@@ -2560,6 +2553,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 
        add_net_device(hso_dev);
 
+       /* registering our net device */
+       result = register_netdev(net);
+       if (result) {
+               dev_err(&interface->dev, "Failed to register device\n");
+               goto exit;
+       }
+
        hso_log_port(hso_dev);
 
        hso_create_rfkill(hso_dev, interface);
@@ -3279,9 +3279,9 @@ static void __exit hso_exit(void)
        pr_info("unloaded\n");
 
        tty_unregister_driver(tty_drv);
-       put_tty_driver(tty_drv);
        /* deregister the usb driver */
        usb_deregister(&hso_driver);
+       put_tty_driver(tty_drv);
 }
 
 /* Module definitions */
index 876f02f4945eafdc2fb5cfa0f9dcb54d9b498af4..2a2c3edb6bad0b3bd257c3a101d100ad3b00cc59 100644 (file)
@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
        }
 
        /* We now decide whether we can put our special header into the sk_buff */
-       if (skb_cloned(skb) || skb_headroom(skb) < 2) {
-               /* no such luck - we make our own */
-               struct sk_buff *copied_skb;
-               copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
-               dev_kfree_skb_irq(skb);
-               skb = copied_skb;
-               if (!copied_skb) {
-                       kaweth->stats.tx_errors++;
-                       netif_start_queue(net);
-                       spin_unlock_irq(&kaweth->device_lock);
-                       return NETDEV_TX_OK;
-               }
+       if (skb_cow_head(skb, 2)) {
+               kaweth->stats.tx_errors++;
+               netif_start_queue(net);
+               spin_unlock_irq(&kaweth->device_lock);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
        }
 
        private_header = (__le16 *)__skb_push(skb, 2);
index 9889a70ff4f6fece5bfabbfb45a3470f721a5a32..636f48f19d1eacae67c050de4fc3e651bffdf825 100644 (file)
@@ -2607,14 +2607,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
 {
        u32 tx_cmd_a, tx_cmd_b;
 
-       if (skb_headroom(skb) < TX_OVERHEAD) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        if (lan78xx_linearize(skb) < 0)
index 22e1a9a99a7d8cad77b22410973575ee17699b2b..6fe59373cba9b8bd1afce514265171dbbd43aa9e 100644 (file)
@@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
 }
 
 static const struct driver_info        prolific_info = {
-       .description =  "Prolific PL-2301/PL-2302/PL-25A1",
+       .description =  "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
        .flags =        FLAG_POINTTOPOINT | FLAG_NO_SETINT,
                /* some PL-2302 versions seem to fail usb_set_interface() */
        .reset =        pl_reset,
@@ -139,6 +139,17 @@ static const struct usb_device_id  products [] = {
                                         * Host-to-Host Cable
                                         */
        .driver_info =  (unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+       USB_DEVICE(0x067b, 0x27a1),     /* PL-27A1, no eeprom
+                                        * also: goobay Active USB 3.0
+                                        * Data Link,
+                                        * Unitek Y-3501
+                                        */
+       .driver_info =  (unsigned long) &prolific_info,
 },
 
        { },            // END
@@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
 module_usb_driver(plusb_driver);
 
 MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
 MODULE_LICENSE("GPL");
index 0b17b40d7a4fa2653caf21406c4a6b3b45d868b0..190de9a90f7387c5070c7f589aa18bb7d05ac5d7 100644 (file)
@@ -2203,13 +2203,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
 {
        u32 tx_cmd_a, tx_cmd_b;
 
-       if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
-               struct sk_buff *skb2 =
-                       skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
index 831aa33d078ae7d2dd57fdded5de71d1eb915f99..5f19fb0f025d9449d0ba20958610e0d1f083f032 100644 (file)
@@ -2001,13 +2001,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
        /* We do not advertise SG, so skbs should be already linearized */
        BUG_ON(skb_shinfo(skb)->nr_frags);
 
-       if (skb_headroom(skb) < overhead) {
-               struct sk_buff *skb2 = skb_copy_expand(skb,
-                       overhead, 0, flags);
+       /* Make writable and expand header space by overhead if required */
+       if (skb_cow_head(skb, overhead)) {
+               /* Must deallocate here as returning NULL to indicate error
+                * means the skb won't be deallocated in the caller.
+                */
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        if (csum) {
index 4a1e9c489f1f455388ffee289d65e1d6b36cba42..aadfe1d1c37ee67e2a7d17c759db12dad248c41d 100644 (file)
@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
        len = skb->len;
 
-       if (skb_headroom(skb) < SR_TX_OVERHEAD) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        __skb_push(skb, SR_TX_OVERHEAD);
index d6988db1930d6b38db5f932a81c6c64b9c76776a..7d909c8183e95a62b6f8a3182d3ce645a264e909 100644 (file)
@@ -1128,7 +1128,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
                goto nla_put_failure;
 
        /* rule only needs to appear once */
-       nlh->nlmsg_flags &= NLM_F_EXCL;
+       nlh->nlmsg_flags |= NLM_F_EXCL;
 
        frh = nlmsg_data(nlh);
        memset(frh, 0, sizeof(*frh));
index 9583a5f58a1ddc498b89d1b6cbaa14cf1a60bea2..eeb409c287b8ed304bdafea7804619dfb39d15b5 100644 (file)
@@ -1315,6 +1315,14 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                        if (target)
                                table->entries[state] = target;
 
+                       /*
+                        * Don't allow transitions to the deepest state
+                        * if it's quirked off.
+                        */
+                       if (state == ctrl->npss &&
+                           (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+                               continue;
+
                        /*
                         * Is this state a useful non-operational state for
                         * higher-power states to autonomously transition to?
@@ -1387,16 +1395,15 @@ struct nvme_core_quirk_entry {
 };
 
 static const struct nvme_core_quirk_entry core_quirks[] = {
-       /*
-        * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
-        * the controller to go out to lunch.  It dies when the watchdog
-        * timer reads CSTS and gets 0xffffffff.
-        */
        {
-               .vid = 0x144d,
-               .fr = "BXW75D0Q",
+               /*
+                * This Toshiba device seems to die using any APST states.  See:
+                * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
+                */
+               .vid = 0x1179,
+               .mn = "THNSF5256GPUK TOSHIBA",
                .quirks = NVME_QUIRK_NO_APST,
-       },
+       }
 };
 
 /* match is null-terminated but idstr is space-padded. */
index 2aa20e3e5675bf14a8aaf8784bafc344b970a052..ab2d6ec7eb5cc32292a3a9aec3a536fb21f29c4c 100644 (file)
@@ -83,6 +83,11 @@ enum nvme_quirks {
         * APST should not be used.
         */
        NVME_QUIRK_NO_APST                      = (1 << 4),
+
+       /*
+        * The deepest sleep state should not be used.
+        */
+       NVME_QUIRK_NO_DEEPEST_PS                = (1 << 5),
 };
 
 /*
index 26a5fd05fe88aa003a00dc4ece6e9900bd95e618..5d309535abbd6fbef366cc61b254dceac2719618 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/blk-mq-pci.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
@@ -1943,10 +1944,31 @@ static int nvme_dev_map(struct nvme_dev *dev)
        return -ENODEV;
 }
 
+static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+{
+       if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
+               /*
+                * Several Samsung devices seem to drop off the PCIe bus
+                * randomly when APST is on and uses the deepest sleep state.
+                * This has been observed on a Samsung "SM951 NVMe SAMSUNG
+                * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
+                * 950 PRO 256GB", but it seems to be restricted to two Dell
+                * laptops.
+                */
+               if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
+                   (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
+                    dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
+                       return NVME_QUIRK_NO_DEEPEST_PS;
+       }
+
+       return 0;
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
        struct nvme_dev *dev;
+       unsigned long quirks = id->driver_data;
 
        node = dev_to_node(&pdev->dev);
        if (node == NUMA_NO_NODE)
@@ -1978,8 +2000,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto put_pci;
 
+       quirks |= check_dell_samsung_bug(pdev);
+
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-                       id->driver_data);
+                       quirks);
        if (result)
                goto release_pools;
 
index fd66a3199db77d41d08e0658d89611370d46446f..cf9d6a9d9fd4fc17afd19d0e659f5e7cff0a3ca8 100644 (file)
@@ -380,9 +380,13 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
 
 static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
        {
-               .compatible = "hisilicon,pcie-almost-ecam",
+               .compatible =  "hisilicon,hip06-pcie-ecam",
                .data       = (void *) &hisi_pcie_platform_ops,
        },
+       {
+               .compatible =  "hisilicon,hip07-pcie-ecam",
+               .data       = (void *) &hisi_pcie_platform_ops,
+       },
        {},
 };
 
index e5a2d590a104d5cf7e63c1a7acfb94d7639a51da..15c9fe766071a392cbaf49b329c60188cc3e6484 100644 (file)
@@ -1061,10 +1061,10 @@ int scsi_init_io(struct scsi_cmnd *cmd)
        struct scsi_device *sdev = cmd->device;
        struct request *rq = cmd->request;
        bool is_mq = (rq->mq_ctx != NULL);
-       int error;
+       int error = BLKPREP_KILL;
 
        if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
-               return -EINVAL;
+               goto err_exit;
 
        error = scsi_init_sgtable(rq, &cmd->sdb);
        if (error)
index d7efcb632f7d9dde08b0a494c455725b05d55af0..002f1ce22bd02032062924b19d645ebc25302c93 100644 (file)
@@ -297,14 +297,15 @@ static int pwm_backlight_probe(struct platform_device *pdev)
        }
 
        /*
-        * If the GPIO is configured as input, change the direction to output
-        * and set the GPIO as active.
+        * If the GPIO is not known to be already configured as output, that
+        * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL,
+        * change the direction to output and set the GPIO as active.
         * Do not force the GPIO to active when it was already output as it
         * could cause backlight flickering or we would enable the backlight too
         * early. Leave the decision of the initial backlight state for later.
         */
        if (pb->enable_gpio &&
-           gpiod_get_direction(pb->enable_gpio) == GPIOF_DIR_IN)
+           gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT)
                gpiod_direction_output(pb->enable_gpio, 1);
 
        pb->power_supply = devm_regulator_get(&pdev->dev, "power");
index a59801dc2a340bcd3c5a34af9ef7277061967377..afbea61d957e893db09effb75ec47c7d670e24e3 100644 (file)
@@ -1042,9 +1042,12 @@ static void report_reserved_underflow(struct btrfs_fs_info *fs_info,
                                      struct btrfs_qgroup *qgroup,
                                      u64 num_bytes)
 {
-       btrfs_warn(fs_info,
+#ifdef CONFIG_BTRFS_DEBUG
+       WARN_ON(qgroup->reserved < num_bytes);
+       btrfs_debug(fs_info,
                "qgroup %llu reserved space underflow, have: %llu, to free: %llu",
                qgroup->qgroupid, qgroup->reserved, num_bytes);
+#endif
        qgroup->reserved = 0;
 }
 /*
@@ -1075,7 +1078,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;
        if (sign > 0) {
-               if (WARN_ON(qgroup->reserved < num_bytes))
+               if (qgroup->reserved < num_bytes)
                        report_reserved_underflow(fs_info, qgroup, num_bytes);
                else
                        qgroup->reserved -= num_bytes;
@@ -1100,7 +1103,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0) {
-                       if (WARN_ON(qgroup->reserved < num_bytes))
+                       if (qgroup->reserved < num_bytes)
                                report_reserved_underflow(fs_info, qgroup,
                                                          num_bytes);
                        else
@@ -2469,7 +2472,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 
                qg = unode_aux_to_qgroup(unode);
 
-               if (WARN_ON(qg->reserved < num_bytes))
+               if (qg->reserved < num_bytes)
                        report_reserved_underflow(fs_info, qg, num_bytes);
                else
                        qg->reserved -= num_bytes;
index d449e1c03cbd791922148ad00c3d6d0f0e1599ce..d3119fe3ab45fdbdb534651ef68194815dcc544b 100644 (file)
@@ -2071,11 +2071,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);
 
-       if (ia_valid & ATTR_MODE) {
-               err = posix_acl_chmod(inode, attr->ia_mode);
-               if (err)
-                       goto out_put;
-       }
 
        if (mask) {
                req->r_inode = inode;
@@ -2088,14 +2083,12 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
        dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
             ceph_cap_string(dirtied), mask);
 
-       ceph_mdsc_put_request(req);
-       if (mask & CEPH_SETATTR_SIZE)
-               __ceph_do_pending_vmtruncate(inode);
-       ceph_free_cap_flush(prealloc_cf);
-       return err;
-out_put:
        ceph_mdsc_put_request(req);
        ceph_free_cap_flush(prealloc_cf);
+
+       if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
+               __ceph_do_pending_vmtruncate(inode);
+
        return err;
 }
 
@@ -2114,7 +2107,12 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        if (err != 0)
                return err;
 
-       return __ceph_setattr(inode, attr);
+       err = __ceph_setattr(inode, attr);
+
+       if (err >= 0 && (attr->ia_valid & ATTR_MODE))
+               err = posix_acl_chmod(inode, attr->ia_mode);
+
+       return err;
 }
 
 /*
index cc93ba4da9b592468f36d37b80777e88b8e400e6..27bc360c7ffd7e1081f907c5f080dc4ba439fbfc 100644 (file)
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
        return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
 }
 
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+       if (server->tcpStatus == CifsGood)
+               return true;
+
+       return false;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
        .get_dfs_refer = CIFSGetDFSRefer,
        .qfs_tcon = cifs_qfs_tcon,
        .is_path_accessible = cifs_is_path_accessible,
+       .can_echo = cifs_can_echo,
        .query_path_info = cifs_query_path_info,
        .query_file_info = cifs_query_file_info,
        .get_srv_inum = cifs_get_srv_inum,
index dba2ff8eaa68e3365deac3d61df3da5641099d9e..452334694a5d1f37cc480e5d1cf2873c4246019d 100644 (file)
@@ -358,6 +358,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 {
        unsigned int len, v, hdr, dlen;
        u32 max_blocksize = svc_max_payload(rqstp);
+       struct kvec *head = rqstp->rq_arg.head;
+       struct kvec *tail = rqstp->rq_arg.tail;
 
        p = decode_fh(p, &args->fh);
        if (!p)
@@ -367,6 +369,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
        args->count = ntohl(*p++);
        args->stable = ntohl(*p++);
        len = args->len = ntohl(*p++);
+       if ((void *)p > head->iov_base + head->iov_len)
+               return 0;
        /*
         * The count must equal the amount of data passed.
         */
@@ -377,9 +381,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
         * Check to make sure that we got the right number of
         * bytes.
         */
-       hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-       dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-               + rqstp->rq_arg.tail[0].iov_len - hdr;
+       hdr = (void*)p - head->iov_base;
+       dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
        /*
         * Round the length of the data which was specified up to
         * the next multiple of XDR units and then compare that
@@ -396,7 +399,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
                len = args->len = max_blocksize;
        }
        rqstp->rq_vec[0].iov_base = (void*)p;
-       rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+       rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
        v = 0;
        while (len > rqstp->rq_vec[v].iov_len) {
                len -= rqstp->rq_vec[v].iov_len;
@@ -471,6 +474,8 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
        /* first copy and check from the first page */
        old = (char*)p;
        vec = &rqstp->rq_arg.head[0];
+       if ((void *)old > vec->iov_base + vec->iov_len)
+               return 0;
        avail = vec->iov_len - (old - (char*)vec->iov_base);
        while (len && avail && *old) {
                *new++ = *old++;
index cbeeda1e94a2fbbba61e2adeeb4f9ba89287eaf9..d86031b6ad79301c8ca0ceec9c8b991dd5db70f1 100644 (file)
@@ -2489,7 +2489,7 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
 
 int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
-       if (op->opnum == OP_ILLEGAL)
+       if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
                return op_encode_hdr_size * sizeof(__be32);
 
        BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
index 31e1f959345715a59f8f6b9d9ec00a0ec00fece5..59979f0bbd4bf255f5ea6f8fbd33cb9b2a5aa073 100644 (file)
@@ -747,6 +747,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
        return nfserr;
 }
 
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
+ * reply that can both be larger than a page.  The xdr code has taken
+ * advantage of this assumption to be a sloppy about bounds checking in
+ * some cases.  Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+                               struct svc_procedure *proc)
+{
+       /*
+        * The ACL code has more careful bounds-checking and is not
+        * susceptible to this problem:
+        */
+       if (rqstp->rq_prog != NFS_PROGRAM)
+               return false;
+       /*
+        * Ditto NFSv4 (which can in theory have argument and reply both
+        * more than a page):
+        */
+       if (rqstp->rq_vers >= 4)
+               return false;
+       /* The reply will be small, we're OK: */
+       if (proc->pc_xdrressize > 0 &&
+           proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+               return false;
+
+       return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
 int
 nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 {
@@ -759,6 +790,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
                                rqstp->rq_vers, rqstp->rq_proc);
        proc = rqstp->rq_procinfo;
 
+       if (nfs_request_too_big(rqstp, proc)) {
+               dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+               *statp = rpc_garbage_args;
+               return 1;
+       }
        /*
         * Give the xdr decoder a chance to change this if it wants
         * (necessary in the NFSv4.0 compound case)
index 41b468a6a90f807fe3f3d2e4ceeaa9f8c7ae0f8c..de07ff625777820fefc98bfa56adea81962e8135 100644 (file)
@@ -280,6 +280,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
                                        struct nfsd_writeargs *args)
 {
        unsigned int len, hdr, dlen;
+       struct kvec *head = rqstp->rq_arg.head;
        int v;
 
        p = decode_fh(p, &args->fh);
@@ -300,9 +301,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
         * Check to make sure that we got the right number of
         * bytes.
         */
-       hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-       dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-               - hdr;
+       hdr = (void*)p - head->iov_base;
+       if (hdr > head->iov_len)
+               return 0;
+       dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
 
        /*
         * Round the length of the data which was specified up to
@@ -316,7 +318,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
                return 0;
 
        rqstp->rq_vec[0].iov_base = (void*)p;
-       rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+       rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
        v = 0;
        while (len > rqstp->rq_vec[v].iov_len) {
                len -= rqstp->rq_vec[v].iov_len;
index 1656843e87d2bef47b69d65b23c23946e8936f33..323f492e0822dd3286365d5cdbe56c59ec2d5463 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -91,6 +91,7 @@ slow:
                return ERR_PTR(-ENOMEM);
        }
        d_instantiate(dentry, inode);
+       dentry->d_flags |= DCACHE_RCUACCESS;
        dentry->d_fsdata = (void *)ns->ops;
        d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
        if (d) {
index c6c963b2546b4f2d0301f40706064498c31fd3b6..a257b872a53d1105a797c75b14f13c31a442a12b 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -547,13 +547,13 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer)
 /**
  * sys_statx - System call to get enhanced stats
  * @dfd: Base directory to pathwalk from *or* fd to stat.
- * @filename: File to stat *or* NULL.
+ * @filename: File to stat or "" with AT_EMPTY_PATH
  * @flags: AT_* flags to control pathwalk.
  * @mask: Parts of statx struct actually required.
  * @buffer: Result buffer.
  *
- * Note that if filename is NULL, then it does the equivalent of fstat() using
- * dfd to indicate the file of interest.
+ * Note that fstat() can be emulated by setting dfd to the fd of interest,
+ * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
  */
 SYSCALL_DEFINE5(statx,
                int, dfd, const char __user *, filename, unsigned, flags,
@@ -568,10 +568,7 @@ SYSCALL_DEFINE5(statx,
        if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
                return -EINVAL;
 
-       if (filename)
-               error = vfs_statx(dfd, filename, flags, &stat, mask);
-       else
-               error = vfs_statx_fd(dfd, &stat, mask, flags);
+       error = vfs_statx(dfd, filename, flags, &stat, mask);
        if (error)
                return error;
 
index 1e712a36468064a75910e8ba9c0e1cfb2bd3ab28..718b749fa11aa8901544a1e6925a7486bbb357f5 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/math64.h>
 #include <linux/uaccess.h>
 #include <linux/random.h>
+#include <linux/ctype.h>
 #include "ubifs.h"
 
 static DEFINE_SPINLOCK(dbg_lock);
@@ -286,8 +287,10 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
                        break;
                }
 
-               pr_err("\t%d: %s (%s)\n",
-                      count++, dent->name, get_dent_type(dent->type));
+               pr_err("\t%d: inode %llu, type %s, len %d\n",
+                      count++, (unsigned long long) le64_to_cpu(dent->inum),
+                      get_dent_type(dent->type),
+                      le16_to_cpu(dent->nlen));
 
                fname_name(&nm) = dent->name;
                fname_len(&nm) = le16_to_cpu(dent->nlen);
@@ -464,7 +467,8 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                        pr_err("(bad name length, not printing, bad or corrupted node)");
                else {
                        for (i = 0; i < nlen && dent->name[i]; i++)
-                               pr_cont("%c", dent->name[i]);
+                               pr_cont("%c", isprint(dent->name[i]) ?
+                                       dent->name[i] : '?');
                }
                pr_cont("\n");
 
index 30825d882aa94a4c2486d47581f33d1dfca1a409..b777bddaa1dda9f2768952dc562e06df6b068d32 100644 (file)
@@ -606,8 +606,8 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
        }
 
        while (1) {
-               dbg_gen("feed '%s', ino %llu, new f_pos %#x",
-                       dent->name, (unsigned long long)le64_to_cpu(dent->inum),
+               dbg_gen("ino %llu, new f_pos %#x",
+                       (unsigned long long)le64_to_cpu(dent->inum),
                        key_hash_flash(c, &dent->key));
                ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
                             ubifs_inode(dir)->creat_sqnum);
@@ -748,6 +748,11 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
                goto out_fname;
 
        lock_2_inodes(dir, inode);
+
+       /* Handle O_TMPFILE corner case, it is allowed to link a O_TMPFILE. */
+       if (inode->i_nlink == 0)
+               ubifs_delete_orphan(c, inode->i_ino);
+
        inc_nlink(inode);
        ihold(inode);
        inode->i_ctime = ubifs_current_time(inode);
@@ -768,6 +773,8 @@ out_cancel:
        dir->i_size -= sz_change;
        dir_ui->ui_size = dir->i_size;
        drop_nlink(inode);
+       if (inode->i_nlink == 0)
+               ubifs_add_orphan(c, inode->i_ino);
        unlock_2_inodes(dir, inode);
        ubifs_release_budget(c, &req);
        iput(inode);
@@ -1068,8 +1075,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
        }
 
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
-       if (err)
+       if (err) {
+               kfree(dev);
                goto out_budg;
+       }
 
        sz_change = CALC_DENT_SIZE(fname_len(&nm));
 
@@ -1316,9 +1325,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        unsigned int uninitialized_var(saved_nlink);
        struct fscrypt_name old_nm, new_nm;
 
-       if (flags & ~RENAME_NOREPLACE)
-               return -EINVAL;
-
        /*
         * Budget request settings: deletion direntry, new direntry, removing
         * the old inode, and changing old and new parent directory inodes.
index 1d4f365d8f03a439ab0e20caf27ae34ad6e13e74..f6d9af3efa45a6cc8eb448a71f5d43d72d8fcbd1 100644 (file)
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
        return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+       req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+       return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
        struct crypto_ahash_spawn *spawn)
 {
index aab032a6ae6124de63b7934f49d0dc564b0d2dc3..97ca105347a6c5e608297ae1a3562925dc6f6834 100644 (file)
@@ -53,7 +53,7 @@ struct sdio_func {
        unsigned int            state;          /* function state */
 #define SDIO_STATE_PRESENT     (1<<0)          /* present in sysfs */
 
-       u8                      tmpbuf[4];      /* DMA:able scratch buffer */
+       u8                      *tmpbuf;        /* DMA:able scratch buffer */
 
        unsigned                num_info;       /* number of info strings */
        const char              **info;         /* info strings */
index 43a774873aa96d4af64d0cdebb579be572a6658a..fb38573371512338a7876652d95c7f811be1bd3f 100644 (file)
@@ -852,6 +852,7 @@ void phy_change_work(struct work_struct *work);
 void phy_mac_interrupt(struct phy_device *phydev, int new_link);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_ksettings_get(struct phy_device *phydev,
index 85bbb1799df3f93cb1eacbfec497278c2c20c098..d496c02e14bc44327fd3ab6c3faad0c1d7ac1e12 100644 (file)
@@ -35,7 +35,7 @@
 #define RTF_PREF(pref) ((pref) << 27)
 #define RTF_PREF_MASK  0x18000000
 
-#define RTF_PCPU       0x40000000
+#define RTF_PCPU       0x40000000      /* read-only: can not be set by user */
 #define RTF_LOCAL      0x80000000
 
 
index 7af0dcc5d7555679cea6c08395ab54710e7066e6..821f9e807de5705d5b4d65e502fb13a06d3215bb 100644 (file)
@@ -617,6 +617,14 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
                        if (insn->imm == BPF_FUNC_xdp_adjust_head)
                                prog->xdp_adjust_head = 1;
                        if (insn->imm == BPF_FUNC_tail_call) {
+                               /* If we tail call into other programs, we
+                                * cannot make any assumptions since they
+                                * can be replaced dynamically during runtime
+                                * in the program array.
+                                */
+                               prog->cb_access = 1;
+                               prog->xdp_adjust_head = 1;
+
                                /* mark bpf_tail_call as different opcode
                                 * to avoid conditional branch in
                                 * interpeter for every normal call
index d052947fe78507ac80bbe58a09cc612bca7bab73..e2d356dd75812df8e42dfac3feb132edd5612e33 100644 (file)
@@ -98,7 +98,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                int ncpus, v, vecs_to_assign, vecs_per_node;
 
                /* Spread the vectors per node */
-               vecs_per_node = (affv - curvec) / nodes;
+               vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
index c2b88490d857583026a35090b62f7891446b7ba2..c08fbd2f5ba9fa2a806f326a3a85f5d021d74027 100644 (file)
@@ -46,13 +46,13 @@ enum {
                (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
 /*
- * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
  * .data and .bss to fit in required 32MB limit for the kernel. With
- * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
  * So, reduce the static allocations for lockdeps related structures so that
  * everything fits in current required size limit.
  */
-#ifdef CONFIG_PROVE_LOCKING_SMALL
+#ifdef CONFIG_LOCKDEP_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
index 27bb2e61276e1c6b3a581ce0f3a1f750d5975227..dd3e91d68dc73053e7ba0ca5bed0681b374ea8fc 100644 (file)
@@ -5566,6 +5566,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
        trace_free_pid_list(pid_list);
 }
 
+void ftrace_clear_pids(struct trace_array *tr)
+{
+       mutex_lock(&ftrace_lock);
+
+       clear_ftrace_pids(tr);
+
+       mutex_unlock(&ftrace_lock);
+}
+
 static void ftrace_pid_reset(struct trace_array *tr)
 {
        mutex_lock(&ftrace_lock);
index 54e7a90db848df3d1bca17e1ca966e57d2ce57d2..ca47a4fa2986c953dffa0b74fc791647bf0409fb 100644 (file)
@@ -3405,11 +3405,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       struct buffer_page *reader;
+       struct buffer_page *head_page;
+       struct buffer_page *commit_page;
+       unsigned commit;
 
        cpu_buffer = iter->cpu_buffer;
 
-       return iter->head_page == cpu_buffer->commit_page &&
-               iter->head == rb_commit_index(cpu_buffer);
+       /* Remember, trace recording is off when iterator is in use */
+       reader = cpu_buffer->reader_page;
+       head_page = cpu_buffer->head_page;
+       commit_page = cpu_buffer->commit_page;
+       commit = rb_page_commit(commit_page);
+
+       return ((iter->head_page == commit_page && iter->head == commit) ||
+               (iter->head_page == reader && commit_page == head_page &&
+                head_page->read == commit &&
+                iter->head == rb_page_commit(cpu_buffer->reader_page)));
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
index f35109514a015c38de8b2e1da99399fd5f399692..0ad75e9698f6b0f918df83af90da1204e99d8308 100644 (file)
@@ -6733,11 +6733,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
                return ret;
 
  out_reg:
-       ret = register_ftrace_function_probe(glob, ops, count);
+       ret = alloc_snapshot(&global_trace);
+       if (ret < 0)
+               goto out;
 
-       if (ret >= 0)
-               alloc_snapshot(&global_trace);
+       ret = register_ftrace_function_probe(glob, ops, count);
 
+ out:
        return ret < 0 ? ret : 0;
 }
 
@@ -7402,6 +7404,7 @@ static int instance_rmdir(const char *name)
 
        tracing_set_nop(tr);
        event_trace_del_tracer(tr);
+       ftrace_clear_pids(tr);
        ftrace_destroy_function_files(tr);
        tracefs_remove_recursive(tr->dir);
        free_trace_buffers(tr);
index ae1cce91fead25a065899109e426a6cc1e597d28..d19d52d600d623e9d9f0676891e19c6e5e880bce 100644 (file)
@@ -896,6 +896,7 @@ int using_ftrace_ops_list_func(void);
 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
                                  struct dentry *d_tracer);
+void ftrace_clear_pids(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
@@ -914,6 +915,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
+static inline void ftrace_clear_pids(struct trace_array *tr) { }
 /* ftace_func_t type is not defined, use macro instead of static inline */
 #define ftrace_init_array_ops(tr, func) do { } while (0)
 #endif /* CONFIG_FUNCTION_TRACER */
index 97d62c2da6c25dd5721f8c1c75264c83201f7247..fa16c0f82d6e4c159ac4b8751a1fc52631974438 100644 (file)
@@ -1103,9 +1103,6 @@ config PROVE_LOCKING
 
         For more details, see Documentation/locking/lockdep-design.txt.
 
-config PROVE_LOCKING_SMALL
-       bool
-
 config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1111,9 @@ config LOCKDEP
        select KALLSYMS
        select KALLSYMS_ALL
 
+config LOCKDEP_SMALL
+       bool
+
 config LOCK_STAT
        bool "Lock usage statistics"
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
index ed97c2c14fa80b47ffbf7fa22ec6d4b9b57202b1..738f1d5f83503e546960d005a034abf2dde2c0e7 100644 (file)
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
                        unlock_page(page);
                        put_page(page);
                } else {
-                       putback_lru_page(page);
                        dec_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
+                       putback_lru_page(page);
                }
        }
 }
index f3d603cef2c0c0e5aef09540dd2f8d50da5a808c..07efbc3a86567676986105005f77c64f9f99597a 100644 (file)
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
-       unsigned long nr_scanned, flags;
+       unsigned long nr_scanned;
        bool isolated_pageblocks;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        isolated_pageblocks = has_isolate_pageblock(zone);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        trace_mm_page_pcpu_drain(page, 0, mt);
                } while (--count && --batch_free && !list_empty(list));
        }
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
                                unsigned int order,
                                int migratetype)
 {
-       unsigned long nr_scanned, flags;
-       spin_lock_irqsave(&zone->lock, flags);
-       __count_vm_events(PGFREE, 1 << order);
+       unsigned long nr_scanned;
+       spin_lock(&zone->lock);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
                __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
                migratetype = get_pfnblock_migratetype(page, pfn);
        }
        __free_one_page(page, pfn, zone, order, migratetype);
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+       unsigned long flags;
        int migratetype;
        unsigned long pfn = page_to_pfn(page);
 
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
+       local_irq_save(flags);
+       __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
+       local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        int migratetype, bool cold)
 {
        int i, alloced = 0;
-       unsigned long flags;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
         * pages added to the pcp list.
         */
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
        return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
+       unsigned long flags;
        unsigned long pfn = page_to_pfn(page);
        int migratetype;
 
-       if (in_interrupt()) {
-               __free_pages_ok(page, 0);
-               return;
-       }
-
        if (!free_pcp_prepare(page))
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
        set_pcppage_migratetype(page, migratetype);
-       preempt_disable();
+       local_irq_save(flags);
+       __count_vm_event(PGFREE);
 
        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
                migratetype = MIGRATE_MOVABLE;
        }
 
-       __count_vm_event(PGFREE);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (!cold)
                list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
        }
 
 out:
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
        struct page *page;
 
-       VM_BUG_ON(in_interrupt());
-
        do {
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
        struct list_head *list;
        bool cold = ((gfp_flags & __GFP_COLD) != 0);
        struct page *page;
+       unsigned long flags;
 
-       preempt_disable();
+       local_irq_save(flags);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        list = &pcp->lists[migratetype];
        page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
@@ -2695,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
                zone_statistics(preferred_zone, zone);
        }
-       preempt_enable();
+       local_irq_restore(flags);
        return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
        unsigned long flags;
        struct page *page;
 
-       if (likely(order == 0) && !in_interrupt()) {
+       if (likely(order == 0)) {
                page = rmqueue_pcplist(preferred_zone, zone, order,
                                gfp_flags, migratetype);
                goto out;
index 809025ed97ea0eee97573a32ba2764c63ee2dffd..5a4f5c5a31e88ee558f536d22f61f05a3fd13c45 100644 (file)
@@ -1768,8 +1768,7 @@ void __init init_mm_internals(void)
 {
        int ret __maybe_unused;
 
-       mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
-                                      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+       mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
 
 #ifdef CONFIG_SMP
        ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
index 90f49a194249ff5fecb3d81a05837ea8b75cc586..430b53e7d941def09220a1c97a2e82d288304595 100644 (file)
@@ -123,6 +123,7 @@ static void br_dev_uninit(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
 
+       br_multicast_dev_del(br);
        br_multicast_uninit_stats(br);
        br_vlan_flush(br);
        free_percpu(br->stats);
index 56a2a72e7738c9869e27e77ca13140aed4bd530a..a8d0ed282a109a1f4075565a0934e742febb387b 100644 (file)
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 
        br_fdb_delete_by_port(br, NULL, 0, 1);
 
-       br_multicast_dev_del(br);
        cancel_delayed_work_sync(&br->gc_work);
 
        br_sysfs_delbr(br->dev);
index 533a6d6f60920a01e8a9cef8fdb408950b914b05..9b5875388c23c4f3306124697fd291c40fb6e6cd 100644 (file)
@@ -2450,6 +2450,9 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
        unsigned long flags;
 
+       if (unlikely(!skb))
+               return;
+
        if (likely(atomic_read(&skb->users) == 1)) {
                smp_rmb();
                atomic_set(&skb->users, 0);
index 9424673009c14e0fb288b8e4041dba596b37ee8d..29be2466970cd670daa7a8abdd54929c9af39026 100644 (file)
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                struct netdev_queue *txq;
+               unsigned int q_index;
 
                if (!netif_device_present(dev) || !netif_running(dev)) {
                        kfree_skb(skb);
                        continue;
                }
 
-               txq = skb_get_tx_queue(dev, skb);
-
                local_irq_save(flags);
+               /* check if skb->queue_mapping is still valid */
+               q_index = skb_get_queue_mapping(skb);
+               if (unlikely(q_index >= dev->real_num_tx_queues)) {
+                       q_index = q_index % dev->real_num_tx_queues;
+                       skb_set_queue_mapping(skb, q_index);
+               }
+               txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
index 9f781092fda9cb8cac22b0743b4bc7666a3bd91a..f1d04592ace02f32efa6e05df89c9a5e0023157f 100644 (file)
@@ -1576,6 +1576,8 @@ done:
                skb_set_tail_pointer(skb, len);
        }
 
+       if (!skb->sk || skb->destructor == sock_edemux)
+               skb_condense(skb);
        return 0;
 }
 EXPORT_SYMBOL(___pskb_trim);
@@ -3082,22 +3084,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        if (sg && csum && (mss != GSO_BY_FRAGS))  {
                if (!(features & NETIF_F_GSO_PARTIAL)) {
                        struct sk_buff *iter;
+                       unsigned int frag_len;
 
                        if (!list_skb ||
                            !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
                                goto normal;
 
-                       /* Split the buffer at the frag_list pointer.
-                        * This is based on the assumption that all
-                        * buffers in the chain excluding the last
-                        * containing the same amount of data.
+                       /* If we get here then all the required
+                        * GSO features except frag_list are supported.
+                        * Try to split the SKB to multiple GSO SKBs
+                        * with no frag_list.
+                        * Currently we can do that only when the buffers don't
+                        * have a linear part and all the buffers except
+                        * the last are of the same length.
                         */
+                       frag_len = list_skb->len;
                        skb_walk_frags(head_skb, iter) {
+                               if (frag_len != iter->len && iter->next)
+                                       goto normal;
                                if (skb_headlen(iter))
                                        goto normal;
 
                                len -= iter->len;
                        }
+
+                       if (len != frag_len)
+                               goto normal;
                }
 
                /* GSO partial only requires that we trim off any excess that
@@ -3807,6 +3819,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
        serr->ee.ee_info = tstype;
        serr->opt_stats = opt_stats;
+       serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
                serr->ee.ee_data = skb_shinfo(skb)->tskey;
                if (sk->sk_protocol == IPPROTO_TCP &&
index 6b1fc6e4278ef4f1cba58412977918af31d73e62..13a9a3297eae3ac48a77214e9365657202d44f08 100644 (file)
@@ -1343,6 +1343,9 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        if (*(u8 *)iph != 0x45)
                goto out_unlock;
 
+       if (ip_is_fragment(iph))
+               goto out_unlock;
+
        if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;
 
index ebd953bc5607f3b25fffddcb26a5c65e5490cb2b..1d46d05efb0ff067c35750ac43be8e7babd60446 100644 (file)
@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
                return false;
 
        /* Support IP_PKTINFO on tstamp packets if requested, to correlate
-        * timestamp with egress dev. Not possible for packets without dev
+        * timestamp with egress dev. Not possible for packets without iif
         * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
         */
-       if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
-           (!skb->dev))
+       info = PKTINFO_SKB_CB(skb);
+       if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+           !info->ipi_ifindex)
                return false;
 
-       info = PKTINFO_SKB_CB(skb);
        info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
-       info->ipi_ifindex = skb->dev->ifindex;
        return true;
 }
 
@@ -591,6 +590,7 @@ static bool setsockopt_needs_rtnl(int optname)
        case MCAST_LEAVE_GROUP:
        case MCAST_LEAVE_SOURCE_GROUP:
        case MCAST_UNBLOCK_SOURCE:
+       case IP_ROUTER_ALERT:
                return true;
        }
        return false;
index c0317c940bcdc303015f500b52198e0862440e17..b036e85e093b3e97cee1b0dbffc8d1dfeb6a2b72 100644 (file)
@@ -1278,7 +1278,7 @@ static void mrtsock_destruct(struct sock *sk)
        struct net *net = sock_net(sk);
        struct mr_table *mrt;
 
-       rtnl_lock();
+       ASSERT_RTNL();
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
@@ -1289,7 +1289,6 @@ static void mrtsock_destruct(struct sock *sk)
                        mroute_clean_tables(mrt, false);
                }
        }
-       rtnl_unlock();
 }
 
 /* Socket options and virtual interface manipulation. The whole
@@ -1353,13 +1352,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                if (sk != rcu_access_pointer(mrt->mroute_sk)) {
                        ret = -EACCES;
                } else {
-                       /* We need to unlock here because mrtsock_destruct takes
-                        * care of rtnl itself and we can't change that due to
-                        * the IP_ROUTER_ALERT setsockopt which runs without it.
-                        */
-                       rtnl_unlock();
                        ret = ip_ra_control(sk, 0, NULL);
-                       goto out;
+                       goto out_unlock;
                }
                break;
        case MRT_ADD_VIF:
@@ -1470,7 +1464,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
        }
 out_unlock:
        rtnl_unlock();
-out:
        return ret;
 }
 
index 8119e1f66e036ad2a8372bf24dd943c7d9631d8e..9d943974de2b6d91c56b2ae2dee0019883f8f3cf 100644 (file)
@@ -682,7 +682,9 @@ static void raw_close(struct sock *sk, long timeout)
        /*
         * Raw sockets may have direct kernel references. Kill them.
         */
+       rtnl_lock();
        ip_ra_control(sk, 0, NULL);
+       rtnl_unlock();
 
        sk_common_release(sk);
 }
index acd69cfe2951ed89213403b3e9556da6ddd50ba8..d9724889ff09077aa88c98ec3e170dcfdb91d29b 100644 (file)
@@ -2359,7 +2359,8 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
                }
 
                /* L3 master device is the loopback for that domain */
-               dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
+               dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
+                       net->loopback_dev;
                fl4->flowi4_oif = dev_out->ifindex;
                flags |= RTCF_LOCAL;
                goto make_route;
index 79c4817abc94d08265edb2dfa995e3e479148a16..6e3c512054a60715e8e2d16ffedd12cba6a3d2d9 100644 (file)
@@ -168,12 +168,8 @@ void tcp_assign_congestion_control(struct sock *sk)
        }
 out:
        rcu_read_unlock();
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-       /* Clear out private data before diag gets it and
-        * the ca has not been initialized.
-        */
-       if (ca->get_info)
-               memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
        if (ca->flags & TCP_CONG_NEEDS_ECN)
                INET_ECN_xmit(sk);
        else
@@ -200,11 +196,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
        tcp_cleanup_congestion_control(sk);
        icsk->icsk_ca_ops = ca;
        icsk->icsk_ca_setsockopt = 1;
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-       if (sk->sk_state != TCP_CLOSE) {
-               memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+       if (sk->sk_state != TCP_CLOSE)
                tcp_init_congestion_control(sk);
-       }
 }
 
 /* Manage refcounts on socket close. */
index c3c082ed38796f65948e7a1042e37813b71d5abf..a85d863c44196e60fd22e25471cf773e72d2c133 100644 (file)
@@ -1267,7 +1267,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
  * eventually). The difference is that pulled data not copied, but
  * immediately discarded.
  */
-static void __pskb_trim_head(struct sk_buff *skb, int len)
+static int __pskb_trim_head(struct sk_buff *skb, int len)
 {
        struct skb_shared_info *shinfo;
        int i, k, eat;
@@ -1277,7 +1277,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
                __skb_pull(skb, eat);
                len -= eat;
                if (!len)
-                       return;
+                       return 0;
        }
        eat = len;
        k = 0;
@@ -1303,23 +1303,28 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
        skb_reset_tail_pointer(skb);
        skb->data_len -= len;
        skb->len = skb->data_len;
+       return len;
 }
 
 /* Remove acked data from a packet in the transmit queue. */
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 {
+       u32 delta_truesize;
+
        if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;
 
-       __pskb_trim_head(skb, len);
+       delta_truesize = __pskb_trim_head(skb, len);
 
        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;
 
-       skb->truesize        -= len;
-       sk->sk_wmem_queued   -= len;
-       sk_mem_uncharge(sk, len);
-       sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+       if (delta_truesize) {
+               skb->truesize      -= delta_truesize;
+               sk->sk_wmem_queued -= delta_truesize;
+               sk_mem_uncharge(sk, delta_truesize);
+               sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+       }
 
        /* Any change of skb->len requires recalculation of tso factor. */
        if (tcp_skb_pcount(skb) > 1)
index b2be1d9757efb8ce8b82dc0a0fe3a475d193ea5b..781250151d40ee4559f7b90d15dccad8ffaeafd0 100644 (file)
@@ -29,6 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
        u16 mac_len = skb->mac_len;
        int udp_offset, outer_hlen;
        __wsum partial;
+       bool need_ipsec;
 
        if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                goto out;
@@ -62,8 +63,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 
        ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
 
+       need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
        /* Try to offload checksum if possible */
        offload_csum = !!(need_csum &&
+                         !need_ipsec &&
                          (skb->dev->features &
                           (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
                                      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
index 80ce478c4851318f6eef9320bbdb8548641f920c..0ea96c4d334da2821a8d9c0e5e7d0d513dcb4228 100644 (file)
@@ -3271,14 +3271,24 @@ static void addrconf_gre_config(struct net_device *dev)
 static int fixup_permanent_addr(struct inet6_dev *idev,
                                struct inet6_ifaddr *ifp)
 {
-       if (!ifp->rt) {
-               struct rt6_info *rt;
+       /* rt6i_ref == 0 means the host route was removed from the
+        * FIB, for example, if 'lo' device is taken down. In that
+        * case regenerate the host route.
+        */
+       if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
+               struct rt6_info *rt, *prev;
 
                rt = addrconf_dst_alloc(idev, &ifp->addr, false);
                if (unlikely(IS_ERR(rt)))
                        return PTR_ERR(rt);
 
+               /* ifp->rt can be accessed outside of rtnl */
+               spin_lock(&ifp->lock);
+               prev = ifp->rt;
                ifp->rt = rt;
+               spin_unlock(&ifp->lock);
+
+               ip6_rt_put(prev);
        }
 
        if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
index a9a9553ee63df8eb6e16e00d5da8c29406435350..e82e59f22dfc0e8eabe6b8dd3e12f5c25533142b 100644 (file)
@@ -933,8 +933,6 @@ static int __init inet6_init(void)
        if (err)
                goto igmp_fail;
 
-       ipv6_stub = &ipv6_stub_impl;
-
        err = ipv6_netfilter_init();
        if (err)
                goto netfilter_fail;
@@ -1010,6 +1008,10 @@ static int __init inet6_init(void)
        if (err)
                goto sysctl_fail;
 #endif
+
+       /* ensure that ipv6 stubs are visible only after ipv6 is ready */
+       wmb();
+       ipv6_stub = &ipv6_stub_impl;
 out:
        return err;
 
index eec27f87efaca15133cf1d5225e37e6a2f6a6f8a..e011122ebd43c190aec3812099345ec852444284 100644 (file)
@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
  * At one point, excluding local errors was a quick test to identify icmp/icmp6
  * errors. This is no longer true, but the test remained, so the v6 stack,
  * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
  */
 static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
                                      struct sock_exterr_skb *serr)
@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
        if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
                return false;
 
-       if (!skb->dev)
+       if (!IP6CB(skb)->iif)
                return false;
 
-       if (skb->protocol == htons(ETH_P_IPV6))
-               IP6CB(skb)->iif = skb->dev->ifindex;
-       else
-               PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
        return true;
 }
 
index 275cac628a95066f0a27e93f5015ddeb0172c28c..d32e2110aff286cf6c911048e96fc3abf6e10779 100644 (file)
@@ -388,7 +388,6 @@ looped_back:
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
-               kfree_skb(skb);
                return -1;
        }
 
@@ -910,6 +909,8 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
 {
        switch (opt->type) {
        case IPV6_SRCRT_TYPE_0:
+       case IPV6_SRCRT_STRICT:
+       case IPV6_SRCRT_TYPE_2:
                ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
                break;
        case IPV6_SRCRT_TYPE_4:
@@ -1164,6 +1165,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
 
        switch (opt->srcrt->type) {
        case IPV6_SRCRT_TYPE_0:
+       case IPV6_SRCRT_STRICT:
+       case IPV6_SRCRT_TYPE_2:
                fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
                break;
        case IPV6_SRCRT_TYPE_4:
index aacfb4bce1533b3f3b38e1173c18cb1bb6b33099..c45b12b4431cbfcaef1f8452bae871bb176be478 100644 (file)
@@ -122,11 +122,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                        max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
        /*
         * RFC4291 2.5.3
+        * The loopback address must not be used as the source address in IPv6
+        * packets that are sent outside of a single node. [..]
         * A packet received on an interface with a destination address
         * of loopback must be dropped.
         */
-       if (!(dev->flags & IFF_LOOPBACK) &&
-           ipv6_addr_loopback(&hdr->daddr))
+       if ((ipv6_addr_loopback(&hdr->saddr) ||
+            ipv6_addr_loopback(&hdr->daddr)) &&
+            !(dev->flags & IFF_LOOPBACK))
                goto err;
 
        /* RFC4291 Errata ID: 3480
index 75fac933c209a0f430279dea10b5dd2426a7ed31..a9692ec0cd6d0ba9fecba143d16191e8df0d9572 100644 (file)
@@ -1037,7 +1037,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = t->net;
        struct net_device_stats *stats = &t->dev->stats;
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct ipv6hdr *ipv6h;
        struct ipv6_tel_txoption opt;
        struct dst_entry *dst = NULL, *ndst = NULL;
        struct net_device *tdev;
@@ -1057,26 +1057,28 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
        /* NBMA tunnel */
        if (ipv6_addr_any(&t->parms.raddr)) {
-               struct in6_addr *addr6;
-               struct neighbour *neigh;
-               int addr_type;
+               if (skb->protocol == htons(ETH_P_IPV6)) {
+                       struct in6_addr *addr6;
+                       struct neighbour *neigh;
+                       int addr_type;
 
-               if (!skb_dst(skb))
-                       goto tx_err_link_failure;
+                       if (!skb_dst(skb))
+                               goto tx_err_link_failure;
 
-               neigh = dst_neigh_lookup(skb_dst(skb),
-                                        &ipv6_hdr(skb)->daddr);
-               if (!neigh)
-                       goto tx_err_link_failure;
+                       neigh = dst_neigh_lookup(skb_dst(skb),
+                                                &ipv6_hdr(skb)->daddr);
+                       if (!neigh)
+                               goto tx_err_link_failure;
 
-               addr6 = (struct in6_addr *)&neigh->primary_key;
-               addr_type = ipv6_addr_type(addr6);
+                       addr6 = (struct in6_addr *)&neigh->primary_key;
+                       addr_type = ipv6_addr_type(addr6);
 
-               if (addr_type == IPV6_ADDR_ANY)
-                       addr6 = &ipv6_hdr(skb)->daddr;
+                       if (addr_type == IPV6_ADDR_ANY)
+                               addr6 = &ipv6_hdr(skb)->daddr;
 
-               memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
-               neigh_release(neigh);
+                       memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+                       neigh_release(neigh);
+               }
        } else if (!(t->parms.flags &
                     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
                /* enable the cache only only if the routing decision does
index 6ba6c900ebcf430cf313a2bef55ff69c114af218..bf34d0950752ba1466fa806fd4f8ce0b802b0087 100644 (file)
@@ -774,7 +774,8 @@ failure:
  *     Delete a VIF entry
  */
 
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+                      struct list_head *head)
 {
        struct mif_device *v;
        struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
                                             dev->ifindex, &in6_dev->cnf);
        }
 
-       if (v->flags & MIFF_REGISTER)
+       if ((v->flags & MIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);
 
        dev_put(dev);
@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
        struct mr6_table *mrt;
        struct mif_device *v;
        int ct;
-       LIST_HEAD(list);
 
        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
                v = &mrt->vif6_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
-                               mif6_delete(mrt, ct, &list);
+                               mif6_delete(mrt, ct, 1, NULL);
                }
        }
-       unregister_netdevice_many(&list);
 
        return NOTIFY_DONE;
 }
@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
        for (i = 0; i < mrt->maxvif; i++) {
                if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
                        continue;
-               mif6_delete(mrt, i, &list);
+               mif6_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);
 
@@ -1707,7 +1706,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
                if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
                        return -EFAULT;
                rtnl_lock();
-               ret = mif6_delete(mrt, mifi, NULL);
+               ret = mif6_delete(mrt, mifi, 0, NULL);
                rtnl_unlock();
                return ret;
 
index 7ebac630d3c603186be2fc0dcbaac7d7e74bfde6..cb1766724a4ca12ec9ccbc452c776261477c99f1 100644 (file)
@@ -1749,7 +1749,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
                idev = in6_dev_get(dev);
                if (!idev)
                        break;
-               if (idev->cnf.ndisc_notify)
+               if (idev->cnf.ndisc_notify ||
+                   net->ipv6.devconf_all->ndisc_notify)
                        ndisc_send_unsol_na(dev);
                in6_dev_put(idev);
                break;
index f174e76e6505d4045e940c9fceef765d2aaa937d..0da6a12b5472e322d679572c7244e5c9bc467741 100644 (file)
@@ -1178,8 +1178,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
-                       amount = skb_tail_pointer(skb) -
-                               skb_transport_header(skb);
+                       amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
        }
index 9db1418993f2b8a5b4194895f243441033d4729a..fb174b590fd3b443a7503207d822dd02e7171290 100644 (file)
@@ -1854,6 +1854,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
        int addr_type;
        int err = -EINVAL;
 
+       /* RTF_PCPU is an internal flag; can not be set by userspace */
+       if (cfg->fc_flags & RTF_PCPU)
+               goto out;
+
        if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
                goto out;
 #ifndef CONFIG_IPV6_SUBTREES
index a855eb325b030a666fe92c56a2d432c77d9dfe7a..5f44ffed25768d83c31b31295474c5ecf623e986 100644 (file)
@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
                struct sr6_tlv *tlv;
                unsigned int tlv_len;
 
+               if (trailing < sizeof(*tlv))
+                       return false;
+
                tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
                tlv_len = sizeof(*tlv) + tlv->len;
 
index c6252ed42c1de65dee149d7d869b62b96616e22a..be8cecc6500214de68cc8872b48b38c840d3304f 100644 (file)
@@ -63,8 +63,13 @@ struct pfkey_sock {
                } u;
                struct sk_buff  *skb;
        } dump;
+       struct mutex dump_lock;
 };
 
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+                              xfrm_address_t *saddr, xfrm_address_t *daddr,
+                              u16 *family);
+
 static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
 {
        return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
 {
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
        struct sock *sk;
+       struct pfkey_sock *pfk;
        int err;
 
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
        if (sk == NULL)
                goto out;
 
+       pfk = pfkey_sk(sk);
+       mutex_init(&pfk->dump_lock);
+
        sock->ops = &pfkey_ops;
        sock_init_data(sock, sk);
 
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        struct sadb_msg *hdr;
        int rc;
 
+       mutex_lock(&pfk->dump_lock);
+       if (!pfk->dump.dump) {
+               rc = 0;
+               goto out;
+       }
+
        rc = pfk->dump.dump(pfk);
-       if (rc == -ENOBUFS)
-               return 0;
+       if (rc == -ENOBUFS) {
+               rc = 0;
+               goto out;
+       }
 
        if (pfk->dump.skb) {
-               if (!pfkey_can_dump(&pfk->sk))
-                       return 0;
+               if (!pfkey_can_dump(&pfk->sk)) {
+                       rc = 0;
+                       goto out;
+               }
 
                hdr = (struct sadb_msg *) pfk->dump.skb->data;
                hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        }
 
        pfkey_terminate_dump(pfk);
+
+out:
+       mutex_unlock(&pfk->dump_lock);
        return rc;
 }
 
@@ -1793,19 +1815,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        struct xfrm_address_filter *filter = NULL;
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
-       if (pfk->dump.dump != NULL)
+       mutex_lock(&pfk->dump_lock);
+       if (pfk->dump.dump != NULL) {
+               mutex_unlock(&pfk->dump_lock);
                return -EBUSY;
+       }
 
        proto = pfkey_satype2proto(hdr->sadb_msg_satype);
-       if (proto == 0)
+       if (proto == 0) {
+               mutex_unlock(&pfk->dump_lock);
                return -EINVAL;
+       }
 
        if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
                struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
 
                filter = kmalloc(sizeof(*filter), GFP_KERNEL);
-               if (filter == NULL)
+               if (filter == NULL) {
+                       mutex_unlock(&pfk->dump_lock);
                        return -ENOMEM;
+               }
 
                memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
                       sizeof(xfrm_address_t));
@@ -1821,6 +1850,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        pfk->dump.dump = pfkey_dump_sa;
        pfk->dump.done = pfkey_dump_sa_done;
        xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
+       mutex_unlock(&pfk->dump_lock);
 
        return pfkey_do_dump(pfk);
 }
@@ -1913,19 +1943,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
        /* addresses present only in tunnel mode */
        if (t->mode == XFRM_MODE_TUNNEL) {
-               u8 *sa = (u8 *) (rq + 1);
-               int family, socklen;
+               int err;
 
-               family = pfkey_sockaddr_extract((struct sockaddr *)sa,
-                                               &t->saddr);
-               if (!family)
-                       return -EINVAL;
-
-               socklen = pfkey_sockaddr_len(family);
-               if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
-                                          &t->id.daddr) != family)
-                       return -EINVAL;
-               t->encap_family = family;
+               err = parse_sockaddr_pair(
+                       (struct sockaddr *)(rq + 1),
+                       rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+                       &t->saddr, &t->id.daddr, &t->encap_family);
+               if (err)
+                       return err;
        } else
                t->encap_family = xp->family;
 
@@ -1945,7 +1970,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
        if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
                return -EINVAL;
 
-       while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+       while (len >= sizeof(*rq)) {
+               if (len < rq->sadb_x_ipsecrequest_len ||
+                   rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+                       return -EINVAL;
+
                if ((err = parse_ipsecrequest(xp, rq)) < 0)
                        return err;
                len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2437,6 @@ out:
        return err;
 }
 
-#ifdef CONFIG_NET_KEY_MIGRATE
 static int pfkey_sockaddr_pair_size(sa_family_t family)
 {
        return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2448,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
 {
        int af, socklen;
 
-       if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+       if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
                return -EINVAL;
 
        af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2464,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
        return 0;
 }
 
+#ifdef CONFIG_NET_KEY_MIGRATE
 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
                                    struct xfrm_migrate *m)
 {
@@ -2443,13 +2472,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
        struct sadb_x_ipsecrequest *rq2;
        int mode;
 
-       if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-           len < rq1->sadb_x_ipsecrequest_len)
+       if (len < sizeof(*rq1) ||
+           len < rq1->sadb_x_ipsecrequest_len ||
+           rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
                return -EINVAL;
 
        /* old endoints */
        err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
-                                 rq1->sadb_x_ipsecrequest_len,
+                                 rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
                                  &m->old_saddr, &m->old_daddr,
                                  &m->old_family);
        if (err)
@@ -2458,13 +2488,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
        rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
        len -= rq1->sadb_x_ipsecrequest_len;
 
-       if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-           len < rq2->sadb_x_ipsecrequest_len)
+       if (len <= sizeof(*rq2) ||
+           len < rq2->sadb_x_ipsecrequest_len ||
+           rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
                return -EINVAL;
 
        /* new endpoints */
        err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
-                                 rq2->sadb_x_ipsecrequest_len,
+                                 rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
                                  &m->new_saddr, &m->new_daddr,
                                  &m->new_family);
        if (err)
@@ -2679,14 +2710,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
-       if (pfk->dump.dump != NULL)
+       mutex_lock(&pfk->dump_lock);
+       if (pfk->dump.dump != NULL) {
+               mutex_unlock(&pfk->dump_lock);
                return -EBUSY;
+       }
 
        pfk->dump.msg_version = hdr->sadb_msg_version;
        pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sp;
        pfk->dump.done = pfkey_dump_sp_done;
        xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+       mutex_unlock(&pfk->dump_lock);
 
        return pfkey_do_dump(pfk);
 }
index e48724a6725e3266c1d5559d268339a7d2cd7f10..4d7543d1a62cce8d70e2d6894ffb86920c80c241 100644 (file)
@@ -208,6 +208,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
        return len;
 }
 
+static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
+                                        struct sk_buff *skb,
+                                        int rtap_vendor_space)
+{
+       struct {
+               struct ieee80211_hdr_3addr hdr;
+               u8 category;
+               u8 action_code;
+       } __packed action;
+
+       if (!sdata)
+               return;
+
+       BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
+
+       if (skb->len < rtap_vendor_space + sizeof(action) +
+                      VHT_MUMIMO_GROUPS_DATA_LEN)
+               return;
+
+       if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
+               return;
+
+       skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
+
+       if (!ieee80211_is_action(action.hdr.frame_control))
+               return;
+
+       if (action.category != WLAN_CATEGORY_VHT)
+               return;
+
+       if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
+               return;
+
+       if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
+               return;
+
+       skb = skb_copy(skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+       skb_queue_tail(&sdata->skb_queue, skb);
+       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
 /*
  * ieee80211_add_rx_radiotap_header - add radiotap header
  *
@@ -515,7 +560,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
        struct net_device *prev_dev = NULL;
        int present_fcs_len = 0;
        unsigned int rtap_vendor_space = 0;
-       struct ieee80211_mgmt *mgmt;
        struct ieee80211_sub_if_data *monitor_sdata =
                rcu_dereference(local->monitor_sdata);
 
@@ -553,6 +597,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                return remove_monitor_info(local, origskb, rtap_vendor_space);
        }
 
+       ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
+
        /* room for the radiotap header based on driver features */
        rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
        needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -618,23 +664,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                ieee80211_rx_stats(sdata->dev, skb->len);
        }
 
-       mgmt = (void *)skb->data;
-       if (monitor_sdata &&
-           skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
-           ieee80211_is_action(mgmt->frame_control) &&
-           mgmt->u.action.category == WLAN_CATEGORY_VHT &&
-           mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
-           is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
-           ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
-               struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
-
-               if (mu_skb) {
-                       mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
-                       skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
-                       ieee80211_queue_work(&local->hw, &monitor_sdata->work);
-               }
-       }
-
        if (prev_dev) {
                skb->dev = prev_dev;
                netif_receive_skb(skb);
@@ -3610,6 +3639,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                            !ether_addr_equal(bssid, hdr->addr1))
                                return false;
                }
+
+               /*
+                * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+                * the BSSID - we've checked that already but may have accepted
+                * the wildcard (ff:ff:ff:ff:ff:ff).
+                *
+                * It also says:
+                *      The BSSID of the Data frame is determined as follows:
+                *      a) If the STA is contained within an AP or is associated
+                *         with an AP, the BSSID is the address currently in use
+                *         by the STA contained in the AP.
+                *
+                * So we should not accept data frames with an address that's
+                * multicast.
+                *
+                * Accepting it also opens a security problem because stations
+                * could encrypt it with the GTK and inject traffic that way.
+                */
+               if (ieee80211_is_data(hdr->frame_control) && multicast)
+                       return false;
+
                return true;
        case NL80211_IFTYPE_WDS:
                if (bssid || !ieee80211_is_data(hdr->frame_control))
index 8489beff5c25c971067f38833ed4a790a98dd86e..ea81ccf3c7d6a53095b4922329c25b566d5b5940 100644 (file)
@@ -3836,6 +3836,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        case PACKET_HDRLEN:
                if (len > sizeof(int))
                        len = sizeof(int);
+               if (len < sizeof(int))
+                       return -EINVAL;
                if (copy_from_user(&val, optval, len))
                        return -EFAULT;
                switch (val) {
index ae5ac175b2bef96ffa614bc799db5cd90a7bdc08..9da7368b0140f9e4a96794e21d66f487881987ba 100644 (file)
@@ -658,7 +658,9 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        }
 
        if (plen != len) {
-               skb_pad(skb, plen - len);
+               rc = skb_pad(skb, plen - len);
+               if (rc)
+                       goto out_node;
                skb_put(skb, plen - len);
        }
 
index b70aa57319ea3233395dc7ae349b8b7aab5dfd03..e05b924618a03e6a58655873700ee66e0688b7ad 100644 (file)
@@ -529,20 +529,20 @@ errout:
        return err;
 }
 
-static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
+static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
 {
-       a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
-       if (!a->act_cookie)
-               return -ENOMEM;
+       struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return NULL;
 
-       a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
-       if (!a->act_cookie->data) {
-               kfree(a->act_cookie);
-               return -ENOMEM;
+       c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
+       if (!c->data) {
+               kfree(c);
+               return NULL;
        }
-       a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
+       c->len = nla_len(tb[TCA_ACT_COOKIE]);
 
-       return 0;
+       return c;
 }
 
 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
@@ -551,6 +551,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 {
        struct tc_action *a;
        struct tc_action_ops *a_o;
+       struct tc_cookie *cookie = NULL;
        char act_name[IFNAMSIZ];
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
@@ -566,6 +567,18 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
                        goto err_out;
                if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
                        goto err_out;
+               if (tb[TCA_ACT_COOKIE]) {
+                       int cklen = nla_len(tb[TCA_ACT_COOKIE]);
+
+                       if (cklen > TC_COOKIE_MAX_SIZE)
+                               goto err_out;
+
+                       cookie = nla_memdup_cookie(tb);
+                       if (!cookie) {
+                               err = -ENOMEM;
+                               goto err_out;
+                       }
+               }
        } else {
                err = -EINVAL;
                if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
@@ -604,20 +617,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
        if (err < 0)
                goto err_mod;
 
-       if (tb[TCA_ACT_COOKIE]) {
-               int cklen = nla_len(tb[TCA_ACT_COOKIE]);
-
-               if (cklen > TC_COOKIE_MAX_SIZE) {
-                       err = -EINVAL;
-                       tcf_hash_release(a, bind);
-                       goto err_mod;
-               }
-
-               if (nla_memdup_cookie(a, tb) < 0) {
-                       err = -ENOMEM;
-                       tcf_hash_release(a, bind);
-                       goto err_mod;
+       if (name == NULL && tb[TCA_ACT_COOKIE]) {
+               if (a->act_cookie) {
+                       kfree(a->act_cookie->data);
+                       kfree(a->act_cookie);
                }
+               a->act_cookie = cookie;
        }
 
        /* module count goes up only when brand new policy is created
@@ -632,6 +637,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 err_mod:
        module_put(a_o->owner);
 err_out:
+       if (cookie) {
+               kfree(cookie->data);
+               kfree(cookie);
+       }
        return ERR_PTR(err);
 }
 
index 7130e73bd42c21758e88b24b875da4bd97b3c4d2..bdce99f9407affaae8ef524d3bd65bca5847d62b 100644 (file)
@@ -866,6 +866,14 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
        if (!tsk_peer_msg(tsk, hdr))
                goto exit;
 
+       if (unlikely(msg_errcode(hdr))) {
+               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+                                     tsk_peer_port(tsk));
+               sk->sk_state_change(sk);
+               goto exit;
+       }
+
        tsk->probe_unacked = false;
 
        if (mtyp == CONN_PROBE) {
@@ -1083,7 +1091,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
                }
        } while (sent < dlen && !rc);
 
-       return rc ? rc : sent;
+       return sent ? sent : rc;
 }
 
 /**
@@ -1259,7 +1267,10 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
        struct sock *sk = sock->sk;
        DEFINE_WAIT(wait);
        long timeo = *timeop;
-       int err;
+       int err = sock_error(sk);
+
+       if (err)
+               return err;
 
        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
@@ -1281,6 +1292,10 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
+
+               err = sock_error(sk);
+               if (err)
+                       break;
        }
        finish_wait(sk_sleep(sk), &wait);
        *timeop = timeo;
@@ -1484,7 +1499,7 @@ restart:
        if (unlikely(flags & MSG_PEEK))
                goto exit;
 
-       tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+       tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg));
        if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
                tipc_sk_send_ack(tsk);
        tsk_advance_rx_queue(sk);
@@ -1551,6 +1566,8 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct tipc_msg *hdr = buf_msg(skb);
+       u32 pport = msg_origport(hdr);
+       u32 pnode = msg_orignode(hdr);
 
        if (unlikely(msg_mcast(hdr)))
                return false;
@@ -1558,18 +1575,28 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
        switch (sk->sk_state) {
        case TIPC_CONNECTING:
                /* Accept only ACK or NACK message */
-               if (unlikely(!msg_connected(hdr)))
-                       return false;
+               if (unlikely(!msg_connected(hdr))) {
+                       if (pport != tsk_peer_port(tsk) ||
+                           pnode != tsk_peer_node(tsk))
+                               return false;
+
+                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+                       sk->sk_err = ECONNREFUSED;
+                       sk->sk_state_change(sk);
+                       return true;
+               }
 
                if (unlikely(msg_errcode(hdr))) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        sk->sk_err = ECONNREFUSED;
+                       sk->sk_state_change(sk);
                        return true;
                }
 
                if (unlikely(!msg_isdata(hdr))) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        sk->sk_err = EINVAL;
+                       sk->sk_state_change(sk);
                        return true;
                }
 
@@ -1581,8 +1608,7 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
                        return true;
 
                /* If empty 'ACK-' message, wake up sleeping connect() */
-               if (waitqueue_active(sk_sleep(sk)))
-                       wake_up_interruptible(sk_sleep(sk));
+               sk->sk_data_ready(sk);
 
                /* 'ACK-' message is neither accepted nor rejected: */
                msg_set_dest_droppable(hdr, 1);
index 46bdb4fbed0bb34a5d6ae40991b3fda6e5dff82c..e23570b647ae721516997130d7abebeaf3f8bb03 100644 (file)
@@ -395,7 +395,7 @@ resume:
                if (xo)
                        xfrm_gro = xo->flags & XFRM_GRO;
 
-               err = x->inner_mode->afinfo->transport_finish(skb, async);
+               err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
                if (xfrm_gro) {
                        skb_dst_drop(skb);
                        gro_cells_receive(&gro_cells, skb);
index 236cbbc0ab9cfff05cd027ffb0dc56aa15e61033..dfc77b9c5e5a8dd2b31440be47fb4480513680e1 100644 (file)
@@ -1006,6 +1006,10 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
                err = -ESRCH;
 out:
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+
+       if (cnt)
+               xfrm_garbage_collect(net);
+
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_flush);
index addf060399e09547307d9c023f36d8dbf869a931..9cb4fe4478a137e7bb3352ae958830657f6b5b7c 100644 (file)
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
  * immediately unlinked.
  */
 struct key_type key_type_dead = {
-       .name = "dead",
+       .name = ".dead",
 };
 
 /*
index 52c34532c78562643fce84832a5b536baf84988b..4ad3212adebe8becc152f22d2448f8db4716146b 100644 (file)
@@ -273,7 +273,8 @@ error:
  * Create and join an anonymous session keyring or join a named session
  * keyring, creating it if necessary.  A named session keyring must have Search
  * permission for it to be joined.  Session keyrings without this permit will
- * be skipped over.
+ * be skipped over.  It is not permitted for userspace to create or join
+ * keyrings whose name begin with a dot.
  *
  * If successful, the ID of the joined session keyring will be returned.
  */
@@ -290,12 +291,16 @@ long keyctl_join_session_keyring(const char __user *_name)
                        ret = PTR_ERR(name);
                        goto error;
                }
+
+               ret = -EPERM;
+               if (name[0] == '.')
+                       goto error_name;
        }
 
        /* join the session */
        ret = join_session_keyring(name);
+error_name:
        kfree(name);
-
 error:
        return ret;
 }
@@ -1253,8 +1258,8 @@ error:
  * Read or set the default keyring in which request_key() will cache keys and
  * return the old setting.
  *
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist.  The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist.  The old setting will be returned if successful.
  */
 long keyctl_set_reqkey_keyring(int reqkey_defl)
 {
@@ -1279,11 +1284,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
 
        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
                ret = install_process_keyring_to_cred(new);
-               if (ret < 0) {
-                       if (ret != -EEXIST)
-                               goto error;
-                       ret = 0;
-               }
+               if (ret < 0)
+                       goto error;
                goto set;
 
        case KEY_REQKEY_DEFL_DEFAULT:
index b6fdd22205b169b663cdb00aecd5d214c7a376dd..9139b18fc863eb36d62d7777f1553dda63236dfe 100644 (file)
@@ -128,13 +128,18 @@ error:
 }
 
 /*
- * Install a fresh thread keyring directly to new credentials.  This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 int install_thread_keyring_to_cred(struct cred *new)
 {
        struct key *keyring;
 
+       if (new->thread_keyring)
+               return 0;
+
        keyring = keyring_alloc("_tid", new->uid, new->gid, new,
                                KEY_POS_ALL | KEY_USR_VIEW,
                                KEY_ALLOC_QUOTA_OVERRUN,
@@ -147,7 +152,9 @@ int install_thread_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 static int install_thread_keyring(void)
 {
@@ -158,8 +165,6 @@ static int install_thread_keyring(void)
        if (!new)
                return -ENOMEM;
 
-       BUG_ON(new->thread_keyring);
-
        ret = install_thread_keyring_to_cred(new);
        if (ret < 0) {
                abort_creds(new);
@@ -170,17 +175,17 @@ static int install_thread_keyring(void)
 }
 
 /*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
  *
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 int install_process_keyring_to_cred(struct cred *new)
 {
        struct key *keyring;
 
        if (new->process_keyring)
-               return -EEXIST;
+               return 0;
 
        keyring = keyring_alloc("_pid", new->uid, new->gid, new,
                                KEY_POS_ALL | KEY_USR_VIEW,
@@ -194,11 +199,9 @@ int install_process_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Make sure a process keyring is installed for the current process.  The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
  *
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 static int install_process_keyring(void)
 {
@@ -212,14 +215,18 @@ static int install_process_keyring(void)
        ret = install_process_keyring_to_cred(new);
        if (ret < 0) {
                abort_creds(new);
-               return ret != -EEXIST ? ret : 0;
+               return ret;
        }
 
        return commit_creds(new);
 }
 
 /*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any.  If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
@@ -254,8 +261,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 }
 
 /*
- * Install a session keyring, discarding the old one.  If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any.  If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 static int install_session_keyring(struct key *keyring)
 {
index 3b693e924db745c0ec8be74171bb189f17dfc53d..12ba83367b1bc882f6d6fbab9329185d58125090 100644 (file)
 /* wait until all locks are released */
 void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
 {
-       int max_count = 5 * HZ;
+       int warn_count = 5 * HZ;
 
        if (atomic_read(lockp) < 0) {
                pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
                return;
        }
        while (atomic_read(lockp) > 0) {
-               if (max_count == 0) {
-                       pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
-                       break;
-               }
+               if (warn_count-- == 0)
+                       pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
                schedule_timeout_uninterruptible(1);
-               max_count--;
        }
 }
 
index f6769312ebfccbe473ac291e81b386b48b45488f..c3768cd494a5f3fe3bb9059bd8e588c73b806c86 100644 (file)
@@ -45,7 +45,7 @@ struct snd_fw_async_midi_port {
 
        struct snd_rawmidi_substream *substream;
        snd_fw_async_midi_port_fill fill;
-       unsigned int consume_bytes;
+       int consume_bytes;
 };
 
 int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
index 74d7fb6efce6ca8258ece95a83c3460588a3e99e..413ab6313bb66515c284734ca92c87bef014c5fd 100644 (file)
@@ -227,11 +227,11 @@ static void do_registration(struct work_struct *work)
        if (err < 0)
                goto error;
 
-       err = detect_quirks(oxfw);
+       err = snd_oxfw_stream_discover(oxfw);
        if (err < 0)
                goto error;
 
-       err = snd_oxfw_stream_discover(oxfw);
+       err = detect_quirks(oxfw);
        if (err < 0)
                goto error;
 
index 5c7219fb3aa86738a49cff98bf6f16693f7a0192..9e2a3404a836bf919f68b6cbd33c8f569b3c8843 100644 (file)
@@ -621,7 +621,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
+               .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
                .dpcm_capture = 1,
@@ -634,7 +634,6 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
                .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
@@ -661,6 +660,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
                                                | SND_SOC_DAIFMT_CBS_CFS,
                .be_hw_params_fixup = byt_rt5640_codec_fixup,
                .ignore_suspend = 1,
+               .nonatomic = true,
                .dpcm_playback = 1,
                .dpcm_capture = 1,
                .init = byt_rt5640_init,
index 3186f015939fb5fce3e8393fd40463362464dbf8..8164bec63bf15b874da49a4406ae9f6124cd776c 100644 (file)
@@ -235,7 +235,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
                .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
@@ -249,7 +248,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
                .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
index 3e9b1c0bb1ce3cb1864e1825ade56720803df289..058bc99c6c3479e4d7a92c97a885f634b168105d 100644 (file)
@@ -933,6 +933,7 @@ static int soc_tplg_denum_create_texts(struct soc_enum *se,
                }
        }
 
+       se->texts = (const char * const *)se->dobj.control.dtexts;
        return 0;
 
 err:
index d487dd2ef016fee0f25d6a5705feb98a0f26356a..cfcb0ea9d99d8981abc2425d15f3e7148e0dc7a6 100644 (file)
@@ -1299,6 +1299,7 @@ struct uniperif {
        int ver; /* IP version, used by register access macros */
        struct regmap_field *clk_sel;
        struct regmap_field *valid_sel;
+       spinlock_t irq_lock; /* use to prevent race condition with IRQ */
 
        /* capabilities */
        const struct snd_pcm_hardware *hw;
index 60ae31a303ab001e5724518248f59ef12033570b..d7e8dd46d2cc40ba2c937a4ea627f4859fe1f478 100644 (file)
@@ -65,10 +65,13 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
        unsigned int status;
        unsigned int tmp;
 
-       if (player->state == UNIPERIF_STATE_STOPPED) {
-               /* Unexpected IRQ: do nothing */
-               return IRQ_NONE;
-       }
+       spin_lock(&player->irq_lock);
+       if (!player->substream)
+               goto irq_spin_unlock;
+
+       snd_pcm_stream_lock(player->substream);
+       if (player->state == UNIPERIF_STATE_STOPPED)
+               goto stream_unlock;
 
        /* Get interrupt status & clear them immediately */
        status = GET_UNIPERIF_ITS(player);
@@ -88,9 +91,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                        SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
 
                        /* Stop the player */
-                       snd_pcm_stream_lock(player->substream);
                        snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
-                       snd_pcm_stream_unlock(player->substream);
                }
 
                ret = IRQ_HANDLED;
@@ -104,9 +105,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
 
                /* Stop the player */
-               snd_pcm_stream_lock(player->substream);
                snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
-               snd_pcm_stream_unlock(player->substream);
 
                ret = IRQ_HANDLED;
        }
@@ -116,7 +115,8 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                if (!player->underflow_enabled) {
                        dev_err(player->dev,
                                "unexpected Underflow recovering\n");
-                       return -EPERM;
+                       ret = -EPERM;
+                       goto stream_unlock;
                }
                /* Read the underflow recovery duration */
                tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
@@ -138,13 +138,16 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                dev_err(player->dev, "Underflow recovery failed\n");
 
                /* Stop the player */
-               snd_pcm_stream_lock(player->substream);
                snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
-               snd_pcm_stream_unlock(player->substream);
 
                ret = IRQ_HANDLED;
        }
 
+stream_unlock:
+       snd_pcm_stream_unlock(player->substream);
+irq_spin_unlock:
+       spin_unlock(&player->irq_lock);
+
        return ret;
 }
 
@@ -588,6 +591,7 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *player = priv->dai_data.uni;
        struct snd_aes_iec958 *iec958 =  &player->stream_settings.iec958;
+       unsigned long flags;
 
        mutex_lock(&player->ctrl_lock);
        iec958->status[0] = ucontrol->value.iec958.status[0];
@@ -596,12 +600,14 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
        iec958->status[3] = ucontrol->value.iec958.status[3];
        mutex_unlock(&player->ctrl_lock);
 
+       spin_lock_irqsave(&player->irq_lock, flags);
        if (player->substream && player->substream->runtime)
                uni_player_set_channel_status(player,
                                              player->substream->runtime);
        else
                uni_player_set_channel_status(player, NULL);
 
+       spin_unlock_irqrestore(&player->irq_lock, flags);
        return 0;
 }
 
@@ -686,9 +692,12 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *player = priv->dai_data.uni;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&player->irq_lock, flags);
        player->substream = substream;
+       spin_unlock_irqrestore(&player->irq_lock, flags);
 
        player->clk_adj = 0;
 
@@ -986,12 +995,15 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *player = priv->dai_data.uni;
+       unsigned long flags;
 
+       spin_lock_irqsave(&player->irq_lock, flags);
        if (player->state != UNIPERIF_STATE_STOPPED)
                /* Stop the player */
                uni_player_stop(player);
 
        player->substream = NULL;
+       spin_unlock_irqrestore(&player->irq_lock, flags);
 }
 
 static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
@@ -1096,6 +1108,7 @@ int uni_player_init(struct platform_device *pdev,
        }
 
        mutex_init(&player->ctrl_lock);
+       spin_lock_init(&player->irq_lock);
 
        /* Ensure that disabled by default */
        SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
index 93a8df6ed880ea8cc32b62f94ab0b44befbe06ef..ee0055e608529195fda9b76a4226ebf752ead6e3 100644 (file)
@@ -46,10 +46,15 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
        struct uniperif *reader = dev_id;
        unsigned int status;
 
+       spin_lock(&reader->irq_lock);
+       if (!reader->substream)
+               goto irq_spin_unlock;
+
+       snd_pcm_stream_lock(reader->substream);
        if (reader->state == UNIPERIF_STATE_STOPPED) {
                /* Unexpected IRQ: do nothing */
                dev_warn(reader->dev, "unexpected IRQ\n");
-               return IRQ_HANDLED;
+               goto stream_unlock;
        }
 
        /* Get interrupt status & clear them immediately */
@@ -60,13 +65,16 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
        if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
                dev_err(reader->dev, "FIFO error detected\n");
 
-               snd_pcm_stream_lock(reader->substream);
                snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
-               snd_pcm_stream_unlock(reader->substream);
 
-               return IRQ_HANDLED;
+               ret = IRQ_HANDLED;
        }
 
+stream_unlock:
+       snd_pcm_stream_unlock(reader->substream);
+irq_spin_unlock:
+       spin_unlock(&reader->irq_lock);
+
        return ret;
 }
 
@@ -347,9 +355,12 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *reader = priv->dai_data.uni;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&reader->irq_lock, flags);
        reader->substream = substream;
+       spin_unlock_irqrestore(&reader->irq_lock, flags);
 
        if (!UNIPERIF_TYPE_IS_TDM(reader))
                return 0;
@@ -375,12 +386,15 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *reader = priv->dai_data.uni;
+       unsigned long flags;
 
+       spin_lock_irqsave(&reader->irq_lock, flags);
        if (reader->state != UNIPERIF_STATE_STOPPED) {
                /* Stop the reader */
                uni_reader_stop(reader);
        }
        reader->substream = NULL;
+       spin_unlock_irqrestore(&reader->irq_lock, flags);
 }
 
 static const struct snd_soc_dai_ops uni_reader_dai_ops = {
@@ -415,6 +429,8 @@ int uni_reader_init(struct platform_device *pdev,
                return -EBUSY;
        }
 
+       spin_lock_init(&reader->irq_lock);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(uni_reader_init);
index a0aa2009b0e0a81e65672eb795039a2172484c55..20f1871874df54a7d567797c753e379a760e492c 100644 (file)
@@ -282,7 +282,7 @@ static void test_arraymap_percpu(int task, void *data)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        int key, next_key, fd, i;
-       long values[nr_cpus];
+       long long values[nr_cpus];
 
        fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
                            sizeof(values[0]), 2, 0);
@@ -340,7 +340,7 @@ static void test_arraymap_percpu_many_keys(void)
         * allocator more than anything else
         */
        unsigned int nr_keys = 2000;
-       long values[nr_cpus];
+       long long values[nr_cpus];
        int key, fd, i;
 
        fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
new file mode 100644 (file)
index 0000000..bab5ff7
--- /dev/null
@@ -0,0 +1,117 @@
+#!/bin/sh
+# description: ftrace - function pid filters
+
+# Make sure that function pid matching filter works.
+# Also test it on an instance directory
+
+if ! grep -q function available_tracers; then
+    echo "no function tracer configured"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_pid ]; then
+    echo "set_ftrace_pid not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_filter ]; then
+    echo "set_ftrace_filter not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+do_function_fork=1
+
+if [ ! -f options/function-fork ]; then
+    do_function_fork=0
+    echo "no option for function-fork found. Option will not be tested."
+fi
+
+read PID _ < /proc/self/stat
+
+if [ $do_function_fork -eq 1 ]; then
+    # default value of function-fork option
+    orig_value=`grep function-fork trace_options`
+fi
+
+do_reset() {
+    reset_tracer
+    clear_trace
+    enable_tracing
+    echo > set_ftrace_filter
+    echo > set_ftrace_pid
+
+    if [ $do_function_fork -eq 0 ]; then
+       return
+    fi
+
+    echo $orig_value > trace_options
+}
+
+fail() { # msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+yield() {
+    ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
+}
+
+do_test() {
+    disable_tracing
+
+    echo do_execve* > set_ftrace_filter
+    echo *do_fork >> set_ftrace_filter
+
+    echo $PID > set_ftrace_pid
+    echo function > current_tracer
+
+    if [ $do_function_fork -eq 1 ]; then
+       # don't allow children to be traced
+       echo nofunction-fork > trace_options
+    fi
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should be 0
+    if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then
+       fail "PID filtering not working?"
+    fi
+
+    disable_tracing
+    clear_trace
+
+    if [ $do_function_fork -eq 0 ]; then
+       return
+    fi
+
+    # allow children to be traced
+    echo function-fork > trace_options
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should NOT be 0
+    if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then
+       fail "PID filtering not following fork?"
+    fi
+}
+
+do_test
+
+mkdir instances/foo
+cd instances/foo
+do_test
+cd ../../
+rmdir instances/foo
+
+do_reset
+
+exit 0
index 4124593696862fcb370fee15bcf7e34e11cdf9e0..e62bb354820cacdc653a19db385e3bd497931d0b 100644 (file)
@@ -75,7 +75,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
 {
        int fd, val;
 
-       fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
+       fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
        if (fd < 0) {
                perror("socket packet");
                exit(1);
@@ -95,6 +95,24 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
        return fd;
 }
 
+static void sock_fanout_set_cbpf(int fd)
+{
+       struct sock_filter bpf_filter[] = {
+               BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80),           /* ldb [80] */
+               BPF_STMT(BPF_RET+BPF_A, 0),                   /* ret A */
+       };
+       struct sock_fprog bpf_prog;
+
+       bpf_prog.filter = bpf_filter;
+       bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
+
+       if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
+                      sizeof(bpf_prog))) {
+               perror("fanout data cbpf");
+               exit(1);
+       }
+}
+
 static void sock_fanout_set_ebpf(int fd)
 {
        const int len_off = __builtin_offsetof(struct __sk_buff, len);
@@ -270,7 +288,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
                exit(1);
        }
        if (type == PACKET_FANOUT_CBPF)
-               sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+               sock_fanout_set_cbpf(fds[0]);
        else if (type == PACKET_FANOUT_EBPF)
                sock_fanout_set_ebpf(fds[0]);
 
index a77da88bf9469d515713e1d1f0f6d318544ac458..7d990d6c861b5863f23fe1d29523707c48ac28d5 100644 (file)
@@ -38,7 +38,7 @@
 # define __maybe_unused                __attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
+static __maybe_unused void pair_udp_setfilter(int fd)
 {
        /* the filter below checks for all of the following conditions that
         * are based on the contents of create_payload()
@@ -76,23 +76,16 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
        };
        struct sock_fprog bpf_prog;
 
-       if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
-               bpf_filter[5].code = 0x16;   /* RET A                         */
-
        bpf_prog.filter = bpf_filter;
        bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-       if (setsockopt(fd, lvl, optnum, &bpf_prog,
+
+       if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
                       sizeof(bpf_prog))) {
                perror("setsockopt SO_ATTACH_FILTER");
                exit(1);
        }
 }
 
-static __maybe_unused void pair_udp_setfilter(int fd)
-{
-       sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
-}
-
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
        struct sockaddr_in saddr, daddr;