git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'i2c/for-current-fixed' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 19 Jun 2017 00:20:25 +0000 (09:20 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 19 Jun 2017 00:20:25 +0000 (09:20 +0900)
Pull i2c fixes from Wolfram Sang:
 "Two driver bugfixes"

* 'i2c/for-current-fixed' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux:
  i2c: ismt: fix wrong device address when unmap the data buffer
  i2c: rcar: use correct length when unmapping DMA

299 files changed:
Documentation/networking/scaling.txt
arch/arm64/net/bpf_jit_comp.c
arch/mips/boot/Makefile
arch/mips/include/asm/highmem.h
arch/mips/include/asm/kprobes.h
arch/mips/include/asm/pgtable-32.h
arch/mips/kernel/branch.c
arch/mips/kernel/ftrace.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/mm/pgtable-32.c
arch/powerpc/include/asm/bug.h
arch/powerpc/include/asm/xive.h
arch/powerpc/kvm/book3s_xive_template.c
arch/powerpc/platforms/powernv/npu-dma.c
arch/powerpc/sysdev/xive/common.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/kernel/entry.S
arch/x86/include/asm/extable.h
arch/x86/kernel/traps.c
arch/x86/mm/extable.c
arch/x86/mm/init.c
arch/xtensa/include/asm/irq.h
arch/xtensa/kernel/irq.c
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/platforms/iss/simdisk.c
arch/xtensa/platforms/xtfpga/include/platform/hardware.h
arch/xtensa/platforms/xtfpga/setup.c
block/blk-sysfs.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/utresrc.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpuidle/dt_idle_states.c
drivers/devfreq/event/exynos-nocp.c
drivers/devfreq/event/exynos-ppmu.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/bridge/synopsys/Kconfig
drivers/gpu/drm/i915/i915_pvinfo.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/mxsfb/mxsfb_crtc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/host1x/dev.c
drivers/hid/hid-core.c
drivers/hsi/clients/ssi_protocol.c
drivers/iio/adc/meson_saradc.c
drivers/iio/adc/mxs-lradc-adc.c
drivers/iio/buffer/industrialio-buffer-dma.c
drivers/iio/buffer/industrialio-buffer-dmaengine.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
drivers/infiniband/core/addr.c
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.h
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_fp.h
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
drivers/infiniband/hw/bnxt_re/qplib_res.h
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/bnxt_re/qplib_sp.h
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe.h
drivers/infiniband/sw/rxe/rxe_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/irqchip/irq-xtensa-mx.c
drivers/irqchip/irq-xtensa-pic.c
drivers/leds/leds-bcm6328.c
drivers/leds/trigger/ledtrig-heartbeat.c
drivers/media/cec/Kconfig
drivers/media/cec/cec-api.c
drivers/media/i2c/tc358743.c
drivers/media/rc/sir_ir.c
drivers/media/usb/rainshadow-cec/rainshadow-cec.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/mmc/host/meson-gx-mmc.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_hsi.c
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_spi.c
drivers/net/caif/caif_virtio.c
drivers/net/can/dev.c
drivers/net/can/peak_canfd/peak_canfd.c
drivers/net/can/slcan.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/vcan.c
drivers/net/can/vxcan.c
drivers/net/dummy.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ifb.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/loopback.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/netconsole.c
drivers/net/nlmon.c
drivers/net/phy/Kconfig
drivers/net/phy/phy.c
drivers/net/slip/slip.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/vrf.c
drivers/net/vsockmon.c
drivers/net/vxlan.c
drivers/net/wan/dlci.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/lapbether.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/intersil/hostap/hostap_main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/pci/access.c
drivers/pci/endpoint/functions/Kconfig
drivers/platform/x86/intel_telemetry_debugfs.c
drivers/s390/cio/vfio_ccw_ops.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_card.c
drivers/s390/crypto/ap_queue.c
drivers/s390/net/netiucv.c
drivers/staging/iio/cdc/ad7152.c
drivers/staging/rtl8188eu/os_dep/mon.c
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
drivers/staging/rtl8723bs/os_dep/os_intfs.c
drivers/staging/rtl8723bs/os_dep/osdep_service.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_phonet.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/video/fbdev/core/fbmon.c
drivers/video/fbdev/smscufx.c
drivers/video/fbdev/udlfb.c
drivers/video/fbdev/via/viafbdev.c
fs/btrfs/hash.c
fs/ceph/acl.c
fs/ceph/export.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/configfs/item.c
fs/configfs/symlink.c
fs/dcache.c
fs/f2fs/f2fs.h
fs/namespace.c
fs/read_write.c
fs/ufs/balloc.c
fs/ufs/inode.c
fs/ufs/super.c
fs/ufs/ufs_fs.h
fs/ufs/util.c
fs/ufs/util.h
fs/userfaultfd.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_icache.c
include/acpi/actbl.h
include/linux/blkdev.h
include/linux/configfs.h
include/linux/dmi.h
include/linux/netdevice.h
include/media/cec-notifier.h
include/media/cec.h
include/uapi/linux/ethtool.h
include/uapi/linux/openvswitch.h
kernel/irq/manage.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/time/alarmtimer.c
kernel/time/tick-broadcast.c
kernel/time/tick-internal.h
lib/libcrc32c.c
mm/huge_memory.c
mm/memory-failure.c
mm/swap_cgroup.c
mm/vmpressure.c
net/8021q/vlan_dev.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/bluetooth/6lowpan.c
net/bridge/br_device.c
net/caif/caif_socket.c
net/caif/cfpkt_skbuff.c
net/caif/chnl_net.c
net/can/af_can.c
net/core/dev.c
net/core/dst.c
net/core/rtnetlink.c
net/decnet/netfilter/dn_rtmsg.c
net/hsr/hsr_device.c
net/hsr/hsr_forward.c
net/hsr/hsr_framereg.c
net/hsr/hsr_framereg.h
net/ieee802154/6lowpan/core.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/ip_tunnel.c
net/ipv4/ipmr.c
net/ipv6/icmp.c
net/ipv6/ila/ila_xlat.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/proc.c
net/ipv6/route.c
net/ipv6/sit.c
net/irda/irlan/irlan_eth.c
net/l2tp/l2tp_eth.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/wpa.c
net/mac802154/iface.c
net/openvswitch/vport-internal_dev.c
net/phonet/pep-gprs.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sctp/socket.c
net/tipc/msg.c
net/unix/af_unix.c
security/selinux/hooks.c
tools/objtool/builtin-check.c
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/Build
tools/perf/pmu-events/Build
tools/perf/tests/Build
tools/perf/tests/task-exit.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/unwind-libdw.c
tools/testing/selftests/bpf/bpf_endian.h

index 59f4db2a0c85c02df4f6cee3176ceb173333cac1..f55639d71d35b8c466252808cf9b0e58d42b7f30 100644 (file)
@@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware
 or will be computed in the stack. Capable hardware can pass the hash in
 the receive descriptor for the packet; this would usually be the same
 hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
-skb->rx_hash and can be used elsewhere in the stack as a hash of the
+skb->hash and can be used elsewhere in the stack as a hash of the
 packet’s flow.
 
 Each receive hardware queue has an associated list of CPUs to which
index 71f930501ade7cec2d1f230aa638ad3fc9112ee8..c870d6f01ac217e0dc80bf4ee0d3549e6fa1c658 100644 (file)
@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly;
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -57,6 +58,7 @@ static const int bpf2a64[] = {
        /* temporary registers for internal BPF JIT */
        [TMP_REG_1] = A64_R(10),
        [TMP_REG_2] = A64_R(11),
+       [TMP_REG_3] = A64_R(12),
        /* tail_call_cnt */
        [TCALL_CNT] = A64_R(26),
        /* temporary register for blinding constants */
@@ -319,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        const u8 src = bpf2a64[insn->src_reg];
        const u8 tmp = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
+       const u8 tmp3 = bpf2a64[TMP_REG_3];
        const s16 off = insn->off;
        const s32 imm = insn->imm;
        const int i = insn - ctx->prog->insnsi;
@@ -689,10 +692,10 @@ emit_cond_jmp:
                emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
                emit(A64_LDXR(isdw, tmp2, tmp), ctx);
                emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-               emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+               emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
                jmp_offset = -3;
                check_imm19(jmp_offset);
-               emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+               emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
                break;
 
        /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
index 2728a9a9c7c5bc4f822ab6788e1f889cb39c0a51..145b5ce8eb7e660dda67f6a53aa472f74a413af0 100644 (file)
@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS     $@
                        -DADDR_BITS=$(ADDR_BITS) \
                        -DADDR_CELLS=$(itb_addr_cells)
 
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
        $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
 
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
        $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
 
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX)  FORCE
        $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
 
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
        $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
 
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
        $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
 
 quiet_cmd_itb-image = ITB     $@
index d34536e7653f6a29fe85b5b1e15b67c19a8c9b3e..279b6d14ffeb7c5af5b54152292b70dd5581d88c 100644 (file)
@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
 #define LAST_PKMAP 1024
+#endif
+
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
index 291846d9ba8346ca915879ff6b7c1162cde67df8..ad1a99948f2795c0608c32e3dc213c46e171d162 100644 (file)
@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;
 
 #define flush_insn_slot(p)                                             \
 do {                                                                   \
-       flush_icache_range((unsigned long)p->addr,                      \
+       if (p->addr)                                                    \
+               flush_icache_range((unsigned long)p->addr,              \
                           (unsigned long)p->addr +                     \
                           (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));  \
 } while (0)
index 6f94bed571c4416b917a52fe364172243a3a9fe6..74afe8c76bdd01a9a0351208ab05f54f4d43160a 100644 (file)
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
 extern int temp_tlb_entry;
 
 /*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 #define VMALLOC_START    MAP_BASE
 
-#define PKMAP_BASE             (0xfe000000UL)
+#define PKMAP_END      ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE     (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END   (PKMAP_BASE-2*PAGE_SIZE)
index b11facd11c9d05dfae733094359da713c948ad84..f702a459a830060f6aa77fe09eda28c03a3d4f4c 100644 (file)
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        break;
                }
                /* Compact branch: BNEZC || JIALC */
-               if (insn.i_format.rs)
+               if (!insn.i_format.rs) {
+                       /* JIALC: set $31/ra */
                        regs->regs[31] = epc + 4;
+               }
                regs->cp0_epc += 8;
                break;
 #endif
index 30a3b75e88eb6a3310808d8c743739efa3f98fb7..9d9b8fbae2022a426f0bcd9c7e134c4c33b67034 100644 (file)
@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)
 
 #endif
 
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-       if (ip >= (unsigned long)_stext &&
-           ip <= (unsigned long)_etext)
-               return 1;
-       return 0;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000         /* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
         * If ip is in kernel space, no long call, otherwise, long call is
         * needed.
         */
-       new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+       new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
 #ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
 #else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        unsigned int new;
        unsigned long ip = rec->ip;
 
-       new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+       new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 
 #ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
 #else
-       return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+       return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
                                                INSN_NOP : insn_la_mcount[1]);
 #endif
 }
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
         * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
         * kernel, move after the instruction "move ra, at"(offset is 16)
         */
-       ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+       ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
 
        /*
         * search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
         * entries configured through the tracing/set_graph_function interface.
         */
 
-       insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+       insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
        trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
        /* Only trace if the calling function expects to */
index 313a88b2973f673f0fac0998c36517fd2139504b..f3e301f95aef7edb160e122fa722d8cb6840a9a7 100644 (file)
@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
                break;
        case CPU_P5600:
        case CPU_P6600:
-       case CPU_I6400:
                /* 8-bit event numbers */
                raw_id = config & 0x1ff;
                base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
                raw_event.range = P;
 #endif
                break;
+       case CPU_I6400:
+               /* 8-bit event numbers */
+               base_id = config & 0xff;
+               raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+               break;
        case CPU_1004K:
                if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
                        raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
index adc6911ba748915bda5b2575fc03b76891f1fb21..b19a3c506b1e9d203cbacb0da71513d8f21868b1 100644 (file)
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
        /*
         * Fixed mappings:
         */
-       vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-       fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+       vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+       fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
 
 #ifdef CONFIG_HIGHMEM
        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
-       fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+       fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 
        pgd = swapper_pg_dir + __pgd_offset(vaddr);
        pud = pud_offset(pgd, vaddr);
index f2c562a0a427dda2c774acb1e13c7eda8514e21e..0151af6c2a505a77c512110271e73f0c9c3fcd6e 100644 (file)
                "1:     "PPC_TLNEI"     %4,0\n"                 \
                _EMIT_BUG_ENTRY                                 \
                : : "i" (__FILE__), "i" (__LINE__),             \
-                 "i" (BUGFLAG_TAINT(TAINT_WARN)),              \
+                 "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
                  "i" (sizeof(struct bug_entry)),               \
                  "r" (__ret_warn_on));                         \
        }                                                       \
index c8a822acf962ab95d3ef36c37abe70cc82f1c76e..c23ff4389ca236c43994fe4279b7f151e80fe5e7 100644 (file)
@@ -94,11 +94,13 @@ struct xive_q {
  * store at 0 and some ESBs support doing a trigger via a
  * separate trigger page.
  */
-#define XIVE_ESB_GET           0x800
-#define XIVE_ESB_SET_PQ_00     0xc00
-#define XIVE_ESB_SET_PQ_01     0xd00
-#define XIVE_ESB_SET_PQ_10     0xe00
-#define XIVE_ESB_SET_PQ_11     0xf00
+#define XIVE_ESB_STORE_EOI     0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI      0x000 /* Load */
+#define XIVE_ESB_GET           0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00     0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01     0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10     0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11     0xf00 /* Load */
 
 #define XIVE_ESB_VAL_P         0x2
 #define XIVE_ESB_VAL_Q         0x1
index 023a31133c37ce0714fc4457a5ff73062ce8abbc..4636ca6e7d383b7d3ce26b18df01152b8087a8ec 100644 (file)
@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
        /* If the XIVE supports the new "store EOI facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-               __x_writeq(0, __x_eoi_page(xd));
+               __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                opal_int_eoi(hw_irq);
        } else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
                 * properly.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
-                       __x_readq(__x_eoi_page(xd));
+                       __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
                else {
                        eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
 
index 78fa9395b8c55c3dab2c10e1a245f5a7a04be8a2..e6f444b462079c3c4f4bea059337b92e700488f5 100644 (file)
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
        if (WARN_ON(!gpdev))
                return NULL;
 
-       if (WARN_ON(!gpdev->dev.of_node))
+       /* Not all PCI devices have device-tree nodes */
+       if (!gpdev->dev.of_node)
                return NULL;
 
        /* Get assoicated PCI device */
index 913825086b8df675f68f69a0b993934c3b607104..8f5e3035483bc3fac3b552f793ac358344774661 100644 (file)
@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
        /* If the XIVE supports the new "store EOI facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-               out_be64(xd->eoi_mmio, 0);
+               out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                /*
                 * The FW told us to call it. This happens for some
index a5039fa8931442a5c5b35c77fbee22fe86e78131..282072206df7076e732b8fcd121ca49b1693634a 100644 (file)
@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_XFS_DEBUG=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
 CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
index 83970b5afb2bc32f8ca86545c6eea31ece2d0001..3c6b78189fbcc805225b9b2e05d5812c5a5f9b67 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
index fbc6542aaf5955bdeb839fbf03496d2b39113688..653d72bcc007db3b2e39aa7225b447c18c9d9cb2 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
index e23d97c13735b94e43bb965341b93766413a5502..afa46a7406eaeddbbf70bbaf384f49ffa0535874 100644 (file)
@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
 # CONFIG_CHECK_STACK is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
 CONFIG_RAW_DRIVER=y
 # CONFIG_SCLP_ASYNC is not set
 # CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_INOTIFY_USER is not set
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
index 97189dbaf34b2a36dade0738a64eb65bae38aafc..20244a38c88698f887e002bce2423afeb199ace3 100644 (file)
@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
index e408d9cc5b96adf40b873d8aabb3235793cd1b16..6315037335ba9365a198859de579c6fdcfb683cf 100644 (file)
@@ -231,12 +231,17 @@ ENTRY(sie64a)
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
 # See also .Lcleanup_sie
-.Lrewind_pad:
-       nop     0
+.Lrewind_pad6:
+       nopr    7
+.Lrewind_pad4:
+       nopr    7
+.Lrewind_pad2:
+       nopr    7
        .globl sie_exit
 sie_exit:
        lg      %r14,__SF_EMPTY+8(%r15)         # load guest register save area
@@ -249,7 +254,9 @@ sie_exit:
        stg     %r14,__SF_EMPTY+16(%r15)        # set exit reason code
        j       sie_exit
 
-       EX_TABLE(.Lrewind_pad,.Lsie_fault)
+       EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+       EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+       EX_TABLE(.Lrewind_pad2,.Lsie_fault)
        EX_TABLE(sie_exit,.Lsie_fault)
 EXPORT_SYMBOL(sie64a)
 EXPORT_SYMBOL(sie_exit)
index b8ad261d11dce0d5f196efdc71e9f2b5e837d4bf..c66d19e3c23e8c6e31630c5c06f8a90539025695 100644 (file)
@@ -29,6 +29,7 @@ struct pt_regs;
        } while (0)
 
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_bug(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
 
index 3995d3a777d49c569e3ba79e3eca5d2206ab1b0a..bf54309b85da279147d6dbae5bff34381fa789a8 100644 (file)
@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
        return ud == INSN_UD0 || ud == INSN_UD2;
 }
 
-static int fixup_bug(struct pt_regs *regs, int trapnr)
+int fixup_bug(struct pt_regs *regs, int trapnr)
 {
        if (trapnr != X86_TRAP_UD)
                return 0;
index 35ea061010a1a51f743bda3ca7cfe1be468670e1..0ea8afcb929c031fdac01681ac277dcf74887e4e 100644 (file)
@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
        if (fixup_exception(regs, trapnr))
                return;
 
+       if (fixup_bug(regs, trapnr))
+               return;
+
 fail:
        early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
                     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
index cbc87ea9875109b79b70a17ea6c130d8803ff207..9b3f9fa5b283acb12ba38a42824e1b872a9560b9 100644 (file)
@@ -161,16 +161,16 @@ static int page_size_mask;
 
 static void __init probe_page_size_mask(void)
 {
-#if !defined(CONFIG_KMEMCHECK)
        /*
         * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
         * use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
-       if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
+       if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
                page_size_mask |= 1 << PG_LEVEL_2M;
-#endif
+       else
+               direct_gbpages = 0;
 
        /* Enable PSE if available */
        if (boot_cpu_has(X86_FEATURE_PSE))
index f71f88ea7646dcc798067e984dddefa8a659037d..19707db966f1393017bcf9528ff64d2951089d20 100644 (file)
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }
index a265edd6ac37b423e7227f095201fcd8e1685538..99341028cc77c5d24f10ee1ec6b1cb534137cc84 100644 (file)
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
        int irq = irq_find_mapping(NULL, hwirq);
 
-       if (hwirq >= NR_IRQS) {
-               printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-                               __func__, hwirq);
-       }
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
index 394ef08300b6a3188b196155e0863bf2360fef93..33bfa5270d95440cd44c95674073608797e1d131 100644 (file)
@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
                      (ccount_freq/10000) % 100,
                      loops_per_jiffy/(500000/HZ),
                      (loops_per_jiffy/(5000/HZ)) % 100);
-
-       seq_printf(f,"flags\t\t: "
+       seq_puts(f, "flags\t\t: "
 #if XCHAL_HAVE_NMI
                     "nmi "
 #endif
index 30d9fc21e0763c4115f7f5c74b78aba46ead1109..162c77e53ca845bf26ba9ab40ce4cd606f5a7060 100644 (file)
@@ -118,7 +118,7 @@ SECTIONS
   SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
   SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
   SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
-  SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48)
+  SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
   SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
 #endif
 
@@ -306,13 +306,13 @@ SECTIONS
                  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
                  .DoubleExceptionVector.literal,
-                 DOUBLEEXC_VECTOR_VADDR - 48,
+                 DOUBLEEXC_VECTOR_VADDR - 20,
                  SIZEOF(.UserExceptionVector.text),
                  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
                  .DoubleExceptionVector.text,
                  DOUBLEEXC_VECTOR_VADDR,
-                 48,
+                 20,
                  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
index 02e94bb3ad3e088e65eef692873cf86447eccf16..c45b90bb93393bf58c20ab279bc8e63981b7dc02 100644 (file)
@@ -317,8 +317,7 @@ static int __init simdisk_init(void)
        if (simdisk_count > MAX_SIMDISK_COUNT)
                simdisk_count = MAX_SIMDISK_COUNT;
 
-       sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
-                       GFP_KERNEL);
+       sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
        if (sddev == NULL)
                goto out_unregister;
 
index dbeea2b440a1b50bd3e6a1eea85dcb7626244a91..1fda7e20dfcbff79849e5aae29ac8a6529c43e39 100644 (file)
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS       10
+#define PLATFORM_NR_IRQS       0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM      XCHAL_EXTINT3_NUM
 #define OETH_IRQ               XCHAL_EXTINT4_NUM
+#define C67X00_IRQ             XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM      XCHAL_EXTINT0_NUM
 #define OETH_IRQ               XCHAL_EXTINT1_NUM
+#define C67X00_IRQ             XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR           (XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE            0x10
-#define C67X00_IRQ             5
+
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */
index 779be723eb2bdd4fa03c6b30abd553e948df9991..42285f35d3135a0a6d58b5b9e2eea7f5fcaa4019 100644 (file)
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
                .flags = IORESOURCE_MEM,
        },
        [2] = { /* IRQ number */
-               .start = OETH_IRQ,
-               .end   = OETH_IRQ,
+               .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+               .end   = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
                .flags = IORESOURCE_IRQ,
        },
 };
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
                .flags = IORESOURCE_MEM,
        },
        [1] = { /* IRQ number */
-               .start = C67X00_IRQ,
-               .end   = C67X00_IRQ,
+               .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+               .end   = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
                .flags = IORESOURCE_IRQ,
        },
 };
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
 static struct plat_serial8250_port serial_platform_data[] = {
        [0] = {
                .mapbase        = DUART16552_PADDR,
-               .irq            = DUART16552_INTNUM,
+               .irq            = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
                .flags          = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
                                  UPF_IOREMAP,
                .iotype         = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
index 283da7fbe03408d9eef71ba3e1a4f863671d761b..27aceab1cc31484423524caae9d6970cef9274ec 100644 (file)
@@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 }
 
 /**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj:    the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
  *
  * Description:
- *     blk_release_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request().  It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task it to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
+ *     blk_release_queue is the counterpart of blk_init_queue(). It should be
+ *     called when a request queue is being released; typically when a block
+ *     device is being de-registered. Its primary task it to free the queue
+ *     itself.
  *
- * Note:
+ * Notes:
  *     The low level driver must have finished any outstanding requests first
  *     via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ *     Although blk_release_queue() may be called with preemption disabled,
+ *     __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
 {
-       struct request_queue *q =
-               container_of(kobj, struct request_queue, kobj);
+       struct request_queue *q = container_of(work, typeof(*q), release_work);
 
        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
                blk_stat_remove_callback(q, q->poll_cb);
@@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj)
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
 
+static void blk_release_queue(struct kobject *kobj)
+{
+       struct request_queue *q =
+               container_of(kobj, struct request_queue, kobj);
+
+       INIT_WORK(&q->release_work, __blk_release_queue);
+       schedule_work(&q->release_work);
+}
+
 static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
index 7abe6650573950674ce41a7511fd9abe2736c451..0d2e98920069cf97ee323c8f9c3df543f8d6407a 100644 (file)
@@ -416,9 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
                }
        }
 
-       table_desc->validation_count++;
-       if (table_desc->validation_count == 0) {
-               table_desc->validation_count--;
+       if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+               table_desc->validation_count++;
+
+               /*
+                * Detect validation_count overflows to ensure that the warning
+                * message will only be printed once.
+                */
+               if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+                       ACPI_WARNING((AE_INFO,
+                                     "Table %p, Validation count overflows\n",
+                                     table_desc));
+               }
        }
 
        *out_table = table_desc->pointer;
@@ -445,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc)
 
        ACPI_FUNCTION_TRACE(acpi_tb_put_table);
 
-       if (table_desc->validation_count == 0) {
-               ACPI_WARNING((AE_INFO,
-                             "Table %p, Validation count is zero before decrement\n",
-                             table_desc));
-               return_VOID;
+       if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+               table_desc->validation_count--;
+
+               /*
+                * Detect validation_count underflows to ensure that the warning
+                * message will only be printed once.
+                */
+               if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+                       ACPI_WARNING((AE_INFO,
+                                     "Table %p, Validation count underflows\n",
+                                     table_desc));
+                       return_VOID;
+               }
        }
-       table_desc->validation_count--;
 
        if (table_desc->validation_count == 0) {
 
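The two ACPICA hunks above turn validation_count into a saturating counter: once it reaches ACPI_MAX_TABLE_VALIDATIONS it is neither incremented nor decremented again, so the overflow/underflow warning can only be printed once. A rough sketch of the idea with made-up names (not the ACPICA code itself):

	/* assumed cap; the real limit is ACPI_MAX_TABLE_VALIDATIONS */
	#define MAX_VALIDATIONS 0xffff

	static void table_get(struct table_desc *t)
	{
		if (t->validation_count < MAX_VALIDATIONS)
			t->validation_count++;	/* saturates instead of wrapping to 0 */
	}

	static void table_put(struct table_desc *t)
	{
		if (t->validation_count < MAX_VALIDATIONS)
			t->validation_count--;	/* a saturated table stays pinned and is never unmapped */
	}
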
index e0587c85bafdf73a299c40d531eefa27deab4761..ff096d9755b925d9f72105f42993ebcc7c0522e1 100644 (file)
@@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
                                return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
                        }
 
-                       /*
-                        * The end_tag opcode must be followed by a zero byte.
-                        * Although this byte is technically defined to be a checksum,
-                        * in practice, all ASL compilers set this byte to zero.
-                        */
-                       if (*(aml + 1) != 0) {
-                               return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-                       }
-
                        /* Return the pointer to the end_tag if requested */
 
                        if (!user_function) {
index 992f7c20760f3c69a07b9f4558d8774fc39520ed..88220ff3e1c226277e7ef8895317a5a1314ddee6 100644 (file)
@@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
        int ret;
        ret = sscanf(buf, "%u", &input);
 
-       /* cannot be lower than 11 otherwise freq will not fall */
-       if (ret != 1 || input < 11 || input > 100 ||
+       /* cannot be lower than 1 otherwise freq will not fall */
+       if (ret != 1 || input < 1 || input > 100 ||
                        input >= dbs_data->up_threshold)
                return -EINVAL;
 
index ffca4fc0061d6dad4de3c0d8ce4724461d7a13da..ae8eb03598892d18155d71161b4ea699d614ce97 100644 (file)
@@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
                if (!state_node)
                        break;
 
-               if (!of_device_is_available(state_node))
+               if (!of_device_is_available(state_node)) {
+                       of_node_put(state_node);
                        continue;
+               }
 
                if (!idle_state_valid(state_node, i, cpumask)) {
                        pr_warn("%s idle state not valid, bailing out\n",
index 5c3e7b11e8a66c4853a1aabf9850537eea26767a..f6e7956fc91a212538b0ce937fe34f17aa73f073 100644 (file)
@@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev)
        }
        platform_set_drvdata(pdev, nocp);
 
-       clk_prepare_enable(nocp->clk);
+       ret = clk_prepare_enable(nocp->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+               return ret;
+       }
 
        pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
                        dev_name(dev));
index 9b7350935b73259828a897a7eea6d4fc9b2e4c4f..d96e3dc71cf8ba74fa92078d97c79d434344c949 100644 (file)
@@ -44,7 +44,7 @@ struct exynos_ppmu {
        { "ppmu-event2-"#name, PPMU_PMNCNT2 },  \
        { "ppmu-event3-"#name, PPMU_PMNCNT3 }
 
-struct __exynos_ppmu_events {
+static struct __exynos_ppmu_events {
        char *name;
        int id;
 } ppmu_events[] = {
@@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
                        dev_name(&pdev->dev), desc[i].name);
        }
 
-       clk_prepare_enable(info->ppmu.clk);
+       ret = clk_prepare_enable(info->ppmu.clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+               return ret;
+       }
 
        return 0;
 }
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index dc269cb288c209d60e780eff287af2930fb4c477..951b6c79f166a7d2b4ec14e12096559df9aed684 100644
@@ -47,7 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
-DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0400, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,       0444, DMI_BOARD_VERSION);
@@ -192,7 +192,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
-       ADD_DMI_ATTR(product_family,      DMI_PRODUCT_FAMILY);
+       ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 93f7acdaac7ac19c057fc6b98a76c270a4646f24..783041964439acaa60c80eac80df24080579d036 100644
@@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
 
        buf = dmi_early_remap(dmi_base, orig_dmi_len);
        if (buf == NULL)
-               return -1;
+               return -ENOMEM;
 
        dmi_decode_table(buf, decode, NULL);
 
@@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
        const char *d = (const char *) dm;
        const char *p;
 
-       if (dmi_ident[slot])
+       if (dmi_ident[slot] || dm->length <= string)
                return;
 
        p = dmi_string(dm, d[string]);
@@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
 static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
                int index)
 {
-       const u8 *d = (u8 *) dm + index;
+       const u8 *d;
        char *s;
        int is_ff = 1, is_00 = 1, i;
 
-       if (dmi_ident[slot])
+       if (dmi_ident[slot] || dm->length <= index + 16)
                return;
 
+       d = (u8 *) dm + index;
        for (i = 0; i < 16 && (is_ff || is_00); i++) {
                if (d[i] != 0x00)
                        is_00 = 0;
@@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 static void __init dmi_save_type(const struct dmi_header *dm, int slot,
                int index)
 {
-       const u8 *d = (u8 *) dm + index;
+       const u8 *d;
        char *s;
 
-       if (dmi_ident[slot])
+       if (dmi_ident[slot] || dm->length <= index)
                return;
 
        s = dmi_alloc(4);
        if (!s)
                return;
 
+       d = (u8 *) dm + index;
        sprintf(s, "%u", *d & 0x7F);
        dmi_ident[slot] = s;
 }
@@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
 
 static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
 {
-       int i, count = *(u8 *)(dm + 1);
+       int i, count;
        struct dmi_device *dev;
 
+       if (dm->length < 0x05)
+               return;
+
+       count = *(u8 *)(dm + 1);
        for (i = 1; i <= count; i++) {
                const char *devname = dmi_string(dm, i);
 
@@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
        const char *name;
        const u8 *d = (u8 *)dm;
 
+       if (dm->length < 0x0B)
+               return;
+
        /* Skip disabled device */
        if ((d[0x5] & 0x80) == 0)
                return;
@@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
        const char *d = (const char *)dm;
        static int nr;
 
-       if (dm->type != DMI_ENTRY_MEM_DEVICE)
+       if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
                return;
        if (nr >= dmi_memdev_nr) {
                pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -649,6 +658,21 @@ void __init dmi_scan_machine(void)
                if (p == NULL)
                        goto error;
 
+               /*
+                * Same logic as above, look for a 64-bit entry point
+                * first, and if not found, fall back to 32-bit entry point.
+                */
+               memcpy_fromio(buf, p, 16);
+               for (q = p + 16; q < p + 0x10000; q += 16) {
+                       memcpy_fromio(buf + 16, q, 16);
+                       if (!dmi_smbios3_present(buf)) {
+                               dmi_available = 1;
+                               dmi_early_unmap(p, 0x10000);
+                               goto out;
+                       }
+                       memcpy(buf, buf + 16, 16);
+               }
+
                /*
                 * Iterate over all possible DMI header addresses q.
                 * Maintain the 32 bytes around q in buf.  On the
@@ -659,7 +683,7 @@ void __init dmi_scan_machine(void)
                memset(buf, 0, 16);
                for (q = p; q < p + 0x10000; q += 16) {
                        memcpy_fromio(buf + 16, q, 16);
-                       if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
+                       if (!dmi_present(buf)) {
                                dmi_available = 1;
                                dmi_early_unmap(p, 0x10000);
                                goto out;
@@ -993,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date);
  *     @decode: Callback function
  *     @private_data: Private data to be passed to the callback function
  *
- *     Returns -1 when the DMI table can't be reached, 0 on success.
+ *     Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ *     or a different negative error code if DMI walking fails.
  */
 int dmi_walk(void (*decode)(const struct dmi_header *, void *),
             void *private_data)
@@ -1001,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
        u8 *buf;
 
        if (!dmi_available)
-               return -1;
+               return -ENXIO;
 
        buf = dmi_remap(dmi_base, dmi_len);
        if (buf == NULL)
-               return -1;
+               return -ENOMEM;
 
        dmi_decode_table(buf, decode, private_data);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a2e4a0166d8f33a542e950e7c87abc11fa..5dffa27afa45a2dff164d46b53f6af2e24ca00ab 100644
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
        u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
        if (amdgpu_crtc->base.enabled && num_heads && mode) {
-               active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-               line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+               active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+                                           (u32)mode->clock);
+               line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+                                         (u32)mode->clock);
+               line_time = min(line_time, (u32)65535);
 
                /* watermark for high clocks */
                if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 773654a19749fa7594250c6683d688e50bb12172..47bbc87f96d2bbf291db431e964b0dc9b0c5424e 100644
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
        u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
        if (amdgpu_crtc->base.enabled && num_heads && mode) {
-               active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-               line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+               active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+                                           (u32)mode->clock);
+               line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+                                         (u32)mode->clock);
+               line_time = min(line_time, (u32)65535);
 
                /* watermark for high clocks */
                if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 1f3552967ba374c2e5677a742b70e2040f867d65..d8c9a959493ed512104e8b8414ffb1742c9566e3 100644
@@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
        fixed20_12 a, b, c;
 
        if (amdgpu_crtc->base.enabled && num_heads && mode) {
-               active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-               line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+               active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+                                           (u32)mode->clock);
+               line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+                                         (u32)mode->clock);
+               line_time = min(line_time, (u32)65535);
                priority_a_cnt = 0;
                priority_b_cnt = 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c170e5e685ad58aafa8dc6df301e879f264..db30c6ba563a4a2362e3fde4545ed92d35b1bae9 100644
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
        u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
        if (amdgpu_crtc->base.enabled && num_heads && mode) {
-               active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-               line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+               active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+                                           (u32)mode->clock);
+               line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+                                         (u32)mode->clock);
+               line_time = min(line_time, (u32)65535);
 
                /* watermark for high clocks */
                if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 40d2827a6d19846c96d6e06ff84c0fdfc63567d1..53e78d092d18e3b9085d0784772639f8fa01f21c 100644
@@ -1,6 +1,7 @@
 config DRM_DW_HDMI
        tristate
        select DRM_KMS_HELPER
+       select REGMAP_MMIO
 
 config DRM_DW_HDMI_AHB_AUDIO
        tristate "Synopsys Designware AHB Audio interface"
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb2974caacdb0357b8fcde1a585da51ead7b43..2cfe96d3e5d13e0dd1f6cc689a9af78608007617 100644
 #define VGT_VERSION_MAJOR 1
 #define VGT_VERSION_MINOR 0
 
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
-       INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
 /*
  * notifications from guest to vgpu device model
  */
@@ -55,8 +51,8 @@ enum vgt_g2v_type {
 
 struct vgt_if {
        u64 magic;              /* VGT_MAGIC */
-       uint16_t version_major;
-       uint16_t version_minor;
+       u16 version_major;
+       u16 version_minor;
        u32 vgt_id;             /* ID of vGT instance */
        u32 rsv1[12];           /* pad to offset 0x40 */
        /*
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4ab8a973b61f155c47ba528d1a907385e22b3b94..2e739018fb4c69581832d49800c5364d02160fb5 100644
@@ -60,8 +60,8 @@
  */
 void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
-       uint64_t magic;
-       uint32_t version;
+       u64 magic;
+       u16 version_major;
 
        BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
        if (magic != VGT_MAGIC)
                return;
 
-       version = INTEL_VGT_IF_VERSION_ENCODE(
-               __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
-               __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
-       if (version != INTEL_VGT_IF_VERSION) {
+       version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+       if (version_major < VGT_VERSION_MAJOR) {
                DRM_INFO("VGT interface version mismatch!\n");
                return;
        }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 569717a1272367a91cf682a9cae7640f9ae32777..96b0b01677e26b22f382868f4b8b4c6dd738a4b3 100644
@@ -4598,7 +4598,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 
 static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
-                 unsigned scaler_user, int *scaler_id, unsigned int rotation,
+                 unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h)
 {
        struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4607,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                to_intel_crtc(crtc_state->base.crtc);
        int need_scaling;
 
-       need_scaling = drm_rotation_90_or_270(rotation) ?
-               (src_h != dst_w || src_w != dst_h):
-               (src_w != dst_w || src_h != dst_h);
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
+       need_scaling = src_w != dst_w || src_h != dst_h;
 
        /*
         * if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4674,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
        const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-               &state->scaler_state.scaler_id, DRM_ROTATE_0,
+               &state->scaler_state.scaler_id,
                state->pipe_src_w, state->pipe_src_h,
                adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4700,7 +4703,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
                                &plane_state->scaler_id,
-                               plane_state->base.rotation,
                                drm_rect_width(&plane_state->base.src) >> 16,
                                drm_rect_height(&plane_state->base.src) >> 16,
                                drm_rect_width(&plane_state->base.dst),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2ca481b5aa691872d39263605ef67b9c7335cec6..078fd1bfa5ea8aaa47cd552a8b37766394e76d01 100644
@@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
 
        /* n.b., src is 16.16 fixed point, dst is whole integer */
        if (plane->id == PLANE_CURSOR) {
+               /*
+                * Cursors only support 0/180 degree rotation,
+                * hence no need to account for rotation here.
+                */
                src_w = pstate->base.src_w;
                src_h = pstate->base.src_h;
                dst_w = pstate->base.crtc_w;
                dst_h = pstate->base.crtc_h;
        } else {
+               /*
+                * Src coordinates are already rotated by 270 degrees for
+                * the 90/270 degree plane rotation cases (to match the
+                * GTT mapping), hence no need to account for rotation here.
+                */
                src_w = drm_rect_width(&pstate->base.src);
                src_h = drm_rect_height(&pstate->base.src);
                dst_w = drm_rect_width(&pstate->base.dst);
                dst_h = drm_rect_height(&pstate->base.dst);
        }
 
-       if (drm_rotation_90_or_270(pstate->base.rotation))
-               swap(dst_w, dst_h);
-
        downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
        downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
 
@@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
        if (y && format != DRM_FORMAT_NV12)
                return 0;
 
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
        width = drm_rect_width(&intel_pstate->base.src) >> 16;
        height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-       if (drm_rotation_90_or_270(pstate->rotation))
-               swap(width, height);
-
        /* for planar format */
        if (format == DRM_FORMAT_NV12) {
                if (y)  /* y-plane data rate */
@@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
            fb->modifier != I915_FORMAT_MOD_Yf_TILED)
                return 8;
 
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
        src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
        src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-       if (drm_rotation_90_or_270(pstate->rotation))
-               swap(src_w, src_h);
-
        /* Halve UV plane width and height for NV12 */
        if (fb->format->format == DRM_FORMAT_NV12 && !y) {
                src_w /= 2;
@@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
                width = intel_pstate->base.crtc_w;
                height = intel_pstate->base.crtc_h;
        } else {
+               /*
+                * Src coordinates are already rotated by 270 degrees for
+                * the 90/270 degree plane rotation cases (to match the
+                * GTT mapping), hence no need to account for rotation here.
+                */
                width = drm_rect_width(&intel_pstate->base.src) >> 16;
                height = drm_rect_height(&intel_pstate->base.src) >> 16;
        }
 
-       if (drm_rotation_90_or_270(pstate->rotation))
-               swap(width, height);
-
        cpp = fb->format->cpp[0];
        plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index adb411a078e8237c0fcb5ecde76bdf1287f9df50..f4b53588e071e4cdc10ecbf152fcb0cee3d55e10 100644
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
        if (IS_G200_SE(mdev)) {
-               if (mdev->unique_rev_id >= 0x02) {
+               if  (mdev->unique_rev_id >= 0x04) {
+                       WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+                       WREG8(MGAREG_CRTCEXT_DATA, 0);
+               } else if (mdev->unique_rev_id >= 0x02) {
                        u8 hi_pri_lvl;
                        u32 bpp;
                        u32 mb;
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
                        if (mga_vga_calculate_mode_bandwidth(mode, bpp)
                                > (30100 * 1024))
                                return MODE_BANDWIDTH;
+               } else {
+                       if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+                               > (55000 * 1024))
+                               return MODE_BANDWIDTH;
                }
        } else if (mdev->type == G200_WB) {
                if (mode->hdisplay > 1280)
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c9e8942ddb6226a7f409ed07f25a0eb96a..0abe77675b76d36ceaa38ece5a2a14afdf3eb060 100644
 #include "mxsfb_drv.h"
 #include "mxsfb_regs.h"
 
+#define MXS_SET_ADDR           0x4
+#define MXS_CLR_ADDR           0x8
+#define MODULE_CLKGATE         BIT(30)
+#define MODULE_SFTRST          BIT(31)
+/* 1 second delay should be plenty of time for block reset */
+#define RESET_TIMEOUT          1000000
+
 static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
 {
        return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
                clk_disable_unprepare(mxsfb->clk_disp_axi);
 }
 
+/*
+ * Clear the bit and poll it cleared.  This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+       u32 reg;
+
+       writel(mask, addr + MXS_CLR_ADDR);
+       return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+       int ret;
+
+       ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+       if (ret)
+               return ret;
+
+       writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+       ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+       if (ret)
+               return ret;
+
+       return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
 static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 {
        struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
         */
        mxsfb_enable_axi_clk(mxsfb);
 
+       /* Mandatory eLCDIF reset as per the Reference Manual */
+       err = mxsfb_reset_block(mxsfb->base);
+       if (err)
+               return;
+
        /* Clear the FIFOs */
        writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
 
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 008c145b7f29f60a298419931f1922555de5e35a..ca44233ceaccb87482068d992e4c415f756fa540 100644
@@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
        u32 tmp, wm_mask;
 
        if (radeon_crtc->base.enabled && num_heads && mode) {
-               active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-               line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+               active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+                                           (u32)mode->clock);
+               line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+                                         (u32)mode->clock);
+               line_time = min(line_time, (u32)65535);
 
                /* watermark for high clocks */
                if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0bf103536404e5dde2d480bf692a496a6865e817..534637203e709909bc11c934c7e1e20b4551e31e 100644
@@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
        fixed20_12 a, b, c;
 
        if (radeon_crtc->base.enabled && num_heads && mode) {
-               active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-               line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+               active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+                                           (u32)mode->clock);
+               line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+                                         (u32)mode->clock);
+               line_time = min(line_time, (u32)65535);
                priority_a_cnt = 0;
                priority_b_cnt = 0;
                dram_channels = evergreen_get_number_of_dram_channels(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 7431eb4a11b7f721fc8e44240c09742b02dc0446..d34d1cf33895766c55a122adad041be62e42c70c 100644
@@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
        }
 
        /* TODO: is this still necessary on NI+ ? */
-       if ((cmd == 0 || cmd == 1 || cmd == 0x3) &&
+       if ((cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 76d1888528e675c700b543fa6e10c77a466054d7..5303f25d5280ed49548db40007444dee7cf79abe 100644
@@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
        fixed20_12 a, b, c;
 
        if (radeon_crtc->base.enabled && num_heads && mode) {
-               active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-               line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+               active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+                                           (u32)mode->clock);
+               line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+                                         (u32)mode->clock);
+               line_time = min(line_time, (u32)65535);
                priority_a_cnt = 0;
                priority_b_cnt = 0;
 
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 9a1e34e48f64fb0ab684a20811a0be0ff0a09ab6..81f86a67c10d28416a3fbe3d69694b8c7dfc34ee 100644
@@ -451,18 +451,6 @@ fail:
 
 
 #ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *
-tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
-{
-       struct tegra_drm_context *context;
-
-       mutex_lock(&file->lock);
-       context = idr_find(&file->contexts, id);
-       mutex_unlock(&file->lock);
-
-       return context;
-}
-
 static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
 {
@@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
        if (err < 0)
                return err;
 
-       err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
+       err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                return err;
@@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = tegra_drm_file_get_context(fpriv, args->context);
+       context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
@@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = tegra_drm_file_get_context(fpriv, args->context);
+       context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
@@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = tegra_drm_file_get_context(fpriv, args->context);
+       context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
@@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = tegra_drm_file_get_context(fpriv, args->context);
+       context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f05ebb14fa636bce578c2fd00db88dbc835fdbe9..ac65f52850a6351e3ecb3ee27816b4c3d8df5e67 100644
@@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)
 
        host->rst = devm_reset_control_get(&pdev->dev, "host1x");
        if (IS_ERR(host->rst)) {
-               err = PTR_ERR(host->clk);
+               err = PTR_ERR(host->rst);
                dev_err(&pdev->dev, "failed to get reset: %d\n", err);
                return err;
        }
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 04cee65531d761c18e53775ffc784c3c3d993daa..6e040692f1d8f2c7032c51ad96540a341f80c35b 100644
@@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid)
                                 * hid-rmi should take care of them,
                                 * not hid-generic
                                 */
-                               if (IS_ENABLED(CONFIG_HID_RMI))
-                                       hid->group = HID_GROUP_RMI;
+                               hid->group = HID_GROUP_RMI;
                break;
        }
 
+       /* fall back to generic driver in case specific driver doesn't exist */
+       switch (hid->group) {
+       case HID_GROUP_MULTITOUCH_WIN_8:
+               /* fall-through */
+       case HID_GROUP_MULTITOUCH:
+               if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
+                       hid->group = HID_GROUP_GENERIC;
+               break;
+       case HID_GROUP_SENSOR_HUB:
+               if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
+                       hid->group = HID_GROUP_GENERIC;
+               break;
+       case HID_GROUP_RMI:
+               if (!IS_ENABLED(CONFIG_HID_RMI))
+                       hid->group = HID_GROUP_GENERIC;
+               break;
+       case HID_GROUP_WACOM:
+               if (!IS_ENABLED(CONFIG_HID_WACOM))
+                       hid->group = HID_GROUP_GENERIC;
+               break;
+       case HID_GROUP_LOGITECH_DJ_DEVICE:
+               if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
+                       hid->group = HID_GROUP_GENERIC;
+               break;
+       }
        vfree(parser);
        return 0;
 }
@@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
  * used as a driver. See hid_scan_report().
  */
 static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
        { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
@@ -1851,62 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
        { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
        { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+       { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
        { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
-       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
 #endif
-       { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+#if IS_ENABLED(CONFIG_HID_ELECOM)
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
        { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
        { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
        { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+        { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+        { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
        { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+       { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
@@ -1915,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
        { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
        { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
@@ -1930,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+       { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
 #if IS_ENABLED(CONFIG_HID_LENOVO)
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
 #endif
-       { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@@ -1957,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
@@ -1969,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
-       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MICROSOFT)
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
@@ -1995,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MONTEREY)
        { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
+       { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WIIMOTE)
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTI)
        { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTRIG)
        { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
@@ -2017,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
        { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
        { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ORTEK)
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
+       { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PENMOUNT)
        { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PETALYNX)
        { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PICOLCD)
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
        { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRIMAX)
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
+       { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RMI)
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+#endif
 #if IS_ENABLED(CONFIG_HID_ROCCAT)
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@@ -2051,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
 #endif
+#if IS_ENABLED(CONFIG_HID_SAMSUNG)
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
+       { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SONY)
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -2072,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
+       { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_STEELSERIES)
        { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SUNPLUS)
        { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2083,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TIVO)
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TOPSEED)
+       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TWINHAN)
        { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UCLOGIC)
+       { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -2096,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
+       { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WALTOP)
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@@ -2117,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_XINMO)
        { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZYDACRON)
        { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
-
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+#endif
        { }
 };
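
Grouping the entries above under IS_ENABLED() guards means a device is only listed as having a special driver when that driver is actually built, whether built-in (=y) or modular (=m); otherwise hid-generic can bind instead. A minimal sketch of the pattern, using placeholder CONFIG and ID names:

    #if IS_ENABLED(CONFIG_HID_EXAMPLE)      /* true for both =y and =m */
            { HID_USB_DEVICE(USB_VENDOR_ID_EXAMPLE, USB_DEVICE_ID_EXAMPLE_PAD) },
    #endif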
 
index 26b05106f0d3152e74bc0377dc9006203fc2c7db..93d28c0ec8bf0ef867bc958592c58f60d3ce2913 100644 (file)
@@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev)
        dev->addr_len           = 1;
        dev->tx_queue_len       = SSIP_TXQUEUE_LEN;
 
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
        dev->header_ops         = &phonet_header_ops;
 }
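
This follows the net core change that dropped dev->destructor in favour of a needs_free_netdev flag (with an optional priv_destructor hook for extra teardown); the core then calls free_netdev() itself after unregistration. A hedged sketch of the replacement pattern, with a hypothetical setup callback:

    static void example_setup(struct net_device *dev)
    {
            dev->needs_free_netdev = true;  /* core frees the netdev for us */
            dev->priv_destructor   = NULL;  /* optional hook for private teardown */
    }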
 
index dd4190b50df6a71a43d0fb22175ddb3f1e7d64c7..6066bbfc42fe4126a849495563e1eae58d4244d4 100644 (file)
@@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
 static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
 {
        struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
-       int count;
+       unsigned int count, tmp;
 
        for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
                if (!meson_sar_adc_get_fifo_count(indio_dev))
                        break;
 
-               regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0);
+               regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
        }
 }
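
The fix matters because regmap_read() returns the register value through its third argument, so it needs a valid pointer; the old code passed 0, i.e. a NULL pointer. A minimal sketch of the corrected call shape (example_drain_one is hypothetical):

    static int example_drain_one(struct regmap *map)
    {
            unsigned int tmp;

            /* the value is discarded; the read itself pops one FIFO entry */
            return regmap_read(map, MESON_SAR_ADC_FIFO_RD, &tmp);
    }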
 
index b0c7d8ee5cb8d859d683b2b0aca40ef5999d2546..6888167ca1e6cb65ab926b36fe8199684a00527d 100644 (file)
@@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
        adc->dev = dev;
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!iores)
+               return -EINVAL;
+
        adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
-       if (IS_ERR(adc->base))
-               return PTR_ERR(adc->base);
+       if (!adc->base)
+               return -ENOMEM;
 
        init_completion(&adc->completion);
        spin_lock_init(&adc->lock);
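
Two error conventions are being untangled here: platform_get_resource() returns NULL when the resource is missing, and devm_ioremap() also reports failure with NULL rather than an ERR_PTR value, so the previous IS_ERR()/PTR_ERR() check could never trigger. A hedged sketch of the corrected shape (example_map_mmio is hypothetical):

    static void __iomem *example_map_mmio(struct platform_device *pdev)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return NULL;    /* caller maps this to -EINVAL */

            return devm_ioremap(&pdev->dev, res->start, resource_size(res));
    }
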
index dd99d273bae9bf620f2543b39400d10e5ae66fd6..ff03324dee132f49455fd156bacbc9b450146a19 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/poll.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
 #include <linux/dma-mapping.h>
 #include <linux/sizes.h>
index 9fabed47053ddaf2517500845d337429f6c9af22..2b5a320f42c51e1872835b19454f8924f2e004ea 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
 #include <linux/iio/buffer-dmaengine.h>
 
index 96dabbd2f004b3d91c1cde6cca2f6dfdaf091d58..88a7c5d4e4d2850ae9755b5fb79ff595855cac7c 100644 (file)
@@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
 static const struct inv_mpu6050_reg_map reg_set_6500 = {
        .sample_rate_div        = INV_MPU6050_REG_SAMPLE_RATE_DIV,
        .lpf                    = INV_MPU6050_REG_CONFIG,
+       .accel_lpf              = INV_MPU6500_REG_ACCEL_CONFIG_2,
        .user_ctrl              = INV_MPU6050_REG_USER_CTRL,
        .fifo_en                = INV_MPU6050_REG_FIFO_EN,
        .gyro_config            = INV_MPU6050_REG_GYRO_CONFIG,
@@ -210,6 +211,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
 }
 EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
 
+/**
+ *  inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
+ *
+ *  MPU60xx/MPU9150 use a single register for both accelerometer and gyroscope.
+ *  MPU6500 and later have a dedicated register for the accelerometer.
+ */
+static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
+                                   enum inv_mpu6050_filter_e val)
+{
+       int result;
+
+       result = regmap_write(st->map, st->reg->lpf, val);
+       if (result)
+               return result;
+
+       switch (st->chip_type) {
+       case INV_MPU6050:
+       case INV_MPU6000:
+       case INV_MPU9150:
+               /* old chips, nothing to do */
+               result = 0;
+               break;
+       default:
+               /* set accel lpf */
+               result = regmap_write(st->map, st->reg->accel_lpf, val);
+               break;
+       }
+
+       return result;
+}
+
 /**
  *  inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
  *
@@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
        if (result)
                return result;
 
-       d = INV_MPU6050_FILTER_20HZ;
-       result = regmap_write(st->map, st->reg->lpf, d);
+       result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
        if (result)
                return result;
 
@@ -537,6 +568,8 @@ error_write_raw:
  *                  would be aliasing. This function basically searches for the
  *                  correct low pass parameters based on the fifo rate, e.g.,
  *                  the sampling frequency.
+ *
+ *  lpf is set automatically when the sampling rate is set, to avoid aliasing.
  */
 static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
 {
@@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
        while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
                i++;
        data = d[i];
-       result = regmap_write(st->map, st->reg->lpf, data);
+       result = inv_mpu6050_set_lpf_regs(st, data);
        if (result)
                return result;
        st->chip_config.lpf = data;
index ef13de7a2c20fd6ef3e450f8d3094483af71b9a9..953a0c09d5685a18d447e4ad07f3f334b32cd8cf 100644 (file)
@@ -28,6 +28,7 @@
  *  struct inv_mpu6050_reg_map - Notable registers.
  *  @sample_rate_div:  Divider applied to gyro output rate.
  *  @lpf:              Configures internal low pass filter.
+ *  @accel_lpf:                Configures accelerometer low pass filter.
  *  @user_ctrl:                Enables/resets the FIFO.
  *  @fifo_en:          Determines which data will appear in FIFO.
  *  @gyro_config:      gyro config register.
@@ -47,6 +48,7 @@
 struct inv_mpu6050_reg_map {
        u8 sample_rate_div;
        u8 lpf;
+       u8 accel_lpf;
        u8 user_ctrl;
        u8 fifo_en;
        u8 gyro_config;
@@ -188,6 +190,7 @@ struct inv_mpu6050_state {
 #define INV_MPU6050_FIFO_THRESHOLD           500
 
 /* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_CONFIG_2      0x1D
 #define INV_MPU6500_REG_ACCEL_OFFSET        0x77
 
 /* delay time in milliseconds */
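
Only the MPU6500-class register map is expected to fill in the new accel_lpf field; the MPU6050/6000/9150 maps leave it at zero and inv_mpu6050_set_lpf_regs() skips the extra write for those chip types. A trimmed, illustrative initializer (example_reg_set_6500 is hypothetical and omits the table's other fields):

    static const struct inv_mpu6050_reg_map example_reg_set_6500 = {
            .lpf       = INV_MPU6050_REG_CONFIG,            /* shared DLPF config */
            .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,    /* dedicated accel DLPF, 0x1D */
    };
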
index 02971e239a182debc69aea1306169be981d97086..ece6926fa2e6aa242e38a189a7b3a8f7ed22e140 100644 (file)
@@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                return ret;
 
        rt = (struct rt6_info *)dst;
-       if (ipv6_addr_any(&fl6.saddr)) {
-               ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
-                                        &fl6.daddr, 0, &fl6.saddr);
-               if (ret)
-                       goto put;
-
+       if (ipv6_addr_any(&src_in->sin6_addr)) {
                src_in->sin6_family = AF_INET6;
                src_in->sin6_addr = fl6.saddr;
        }
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 
        *pdst = dst;
        return 0;
-put:
-       dst_release(dst);
-       return ret;
 }
 #else
 static int addr6_resolve(struct sockaddr_in6 *src_in,
index ebf7be8d4139b87dd8232568603ab1aa8c19d045..08772836fded416e41be8eccfe53cf3e8427691b 100644 (file)
 #define BNXT_RE_MAX_SRQC_COUNT         (64 * 1024)
 #define BNXT_RE_MAX_CQ_COUNT           (64 * 1024)
 
+#define BNXT_RE_UD_QP_HW_STALL         0x400000
+
+#define BNXT_RE_RQ_WQE_THRESHOLD       32
+
 struct bnxt_re_work {
        struct work_struct      work;
        unsigned long           event;
index 7ba9e699d7abc65cb82fbd588e9ea12e7f71433b..c7bd68311d0c5317973ecf9c3fb8fce8fe8ffad4 100644 (file)
 #include "ib_verbs.h"
 #include <rdma/bnxt_re-abi.h>
 
+static int __from_ib_access_flags(int iflags)
+{
+       int qflags = 0;
+
+       if (iflags & IB_ACCESS_LOCAL_WRITE)
+               qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+       if (iflags & IB_ACCESS_REMOTE_READ)
+               qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
+       if (iflags & IB_ACCESS_REMOTE_WRITE)
+               qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
+       if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+               qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
+       if (iflags & IB_ACCESS_MW_BIND)
+               qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
+       if (iflags & IB_ZERO_BASED)
+               qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
+       if (iflags & IB_ACCESS_ON_DEMAND)
+               qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
+       return qflags;
+};
+
+static enum ib_access_flags __to_ib_access_flags(int qflags)
+{
+       enum ib_access_flags iflags = 0;
+
+       if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
+               iflags |= IB_ACCESS_LOCAL_WRITE;
+       if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
+               iflags |= IB_ACCESS_REMOTE_WRITE;
+       if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
+               iflags |= IB_ACCESS_REMOTE_READ;
+       if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
+               iflags |= IB_ACCESS_REMOTE_ATOMIC;
+       if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
+               iflags |= IB_ACCESS_MW_BIND;
+       if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
+               iflags |= IB_ZERO_BASED;
+       if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
+               iflags |= IB_ACCESS_ON_DEMAND;
+       return iflags;
+};
+
 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
                             struct bnxt_qplib_sge *sg_list, int num)
 {
@@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
        ib_attr->max_total_mcast_qp_attach = 0;
        ib_attr->max_ah = dev_attr->max_ah;
 
-       ib_attr->max_fmr = dev_attr->max_fmr;
-       ib_attr->max_map_per_fmr = 1;   /* ? */
+       ib_attr->max_fmr = 0;
+       ib_attr->max_map_per_fmr = 0;
 
        ib_attr->max_srq = dev_attr->max_srq;
        ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
        return IB_LINK_LAYER_ETHERNET;
 }
 
+#define        BNXT_RE_FENCE_PBL_SIZE  DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
+
+static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
+{
+       struct bnxt_re_fence_data *fence = &pd->fence;
+       struct ib_mr *ib_mr = &fence->mr->ib_mr;
+       struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+
+       memset(wqe, 0, sizeof(*wqe));
+       wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
+       wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
+       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+       wqe->bind.zero_based = false;
+       wqe->bind.parent_l_key = ib_mr->lkey;
+       wqe->bind.va = (u64)(unsigned long)fence->va;
+       wqe->bind.length = fence->size;
+       wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
+       wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
+
+       /* Save the initial rkey in fence structure for now;
+        * wqe->bind.r_key will be set at (re)bind time.
+        */
+       fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
+}
+
+static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
+{
+       struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
+                                            qplib_qp);
+       struct ib_pd *ib_pd = qp->ib_qp.pd;
+       struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+       struct bnxt_re_fence_data *fence = &pd->fence;
+       struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
+       struct bnxt_qplib_swqe wqe;
+       int rc;
+
+       memcpy(&wqe, fence_wqe, sizeof(wqe));
+       wqe.bind.r_key = fence->bind_rkey;
+       fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
+
+       dev_dbg(rdev_to_dev(qp->rdev),
+               "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+               wqe.bind.r_key, qp->qplib_qp.id, pd);
+       rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+       if (rc) {
+               dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+               return rc;
+       }
+       bnxt_qplib_post_send_db(&qp->qplib_qp);
+
+       return rc;
+}
+
+static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
+{
+       struct bnxt_re_fence_data *fence = &pd->fence;
+       struct bnxt_re_dev *rdev = pd->rdev;
+       struct device *dev = &rdev->en_dev->pdev->dev;
+       struct bnxt_re_mr *mr = fence->mr;
+
+       if (fence->mw) {
+               bnxt_re_dealloc_mw(fence->mw);
+               fence->mw = NULL;
+       }
+       if (mr) {
+               if (mr->ib_mr.rkey)
+                       bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
+                                            true);
+               if (mr->ib_mr.lkey)
+                       bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+               kfree(mr);
+               fence->mr = NULL;
+       }
+       if (fence->dma_addr) {
+               dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
+                                DMA_BIDIRECTIONAL);
+               fence->dma_addr = 0;
+       }
+}
+
+static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+{
+       int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
+       struct bnxt_re_fence_data *fence = &pd->fence;
+       struct bnxt_re_dev *rdev = pd->rdev;
+       struct device *dev = &rdev->en_dev->pdev->dev;
+       struct bnxt_re_mr *mr = NULL;
+       dma_addr_t dma_addr = 0;
+       struct ib_mw *mw;
+       u64 pbl_tbl;
+       int rc;
+
+       dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
+                                 DMA_BIDIRECTIONAL);
+       rc = dma_mapping_error(dev, dma_addr);
+       if (rc) {
+               dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+               rc = -EIO;
+               fence->dma_addr = 0;
+               goto fail;
+       }
+       fence->dma_addr = dma_addr;
+
+       /* Allocate a MR */
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+       fence->mr = mr;
+       mr->rdev = rdev;
+       mr->qplib_mr.pd = &pd->qplib_pd;
+       mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+       mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+       rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+       if (rc) {
+               dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+               goto fail;
+       }
+
+       /* Register MR */
+       mr->ib_mr.lkey = mr->qplib_mr.lkey;
+       mr->qplib_mr.va = (u64)(unsigned long)fence->va;
+       mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
+       pbl_tbl = dma_addr;
+       rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
+                              BNXT_RE_FENCE_PBL_SIZE, false);
+       if (rc) {
+               dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+               goto fail;
+       }
+       mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
+       /* Create a fence MW only for kernel consumers */
+       mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
+       if (!mw) {
+               dev_err(rdev_to_dev(rdev),
+                       "Failed to create fence-MW for PD: %p\n", pd);
+               rc = -EINVAL;
+               goto fail;
+       }
+       fence->mw = mw;
+
+       bnxt_re_create_fence_wqe(pd);
+       return 0;
+
+fail:
+       bnxt_re_destroy_fence_mr(pd);
+       return rc;
+}
+
 /* Protection Domains */
 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 {
@@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
        struct bnxt_re_dev *rdev = pd->rdev;
        int rc;
 
+       bnxt_re_destroy_fence_mr(pd);
        if (ib_pd->uobject && pd->dpi.dbr) {
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *ucntx;
@@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
                }
        }
 
+       if (!udata)
+               if (bnxt_re_create_fence_mr(pd))
+                       dev_warn(rdev_to_dev(rdev),
+                                "Failed to create Fence-MR\n");
        return &pd->ib_pd;
 dbfail:
        (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
        /* Shadow QP SQ depth should be same as QP1 RQ depth */
        qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.sq.max_sge = 2;
+       /* Q full delta can be 1 since it is an internal QP */
+       qp->qplib_qp.sq.q_full_delta = 1;
 
        qp->qplib_qp.scq = qp1_qp->scq;
        qp->qplib_qp.rcq = qp1_qp->rcq;
 
        qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
+       /* Q full delta can be 1 since it is an internal QP */
+       qp->qplib_qp.rq.q_full_delta = 1;
 
        qp->qplib_qp.mtu = qp1_qp->mtu;
 
@@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
        qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
                                  IB_SIGNAL_ALL_WR) ? true : false);
 
-       entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
-       qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
-                                       dev_attr->max_qp_wqes + 1);
-
        qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
        if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
@@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);
 
+               qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+                                               qp_init_attr->cap.max_recv_wr;
+
                qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
        qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
 
        if (qp_init_attr->qp_type == IB_QPT_GSI) {
+               /* Allocate 1 more than what's provided */
+               entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
+               qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+                                               dev_attr->max_qp_wqes + 1);
+               qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+                                               qp_init_attr->cap.max_send_wr;
                qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                }
 
        } else {
+               /* Allocate 128 + 1 more than what's provided */
+               entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
+                                            BNXT_QPLIB_RESERVED_QP_WRS + 1);
+               qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+                                               dev_attr->max_qp_wqes +
+                                               BNXT_QPLIB_RESERVED_QP_WRS + 1);
+               qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+
+               /*
+                * Reserve one slot for the phantom WQE. The application can
+                * post one extra entry in this case, but allowing it avoids an
+                * unexpected queue-full condition.
+                */
+
+               qp->qplib_qp.sq.q_full_delta -= 1;
+
                qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
                qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
                if (udata) {
@@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 
        qp->ib_qp.qp_num = qp->qplib_qp.id;
        spin_lock_init(&qp->sq_lock);
+       spin_lock_init(&qp->rq_lock);
 
        if (udata) {
                struct bnxt_re_qp_resp resp;
@@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
        }
 }
 
-static int __from_ib_access_flags(int iflags)
-{
-       int qflags = 0;
-
-       if (iflags & IB_ACCESS_LOCAL_WRITE)
-               qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
-       if (iflags & IB_ACCESS_REMOTE_READ)
-               qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
-       if (iflags & IB_ACCESS_REMOTE_WRITE)
-               qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
-       if (iflags & IB_ACCESS_REMOTE_ATOMIC)
-               qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
-       if (iflags & IB_ACCESS_MW_BIND)
-               qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
-       if (iflags & IB_ZERO_BASED)
-               qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
-       if (iflags & IB_ACCESS_ON_DEMAND)
-               qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
-       return qflags;
-};
-
-static enum ib_access_flags __to_ib_access_flags(int qflags)
-{
-       enum ib_access_flags iflags = 0;
-
-       if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
-               iflags |= IB_ACCESS_LOCAL_WRITE;
-       if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
-               iflags |= IB_ACCESS_REMOTE_WRITE;
-       if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
-               iflags |= IB_ACCESS_REMOTE_READ;
-       if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
-               iflags |= IB_ACCESS_REMOTE_ATOMIC;
-       if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
-               iflags |= IB_ACCESS_MW_BIND;
-       if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
-               iflags |= IB_ZERO_BASED;
-       if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
-               iflags |= IB_ACCESS_ON_DEMAND;
-       return iflags;
-};
-
 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
                                    struct bnxt_re_qp *qp1_qp,
                                    int qp_attr_mask)
@@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
                qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);
+               qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+                                               qp_attr->cap.max_send_wr;
+               /*
+                * Reserve one slot for the phantom WQE. An application can
+                * post one extra entry in this case; allowing it avoids an
+                * unexpected queue-full condition.
+                */
+               qp->qplib_qp.sq.q_full_delta -= 1;
                qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
                if (qp->qplib_qp.rq.max_wqe) {
                        entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
                        qp->qplib_qp.rq.max_wqe =
                                min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+                       qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+                                                      qp_attr->cap.max_recv_wr;
                        qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
                } else {
                        /* SRQ was used prior, just ignore the RQ caps */
@@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
        return payload_sz;
 }
 
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+       if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+            qp->ib_qp.qp_type == IB_QPT_GSI ||
+            qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+            qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+               int qp_attr_mask;
+               struct ib_qp_attr qp_attr;
+
+               qp_attr_mask = IB_QP_STATE;
+               qp_attr.qp_state = IB_QPS_RTS;
+               bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+               qp->qplib_qp.wqe_cnt = 0;
+       }
+}
+
 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
                                       struct bnxt_re_qp *qp,
                                struct ib_send_wr *wr)
@@ -1928,6 +2137,7 @@ bad:
                wr = wr->next;
        }
        bnxt_qplib_post_send_db(&qp->qplib_qp);
+       bnxt_ud_qp_hw_stall_workaround(qp);
        spin_unlock_irqrestore(&qp->sq_lock, flags);
        return rc;
 }
@@ -2024,6 +2234,7 @@ bad:
                wr = wr->next;
        }
        bnxt_qplib_post_send_db(&qp->qplib_qp);
+       bnxt_ud_qp_hw_stall_workaround(qp);
        spin_unlock_irqrestore(&qp->sq_lock, flags);
 
        return rc;
@@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_qplib_swqe wqe;
        int rc = 0, payload_sz = 0;
+       unsigned long flags;
+       u32 count = 0;
 
+       spin_lock_irqsave(&qp->rq_lock, flags);
        while (wr) {
                /* House keeping */
                memset(&wqe, 0, sizeof(wqe));
@@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
                        *bad_wr = wr;
                        break;
                }
+
+               /* Ring the DB once the number of posted RQEs reaches the threshold */
+               if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+                       bnxt_qplib_post_recv_db(&qp->qplib_qp);
+                       count = 0;
+               }
+
                wr = wr->next;
        }
-       bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+       if (count)
+               bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+       spin_unlock_irqrestore(&qp->rq_lock, flags);
+
        return rc;
 }
 
@@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
                wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
 }
 
+static int send_phantom_wqe(struct bnxt_re_qp *qp)
+{
+       struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
+       unsigned long flags;
+       int rc = 0;
+
+       spin_lock_irqsave(&qp->sq_lock, flags);
+
+       rc = bnxt_re_bind_fence_mw(lib_qp);
+       if (!rc) {
+               lib_qp->sq.phantom_wqe_cnt++;
+               dev_dbg(&lib_qp->sq.hwq.pdev->dev,
+                       "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+                       lib_qp->id, lib_qp->sq.hwq.prod,
+                       HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+                       lib_qp->sq.phantom_wqe_cnt);
+       }
+
+       spin_unlock_irqrestore(&qp->sq_lock, flags);
+       return rc;
+}
+
 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 {
        struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
        struct bnxt_re_qp *qp;
        struct bnxt_qplib_cqe *cqe;
        int i, ncqe, budget;
+       struct bnxt_qplib_q *sq;
+       struct bnxt_qplib_qp *lib_qp;
        u32 tbl_idx;
        struct bnxt_re_sqp_entries *sqp_entry = NULL;
        unsigned long flags;
@@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
        }
        cqe = &cq->cql[0];
        while (budget) {
-               ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget);
+               lib_qp = NULL;
+               ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
+               if (lib_qp) {
+                       sq = &lib_qp->sq;
+                       if (sq->send_phantom) {
+                               qp = container_of(lib_qp,
+                                                 struct bnxt_re_qp, qplib_qp);
+                               if (send_phantom_wqe(qp) == -ENOMEM)
+                                       dev_err(rdev_to_dev(cq->rdev),
+                                               "Phantom failed! Scheduled to send again\n");
+                               else
+                                       sq->send_phantom = false;
+                       }
+               }
+
                if (!ncqe)
                        break;
 
@@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
        struct bnxt_re_dev *rdev = mr->rdev;
        int rc;
 
+       rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+       if (rc) {
+               dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+               return rc;
+       }
+
        if (mr->npages && mr->pages) {
                rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
                                                        &mr->qplib_frpl);
@@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
                mr->npages = 0;
                mr->pages = NULL;
        }
-       rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-
        if (!IS_ERR_OR_NULL(mr->ib_umem))
                ib_umem_release(mr->ib_umem);
 
@@ -2914,97 +3182,52 @@ fail:
        return ERR_PTR(rc);
 }
 
-/* Fast Memory Regions */
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
-                                struct ib_fmr_attr *fmr_attr)
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+                              struct ib_udata *udata)
 {
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
-       struct bnxt_re_fmr *fmr;
+       struct bnxt_re_mw *mw;
        int rc;
 
-       if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
-           fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
-               dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
+       mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+       if (!mw)
                return ERR_PTR(-ENOMEM);
-       }
-       fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
-       if (!fmr)
-               return ERR_PTR(-ENOMEM);
-
-       fmr->rdev = rdev;
-       fmr->qplib_fmr.pd = &pd->qplib_pd;
-       fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+       mw->rdev = rdev;
+       mw->qplib_mw.pd = &pd->qplib_pd;
 
-       rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
-       if (rc)
+       mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
+                              CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
+                              CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
+       rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
+       if (rc) {
+               dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
                goto fail;
+       }
+       mw->ib_mw.rkey = mw->qplib_mw.rkey;
 
-       fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
-       fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
-       fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
+       atomic_inc(&rdev->mw_count);
+       return &mw->ib_mw;
 
-       atomic_inc(&rdev->mr_count);
-       return &fmr->ib_fmr;
 fail:
-       kfree(fmr);
+       kfree(mw);
        return ERR_PTR(rc);
 }
 
-int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
-                        u64 iova)
+int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 {
-       struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
-                                            ib_fmr);
-       struct bnxt_re_dev *rdev = fmr->rdev;
+       struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
+       struct bnxt_re_dev *rdev = mw->rdev;
        int rc;
 
-       fmr->qplib_fmr.va = iova;
-       fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
-
-       rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
-                              list_len, true);
-       if (rc)
-               dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
-                       fmr->ib_fmr.lkey);
-       return rc;
-}
-
-int bnxt_re_unmap_fmr(struct list_head *fmr_list)
-{
-       struct bnxt_re_dev *rdev;
-       struct bnxt_re_fmr *fmr;
-       struct ib_fmr *ib_fmr;
-       int rc = 0;
-
-       /* Validate each FMRs inside the fmr_list */
-       list_for_each_entry(ib_fmr, fmr_list, list) {
-               fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
-               rdev = fmr->rdev;
-
-               if (rdev) {
-                       rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
-                                                 &fmr->qplib_fmr, true);
-                       if (rc)
-                               break;
-               }
+       rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
+       if (rc) {
+               dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
+               return rc;
        }
-       return rc;
-}
-
-int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
-{
-       struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
-                                              ib_fmr);
-       struct bnxt_re_dev *rdev = fmr->rdev;
-       int rc;
 
-       rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
-       if (rc)
-               dev_err(rdev_to_dev(rdev), "Failed to free FMR");
-
-       kfree(fmr);
-       atomic_dec(&rdev->mr_count);
+       kfree(mw);
+       atomic_dec(&rdev->mw_count);
        return rc;
 }
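
The q_full_delta values programmed above feed a bnxt_qplib_queue_full() check used by the post paths in qplib_fp.c further down; the helper itself lives in qplib_fp.h and is not part of this excerpt. A hedged sketch of what such a check plausibly looks like, keeping q_full_delta slots in reserve between producer and consumer:

    static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que)
    {
            /* treat the queue as full once the producer would eat into the
             * reserved delta ahead of the consumer index
             */
            return HWQ_CMP(que->hwq.prod + que->q_full_delta, &que->hwq) ==
                   HWQ_CMP(que->hwq.cons, &que->hwq);
    }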
 
index 5c3d71765454f6e8476b86fd329d63b45d7735f6..6c160f6a5398702d4b73a3c319db3660f472c190 100644 (file)
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
        u32                     refcnt;
 };
 
+#define BNXT_RE_FENCE_BYTES    64
+struct bnxt_re_fence_data {
+       u32 size;
+       u8 va[BNXT_RE_FENCE_BYTES];
+       dma_addr_t dma_addr;
+       struct bnxt_re_mr *mr;
+       struct ib_mw *mw;
+       struct bnxt_qplib_swqe bind_wqe;
+       u32 bind_rkey;
+};
+
 struct bnxt_re_pd {
        struct bnxt_re_dev      *rdev;
        struct ib_pd            ib_pd;
        struct bnxt_qplib_pd    qplib_pd;
        struct bnxt_qplib_dpi   dpi;
+       struct bnxt_re_fence_data fence;
 };
 
 struct bnxt_re_ah {
@@ -62,6 +74,7 @@ struct bnxt_re_qp {
        struct bnxt_re_dev      *rdev;
        struct ib_qp            ib_qp;
        spinlock_t              sq_lock;        /* protect sq */
+       spinlock_t              rq_lock;        /* protect rq */
        struct bnxt_qplib_qp    qplib_qp;
        struct ib_umem          *sumem;
        struct ib_umem          *rumem;
@@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
                               u32 max_num_sg);
 int bnxt_re_dereg_mr(struct ib_mr *mr);
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-                                struct ib_fmr_attr *fmr_attr);
-int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
-                        u64 iova);
-int bnxt_re_unmap_fmr(struct list_head *fmr_list);
-int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+                              struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int mr_access_flags,
                                  struct ib_udata *udata);
index 5d355401179b8ae5107e411421c673db86164696..1fce5e73216be1bc74c23d2773690dc824d0e251 100644 (file)
@@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
        ibdev->dereg_mr                 = bnxt_re_dereg_mr;
        ibdev->alloc_mr                 = bnxt_re_alloc_mr;
        ibdev->map_mr_sg                = bnxt_re_map_mr_sg;
-       ibdev->alloc_fmr                = bnxt_re_alloc_fmr;
-       ibdev->map_phys_fmr             = bnxt_re_map_phys_fmr;
-       ibdev->unmap_fmr                = bnxt_re_unmap_fmr;
-       ibdev->dealloc_fmr              = bnxt_re_dealloc_fmr;
 
        ibdev->reg_user_mr              = bnxt_re_reg_user_mr;
        ibdev->alloc_ucontext           = bnxt_re_alloc_ucontext;
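
With the FMR hooks removed, memory windows become the replacement mechanism (the fence logic above already calls bnxt_re_alloc_mw()/bnxt_re_dealloc_mw() directly); the matching ib_device wiring is presumably added as well, though it falls outside this excerpt. A hypothetical illustration in the style of the surrounding assignments:

    ibdev->alloc_mw                 = bnxt_re_alloc_mw;
    ibdev->dealloc_mw               = bnxt_re_dealloc_mw;
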
index 43d08b5e908525eae96b567178fdc8ad15dec9c5..f05500bcdcf1e35d21a88aba70b013d0d0d8a2cf 100644 (file)
@@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_create_qp1 req;
-       struct creq_create_qp1_resp *resp;
+       struct creq_create_qp1_resp resp;
        struct bnxt_qplib_pbl *pbl;
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_q *rq = &qp->rq;
@@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 
        req.pd_id = cpu_to_le32(qp->pd->id);
 
-       resp = (struct creq_create_qp1_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
-               rc = -EINVAL;
-               goto fail;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
-               rc = -ETIMEDOUT;
-               goto fail;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               rc = -EINVAL;
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
+       if (rc)
                goto fail;
-       }
-       qp->id = le32_to_cpu(resp->xid);
+
+       qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        sq->flush_in_progress = false;
        rq->flush_in_progress = false;
@@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
        struct cmdq_create_qp req;
-       struct creq_create_qp_resp *resp;
+       struct creq_create_qp_resp resp;
        struct bnxt_qplib_pbl *pbl;
        struct sq_psn_search **psn_search_ptr;
        unsigned long int psn_search, poff = 0;
@@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
        }
        req.pd_id = cpu_to_le32(qp->pd->id);
 
-       resp = (struct creq_create_qp_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
-               rc = -EINVAL;
-               goto fail;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
-               rc = -ETIMEDOUT;
-               goto fail;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               rc = -EINVAL;
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
+       if (rc)
                goto fail;
-       }
-       qp->id = le32_to_cpu(resp->xid);
+
+       qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        sq->flush_in_progress = false;
        rq->flush_in_progress = false;
@@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_modify_qp req;
-       struct creq_modify_qp_resp *resp;
+       struct creq_modify_qp_resp resp;
        u16 cmd_flags = 0, pkey;
        u32 temp32[4];
        u32 bmask;
+       int rc;
 
        RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
 
@@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 
        req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
 
-       resp = (struct creq_modify_qp_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
+       if (rc)
+               return rc;
        qp->cur_qp_state = qp->state;
        return 0;
 }
@@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_query_qp req;
-       struct creq_query_qp_resp *resp;
+       struct creq_query_qp_resp resp;
+       struct bnxt_qplib_rcfw_sbuf *sbuf;
        struct creq_query_qp_resp_sb *sb;
        u16 cmd_flags = 0;
        u32 temp32[4];
-       int i;
+       int i, rc = 0;
 
        RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
 
+       sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+       if (!sbuf)
+               return -ENOMEM;
+       sb = sbuf->sb;
+
        req.qp_cid = cpu_to_le32(qp->id);
        req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
-       resp = (struct creq_query_qp_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    (void **)&sb, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                         (void *)sbuf, 0);
+       if (rc)
+               goto bail;
        /* Extract the context from the side buffer */
        qp->state = sb->en_sqd_async_notify_state &
                        CREQ_QUERY_QP_RESP_SB_STATE_MASK;
@@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
        qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
        memcpy(qp->smac, sb->src_mac, 6);
        qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
-       return 0;
+bail:
+       bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+       return rc;
 }
 
 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_destroy_qp req;
-       struct creq_destroy_qp_resp *resp;
+       struct creq_destroy_qp_resp resp;
        unsigned long flags;
        u16 cmd_flags = 0;
+       int rc;
 
        RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
 
        req.qp_cid = cpu_to_le32(qp->id);
-       resp = (struct creq_destroy_qp_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
+       if (rc)
+               return rc;
 
        /* Must walk the associated CQs to nullify the QP ptr */
        spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
                rc = -EINVAL;
                goto done;
        }
-       if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
-           HWQ_CMP(sq->hwq.cons, &sq->hwq)) {
+
+       if (bnxt_qplib_queue_full(sq)) {
+               dev_err(&sq->hwq.pdev->dev,
+                       "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+                       sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
+                       sq->q_full_delta);
                rc = -ENOMEM;
                goto done;
        }
@@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
        }
 
        sq->hwq.prod++;
+
+       qp->wqe_cnt++;
+
 done:
        return rc;
 }
@@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
                rc = -EINVAL;
                goto done;
        }
-       if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
-           HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
+       if (bnxt_qplib_queue_full(rq)) {
                dev_err(&rq->hwq.pdev->dev,
                        "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
                rc = -EINVAL;
@@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_create_cq req;
-       struct creq_create_cq_resp *resp;
+       struct creq_create_cq_resp resp;
        struct bnxt_qplib_pbl *pbl;
        u16 cmd_flags = 0;
        int rc;
@@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
                        (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
                         CMDQ_CREATE_CQ_CNQ_ID_SFT);
 
-       resp = (struct creq_create_cq_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
-               rc = -ETIMEDOUT;
-               goto fail;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               rc = -EINVAL;
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
+       if (rc)
                goto fail;
-       }
-       cq->id = le32_to_cpu(resp->xid);
+
+       cq->id = le32_to_cpu(resp.xid);
        cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
        cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
        init_waitqueue_head(&cq->waitq);
@@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_destroy_cq req;
-       struct creq_destroy_cq_resp *resp;
+       struct creq_destroy_cq_resp resp;
        u16 cmd_flags = 0;
+       int rc;
 
        RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
 
        req.cq_cid = cpu_to_le32(cq->id);
-       resp = (struct creq_destroy_cq_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
+       if (rc)
+               return rc;
        bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
        return 0;
 }
@@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
        return rc;
 }
 
+/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
+ *       CQEs are tracked from sw_cq_cons to max_element but are valid only if VALID=1.
+ */
+static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+                    u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
+{
+       struct bnxt_qplib_q *sq = &qp->sq;
+       struct bnxt_qplib_swq *swq;
+       u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
+       struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+       struct cq_req *peek_req_hwcqe;
+       struct bnxt_qplib_qp *peek_qp;
+       struct bnxt_qplib_q *peek_sq;
+       int i, rc = 0;
+
+       /* Normal mode */
+       /* Check for the psn_search marking before completing */
+       swq = &sq->swq[sw_sq_cons];
+       if (swq->psn_search &&
+           le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
+               /* Unmark */
+               swq->psn_search->flags_next_psn = cpu_to_le32
+                       (le32_to_cpu(swq->psn_search->flags_next_psn)
+                                    & ~0x80000000);
+               dev_dbg(&cq->hwq.pdev->dev,
+                       "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
+                       cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+               sq->condition = true;
+               sq->send_phantom = true;
+
+               /* TODO: Only ARM if the previous SQE is ARMALL */
+               bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
+
+               rc = -EAGAIN;
+               goto out;
+       }
+       if (sq->condition) {
+               /* Peek at the completions */
+               peek_raw_cq_cons = cq->hwq.cons;
+               peek_sw_cq_cons = cq_cons;
+               i = cq->hwq.max_elements;
+               while (i--) {
+                       peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
+                       peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+                       peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
+                                                    [CQE_IDX(peek_sw_cq_cons)];
+                       /* If the next hwcqe is VALID */
+                       if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
+                                         cq->hwq.max_elements)) {
+                               /* If the next hwcqe is a REQ */
+                               if ((peek_hwcqe->cqe_type_toggle &
+                                   CQ_BASE_CQE_TYPE_MASK) ==
+                                   CQ_BASE_CQE_TYPE_REQ) {
+                                       peek_req_hwcqe = (struct cq_req *)
+                                                        peek_hwcqe;
+                                       peek_qp = (struct bnxt_qplib_qp *)
+                                               ((unsigned long)
+                                                le64_to_cpu
+                                                (peek_req_hwcqe->qp_handle));
+                                       peek_sq = &peek_qp->sq;
+                                       peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
+                                               peek_req_hwcqe->sq_cons_idx) - 1
+                                               , &sq->hwq);
+                                       /* If the hwcqe's sq's wr_id matches */
+                                       if (peek_sq == sq &&
+                                           sq->swq[peek_sq_cons_idx].wr_id ==
+                                           BNXT_QPLIB_FENCE_WRID) {
+                                               /*
+                                                *  Unbreak only if the phantom
+                                                *  comes back
+                                                */
+                                               dev_dbg(&cq->hwq.pdev->dev,
+                                                       "FP:Got Phantom CQE");
+                                               sq->condition = false;
+                                               sq->single = true;
+                                               rc = 0;
+                                               goto out;
+                                       }
+                               }
+                               /* Valid but not the phantom, so keep looping */
+                       } else {
+                               /* Not valid yet, just exit and wait */
+                               rc = -EINVAL;
+                               goto out;
+                       }
+                       peek_sw_cq_cons++;
+                       peek_raw_cq_cons++;
+               }
+               dev_err(&cq->hwq.pdev->dev,
+                       "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+                       cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+               rc = -EINVAL;
+       }
+out:
+       return rc;
+}
+
 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
                                     struct cq_req *hwcqe,
-                                    struct bnxt_qplib_cqe **pcqe, int *budget)
+                                    struct bnxt_qplib_cqe **pcqe, int *budget,
+                                    u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
 {
        struct bnxt_qplib_qp *qp;
        struct bnxt_qplib_q *sq;
        struct bnxt_qplib_cqe *cqe;
-       u32 sw_cons, cqe_cons;
+       u32 sw_sq_cons, cqe_sq_cons;
+       struct bnxt_qplib_swq *swq;
        int rc = 0;
 
        qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
        }
        sq = &qp->sq;
 
-       cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
-       if (cqe_cons > sq->hwq.max_elements) {
+       cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
+       if (cqe_sq_cons > sq->hwq.max_elements) {
                dev_err(&cq->hwq.pdev->dev,
                        "QPLIB: FP: CQ Process req reported ");
                dev_err(&cq->hwq.pdev->dev,
                        "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
-                       cqe_cons, sq->hwq.max_elements);
+                       cqe_sq_cons, sq->hwq.max_elements);
                return -EINVAL;
        }
        /* If we were in the middle of flushing the SQ, continue */
@@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 
        /* Require to walk the sq's swq to fabricate CQEs for all previously
         * signaled SWQEs due to CQE aggregation from the current sq cons
-        * to the cqe_cons
+        * to the cqe_sq_cons
         */
        cqe = *pcqe;
        while (*budget) {
-               sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
-               if (sw_cons == cqe_cons)
+               sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
+               if (sw_sq_cons == cqe_sq_cons)
+                       /* Done */
                        break;
+
+               swq = &sq->swq[sw_sq_cons];
                memset(cqe, 0, sizeof(*cqe));
                cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
                cqe->qp_handle = (u64)(unsigned long)qp;
                cqe->src_qp = qp->id;
-               cqe->wr_id = sq->swq[sw_cons].wr_id;
-               cqe->type = sq->swq[sw_cons].type;
+               cqe->wr_id = swq->wr_id;
+               if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
+                       goto skip;
+               cqe->type = swq->type;
 
                /* For the last CQE, check for status.  For errors, regardless
                 * of the request being signaled or not, it must complete with
                 * the hwcqe error status
                 */
-               if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons &&
+               if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
                    hwcqe->status != CQ_REQ_STATUS_OK) {
                        cqe->status = hwcqe->status;
                        dev_err(&cq->hwq.pdev->dev,
                                "QPLIB: FP: CQ Processed Req ");
                        dev_err(&cq->hwq.pdev->dev,
                                "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
-                               sw_cons, cqe->wr_id, cqe->status);
+                               sw_sq_cons, cqe->wr_id, cqe->status);
                        cqe++;
                        (*budget)--;
                        sq->flush_in_progress = true;
                        /* Must block new posting of SQ and RQ */
                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+                       sq->condition = false;
+                       sq->single = false;
                } else {
-                       if (sq->swq[sw_cons].flags &
-                           SQ_SEND_FLAGS_SIGNAL_COMP) {
+                       if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+                               /* Before we complete, do WA 9060 */
+                               if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+                                             cqe_sq_cons)) {
+                                       *lib_qp = qp;
+                                       goto out;
+                               }
                                cqe->status = CQ_REQ_STATUS_OK;
                                cqe++;
                                (*budget)--;
                        }
                }
+skip:
                sq->hwq.cons++;
+               if (sq->single)
+                       break;
        }
+out:
        *pcqe = cqe;
-       if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) {
+       if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
                /* Out of budget */
                rc = -EAGAIN;
                goto done;
        }
+       /*
+        * Switch back to normal completion mode only after all of the work
+        * completions for this CQE have been generated
+        */
+       sq->single = false;
        if (!sq->flush_in_progress)
                goto done;
 flush:
@@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
 }
 
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
-                      int num_cqes)
+                      int num_cqes, struct bnxt_qplib_qp **lib_qp)
 {
        struct cq_base *hw_cqe, **hw_cqe_ptr;
        unsigned long flags;
@@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                case CQ_BASE_CQE_TYPE_REQ:
                        rc = bnxt_qplib_cq_process_req(cq,
                                                       (struct cq_req *)hw_cqe,
-                                                      &cqe, &budget);
+                                                      &cqe, &budget,
+                                                      sw_cons, lib_qp);
                        break;
                case CQ_BASE_CQE_TYPE_RES_RC:
                        rc = bnxt_qplib_cq_process_res_rc(cq,
index f0150f8da1e3929b309d14006b68469a965d8945..36b7b7db0e3f9782104ee8b049e674edb830226b 100644 (file)
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
 
 struct bnxt_qplib_swqe {
        /* General */
+#define        BNXT_QPLIB_FENCE_WRID   0x46454E43      /* "FENC" */
        u64                             wr_id;
        u8                              reqs_type;
        u8                              type;
@@ -216,9 +217,16 @@ struct bnxt_qplib_q {
        struct scatterlist              *sglist;
        u32                             nmap;
        u32                             max_wqe;
+       u16                             q_full_delta;
        u16                             max_sge;
        u32                             psn;
        bool                            flush_in_progress;
+       bool                            condition;
+       bool                            single;
+       bool                            send_phantom;
+       u32                             phantom_wqe_cnt;
+       u32                             phantom_cqe_cnt;
+       u32                             next_cq_cons;
 };
 
 struct bnxt_qplib_qp {
@@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
        u8                              timeout;
        u8                              retry_cnt;
        u8                              rnr_retry;
+       u64                             wqe_cnt;
        u32                             min_rnr_timer;
        u32                             max_rd_atomic;
        u32                             max_dest_rd_atomic;
@@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
        (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==         \
           !((raw_cons) & (cp_bit)))
 
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
+{
+       return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
+                      &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
+                                                &qplib_q->hwq);
+}
+
 struct bnxt_qplib_cqe {
        u8                              status;
        u8                              type;
@@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
-                      int num);
+                      int num, struct bnxt_qplib_qp **qp);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
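
The new q_full_delta field and bnxt_qplib_queue_full() treat the queue as full once the producer is only q_full_delta slots away from the consumer on the power-of-two ring, so a few entries stay in reserve. A minimal stand-alone model of that test (plain integers instead of struct bnxt_qplib_q; the ring size and delta are demo values):

#include <stdio.h>
#include <stdbool.h>

static unsigned int ring_cmp(unsigned int idx, unsigned int max)
{
        return idx & (max - 1);         /* same masking as HWQ_CMP() */
}

static bool queue_full(unsigned int prod, unsigned int cons,
                       unsigned int max, unsigned int q_full_delta)
{
        return ring_cmp(prod + q_full_delta, max) == ring_cmp(cons, max);
}

int main(void)
{
        unsigned int max = 16, delta = 2, cons = 0;

        for (unsigned int prod = 12; prod <= 14; prod++)
                printf("prod=%u cons=%u -> %s\n", prod, cons,
                       queue_full(prod, cons, max, delta) ? "full" : "ok");
        return 0;
}

With delta = 2 the ring reports full two postings early; that headroom is how entries the driver must keep in reserve stay free.
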
index 23fb7260662b134b8d030f6c0ef7c6648976c434..16e42754dbeccfef863509b0bd8226683582df2a 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/prefetch.h>
+#include <linux/delay.h>
+
 #include "roce_hsi.h"
 #include "qplib_res.h"
 #include "qplib_rcfw.h"
 static void bnxt_qplib_service_creq(unsigned long data);
 
 /* Hardware communication channel */
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 {
        u16 cbit;
        int rc;
 
-       cookie &= RCFW_MAX_COOKIE_VALUE;
        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
-       if (!test_bit(cbit, rcfw->cmdq_bitmap))
-               dev_warn(&rcfw->pdev->dev,
-                        "QPLIB: CMD bit %d for cookie 0x%x is not set?",
-                        cbit, cookie);
-
        rc = wait_event_timeout(rcfw->waitq,
                                !test_bit(cbit, rcfw->cmdq_bitmap),
                                msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
-       if (!rc) {
-               dev_warn(&rcfw->pdev->dev,
-                        "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
-                        RCFW_CMD_WAIT_TIME_MS, cookie);
-       }
-
-       return rc;
+       return rc ? 0 : -ETIMEDOUT;
 };
 
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 {
-       u32 count = -1;
+       u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
        u16 cbit;
 
-       cookie &= RCFW_MAX_COOKIE_VALUE;
        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        if (!test_bit(cbit, rcfw->cmdq_bitmap))
                goto done;
        do {
+               mdelay(1); /* 1 msec */
                bnxt_qplib_service_creq((unsigned long)rcfw);
        } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
 done:
-       return count;
+       return count ? 0 : -ETIMEDOUT;
 };
 
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-                                  struct cmdq_base *req, void **crsbe,
-                                  u8 is_block)
+static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+                         struct creq_base *resp, void *sb, u8 is_block)
 {
-       struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
        struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
        struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
-       struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
-       struct bnxt_qplib_crsqe *crsqe = NULL;
-       struct bnxt_qplib_crsbe **crsb_ptr;
+       struct bnxt_qplib_crsq *crsqe;
        u32 sw_prod, cmdq_prod;
-       u8 retry_cnt = 0xFF;
-       dma_addr_t dma_addr;
        unsigned long flags;
        u32 size, opcode;
        u16 cookie, cbit;
        int pg, idx;
        u8 *preq;
 
-retry:
        opcode = req->opcode;
        if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
            (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@@ -112,63 +95,50 @@ retry:
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: RCFW not initialized, reject opcode 0x%x",
                        opcode);
-               return NULL;
+               return -EINVAL;
        }
 
        if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
            opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
                dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
-               return NULL;
+               return -EINVAL;
        }
 
        /* Cmdq are in 16-byte units, each request can consume 1 or more
         * cmdqe
         */
        spin_lock_irqsave(&cmdq->lock, flags);
-       if (req->cmd_size > cmdq->max_elements -
-           ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
-            (cmdq->max_elements - 1))) {
+       if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
                dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
                spin_unlock_irqrestore(&cmdq->lock, flags);
-
-               if (!retry_cnt--)
-                       return NULL;
-               goto retry;
+               return -EAGAIN;
        }
 
-       retry_cnt = 0xFF;
 
-       cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
+       cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        if (is_block)
                cookie |= RCFW_CMD_IS_BLOCKING;
+
+       set_bit(cbit, rcfw->cmdq_bitmap);
        req->cookie = cpu_to_le16(cookie);
-       if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: RCFW MAX outstanding cmd reached!");
-               atomic_dec(&rcfw->seq_num);
+       crsqe = &rcfw->crsqe_tbl[cbit];
+       if (crsqe->resp) {
                spin_unlock_irqrestore(&cmdq->lock, flags);
-
-               if (!retry_cnt--)
-                       return NULL;
-               goto retry;
+               return -EBUSY;
        }
-       /* Reserve a resp buffer slot if requested */
-       if (req->resp_size && crsbe) {
-               spin_lock(&crsb->lock);
-               sw_prod = HWQ_CMP(crsb->prod, crsb);
-               crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
-               *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
-                                         [get_crsb_idx(sw_prod)];
-               bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
-               req->resp_addr = cpu_to_le64(dma_addr);
-               crsb->prod++;
-               spin_unlock(&crsb->lock);
-
-               req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
-                                 BNXT_QPLIB_CMDQE_UNITS - 1) /
-                                BNXT_QPLIB_CMDQE_UNITS;
+       memset(resp, 0, sizeof(*resp));
+       crsqe->resp = (struct creq_qp_event *)resp;
+       crsqe->resp->cookie = req->cookie;
+       crsqe->req_size = req->cmd_size;
+       if (req->resp_size && sb) {
+               struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
+
+               req->resp_addr = cpu_to_le64(sbuf->dma_addr);
+               req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+                                 BNXT_QPLIB_CMDQE_UNITS;
        }
+
        cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
        preq = (u8 *)req;
        size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@@ -190,23 +160,24 @@ retry:
                preq += min_t(u32, size, sizeof(*cmdqe));
                size -= min_t(u32, size, sizeof(*cmdqe));
                cmdq->prod++;
+               rcfw->seq_num++;
        } while (size > 0);
 
+       rcfw->seq_num++;
+
        cmdq_prod = cmdq->prod;
        if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
-               /* The very first doorbell write is required to set this flag
-                * which prompts the FW to reset its internal pointers
+               /* The very first doorbell write
+                * is required to set this flag
+                * which prompts the FW to reset
+                * its internal pointers
                 */
                cmdq_prod |= FIRMWARE_FIRST_FLAG;
                rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
        }
-       sw_prod = HWQ_CMP(crsq->prod, crsq);
-       crsqe = &crsq->crsq[sw_prod];
-       memset(crsqe, 0, sizeof(*crsqe));
-       crsq->prod++;
-       crsqe->req_size = req->cmd_size;
 
        /* ring CMDQ DB */
+       wmb();
        writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
               rcfw->cmdq_bar_reg_prod_off);
        writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@@ -214,9 +185,56 @@ retry:
 done:
        spin_unlock_irqrestore(&cmdq->lock, flags);
        /* Return the CREQ response pointer */
-       return crsqe ? &crsqe->qp_event : NULL;
+       return 0;
 }
 
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+                                struct cmdq_base *req,
+                                struct creq_base *resp,
+                                void *sb, u8 is_block)
+{
+       struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
+       u16 cookie;
+       u8 opcode, retry_cnt = 0xFF;
+       int rc = 0;
+
+       do {
+               opcode = req->opcode;
+               rc = __send_message(rcfw, req, resp, sb, is_block);
+               cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
+               if (!rc)
+                       break;
+
+               if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
+                       /* send failed */
+                       dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
+                               cookie, opcode);
+                       return rc;
+               }
+               is_block ? mdelay(1) : usleep_range(500, 1000);
+
+       } while (retry_cnt--);
+
+       if (is_block)
+               rc = __block_for_resp(rcfw, cookie);
+       else
+               rc = __wait_for_resp(rcfw, cookie);
+       if (rc) {
+               /* timed out */
+               dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timed out (%d) msec",
+                       cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+               return rc;
+       }
+
+       if (evnt->status) {
+               /* failed with status */
+               dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
+                       cookie, opcode, evnt->status);
+               rc = -EFAULT;
+       }
+
+       return rc;
+}
 /* Completions */
 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
                                         struct creq_func_event *func_event)
@@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
                                       struct creq_qp_event *qp_event)
 {
-       struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
        struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
-       struct bnxt_qplib_crsqe *crsqe;
-       u16 cbit, cookie, blocked = 0;
+       struct bnxt_qplib_crsq *crsqe;
        unsigned long flags;
-       u32 sw_cons;
+       u16 cbit, blocked = 0;
+       u16 cookie;
+       __le16  mcookie;
 
        switch (qp_event->event) {
        case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
        default:
                /* Command Response */
                spin_lock_irqsave(&cmdq->lock, flags);
-               sw_cons = HWQ_CMP(crsq->cons, crsq);
-               crsqe = &crsq->crsq[sw_cons];
-               crsq->cons++;
-               memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
-
-               cookie = le16_to_cpu(crsqe->qp_event.cookie);
+               cookie = le16_to_cpu(qp_event->cookie);
+               mcookie = qp_event->cookie;
                blocked = cookie & RCFW_CMD_IS_BLOCKING;
                cookie &= RCFW_MAX_COOKIE_VALUE;
                cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
+               crsqe = &rcfw->crsqe_tbl[cbit];
+               if (crsqe->resp &&
+                   crsqe->resp->cookie  == mcookie) {
+                       memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
+                       crsqe->resp = NULL;
+               } else {
+                       dev_err(&rcfw->pdev->dev,
+                               "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
+                               crsqe->resp ? "mismatch" : "collision",
+                               crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
+               }
                if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
                        dev_warn(&rcfw->pdev->dev,
                                 "QPLIB: CMD bit %d was not requested", cbit);
-
                cmdq->cons += crsqe->req_size;
-               spin_unlock_irqrestore(&cmdq->lock, flags);
+               crsqe->req_size = 0;
+
                if (!blocked)
                        wake_up(&rcfw->waitq);
-               break;
+               spin_unlock_irqrestore(&cmdq->lock, flags);
        }
        return 0;
 }
@@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
        struct creq_base *creqe, **creq_ptr;
        u32 sw_cons, raw_cons;
        unsigned long flags;
-       u32 type;
+       u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
 
-       /* Service the CREQ until empty */
+       /* Service the CREQ until budget is over */
        spin_lock_irqsave(&creq->lock, flags);
        raw_cons = creq->cons;
-       while (1) {
+       while (budget > 0) {
                sw_cons = HWQ_CMP(raw_cons, creq);
                creq_ptr = (struct creq_base **)creq->pbl_ptr;
                creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
                type = creqe->type & CREQ_BASE_TYPE_MASK;
                switch (type) {
                case CREQ_BASE_TYPE_QP_EVENT:
-                       if (!bnxt_qplib_process_qp_event
-                           (rcfw, (struct creq_qp_event *)creqe))
-                               rcfw->creq_qp_event_processed++;
-                       else {
-                               dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
-                               dev_warn(&rcfw->pdev->dev,
-                                        "QPLIB: type = 0x%x not handled",
-                                        type);
-                       }
+                       bnxt_qplib_process_qp_event
+                               (rcfw, (struct creq_qp_event *)creqe);
+                       rcfw->creq_qp_event_processed++;
                        break;
                case CREQ_BASE_TYPE_FUNC_EVENT:
                        if (!bnxt_qplib_process_func_event
@@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
                        break;
                }
                raw_cons++;
+               budget--;
        }
+
        if (creq->cons != raw_cons) {
                creq->cons = raw_cons;
                CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
@@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
 /* RCFW */
 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
 {
-       struct creq_deinitialize_fw_resp *resp;
        struct cmdq_deinitialize_fw req;
+       struct creq_deinitialize_fw_resp resp;
        u16 cmd_flags = 0;
+       int rc;
 
        RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
-       resp = (struct creq_deinitialize_fw_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp)
-               return -EINVAL;
-
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
-               return -ETIMEDOUT;
-
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
-               return -EFAULT;
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                         NULL, 0);
+       if (rc)
+               return rc;
 
        clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
        return 0;
@@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
                         struct bnxt_qplib_ctx *ctx, int is_virtfn)
 {
-       struct creq_initialize_fw_resp *resp;
        struct cmdq_initialize_fw req;
+       struct creq_initialize_fw_resp resp;
        u16 cmd_flags = 0, level;
+       int rc;
 
        RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
 
@@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 
 skip_ctx_setup:
        req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
-       resp = (struct creq_initialize_fw_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: RCFW: INITIALIZE_FW send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: RCFW: INITIALIZE_FW timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: RCFW: INITIALIZE_FW failed");
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                         NULL, 0);
+       if (rc)
+               return rc;
        set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
        return 0;
 }
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 {
-       bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
-       kfree(rcfw->crsq.crsq);
+       kfree(rcfw->crsqe_tbl);
        bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
        bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
-
        rcfw->pdev = NULL;
 }
 
@@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
                goto fail;
        }
 
-       rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
-       rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
-                                 sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
-       if (!rcfw->crsq.crsq)
+       rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
+                                 sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
+       if (!rcfw->crsqe_tbl)
                goto fail;
 
-       rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
-       if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
-                                     &rcfw->crsb.max_elements,
-                                     BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
-                                     HWQ_TYPE_CTX)) {
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: HW channel CRSB allocation failed");
-               goto fail;
-       }
        return 0;
 
 fail:
@@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
        int rc;
 
        /* General */
-       atomic_set(&rcfw->seq_num, 0);
+       rcfw->seq_num = 0;
        rcfw->flags = FIRMWARE_FIRST_FLAG;
        bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
                                  sizeof(unsigned long));
@@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 
        rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
 
-       /* CRSQ */
-       rcfw->crsq.prod = 0;
-       rcfw->crsq.cons = 0;
-
        /* CREQ */
        rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
        res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
        __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
        return 0;
 }
+
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+               struct bnxt_qplib_rcfw *rcfw,
+               u32 size)
+{
+       struct bnxt_qplib_rcfw_sbuf *sbuf;
+
+       sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
+       if (!sbuf)
+               return NULL;
+
+       sbuf->size = size;
+       sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
+                                      &sbuf->dma_addr, GFP_ATOMIC);
+       if (!sbuf->sb)
+               goto bail;
+
+       return sbuf;
+bail:
+       kfree(sbuf);
+       return NULL;
+}
+
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+                              struct bnxt_qplib_rcfw_sbuf *sbuf)
+{
+       if (sbuf->sb)
+               dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
+                                 sbuf->sb, sbuf->dma_addr);
+       kfree(sbuf);
+}
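
The reworked send path derives the completion-tracking slot directly from the command cookie: the low 15 bits come from rcfw->seq_num, bit 15 (RCFW_CMD_IS_BLOCKING) marks blocking commands, and the completion side strips that flag before indexing crsqe_tbl. A small stand-alone illustration of that mapping follows; MAX_OUTSTANDING_CMD here is a made-up demo size, not the driver's RCFW_MAX_OUTSTANDING_CMD.

#include <stdio.h>

#define MAX_COOKIE_VALUE        0x7FFF  /* mirrors RCFW_MAX_COOKIE_VALUE */
#define CMD_IS_BLOCKING         0x8000  /* mirrors RCFW_CMD_IS_BLOCKING */
#define MAX_OUTSTANDING_CMD     256     /* demo value only */

int main(void)
{
        unsigned int seq_num = 70000;   /* free-running command counter */
        unsigned short cookie = seq_num & MAX_COOKIE_VALUE;
        unsigned short wire = cookie | CMD_IS_BLOCKING; /* blocking command */

        /* Completion side: strip the flag, then find the tracking slot. */
        unsigned short cleaned = wire & MAX_COOKIE_VALUE;
        unsigned int cbit = cleaned % MAX_OUTSTANDING_CMD;

        printf("seq=%u cookie=%#x on-wire=%#x slot=%u\n",
               seq_num, cookie, wire, cbit);
        return 0;
}
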
index d3567d75bf58fe4e5e496c78ec27643af7d80cdc..09ce121770cdafd1af01bd815788f6b25eac3bf0 100644 (file)
@@ -73,6 +73,7 @@
 #define RCFW_MAX_OUTSTANDING_CMD       BNXT_QPLIB_CMDQE_MAX_CNT
 #define RCFW_MAX_COOKIE_VALUE          0x7FFF
 #define RCFW_CMD_IS_BLOCKING           0x8000
+#define RCFW_BLOCKED_CMD_WAIT_COUNT    0x4E20
 
 /* Cmdq contains a fixed number of 16-byte slots */
 struct bnxt_qplib_cmdqe {
@@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
        u8                      data[1024];
 };
 
-/* CRSQ SB */
-#define BNXT_QPLIB_CRSBE_MAX_CNT       4
-#define BNXT_QPLIB_CRSBE_UNITS         sizeof(struct bnxt_qplib_crsbe)
-#define BNXT_QPLIB_CRSBE_CNT_PER_PG    (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
-
-#define MAX_CRSB_IDX                   (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
-#define MAX_CRSB_IDX_PER_PG            (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
-
-static inline u32 get_crsb_pg(u32 val)
-{
-       return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
-}
-
-static inline u32 get_crsb_idx(u32 val)
-{
-       return val & MAX_CRSB_IDX_PER_PG;
-}
-
-static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
-                                           u32 prod, dma_addr_t *dma_addr)
-{
-               *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
-               *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
-                             BNXT_QPLIB_CRSBE_UNITS;
-}
-
 /* CREQ */
 /* Allocate 1 per QP for async error notification for now */
 #define BNXT_QPLIB_CREQE_MAX_CNT       (64 * 1024)
@@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
 #define CREQ_DB(db, raw_cons, cp_bit)                          \
        writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
 
+#define CREQ_ENTRY_POLL_BUDGET         0x100
+
 /* HWQ */
-struct bnxt_qplib_crsqe {
-       struct creq_qp_event    qp_event;
+
+struct bnxt_qplib_crsq {
+       struct creq_qp_event    *resp;
        u32                     req_size;
 };
 
-struct bnxt_qplib_crsq {
-       struct bnxt_qplib_crsqe *crsq;
-       u32                     prod;
-       u32                     cons;
-       u32                     max_elements;
+struct bnxt_qplib_rcfw_sbuf {
+       void *sb;
+       dma_addr_t dma_addr;
+       u32 size;
 };
 
 /* RCFW Communication Channels */
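
CREQ_ENTRY_POLL_BUDGET bounds how many CREQ entries one pass of the service routine may consume before it publishes the new consumer index and rearms the doorbell, instead of looping until the ring is empty. A toy model of that pattern, with an int array standing in for the hardware ring and a shrunk budget for the demo:

#include <stdio.h>

#define POLL_BUDGET     4       /* driver uses 0x100 */
#define RING_SIZE       8       /* power of two */

int main(void)
{
        int ring[RING_SIZE] = { 1, 1, 1, 1, 1, 1, 0, 0 };  /* 1 = valid */
        unsigned int cons = 0, budget = POLL_BUDGET;

        while (budget > 0) {
                unsigned int slot = cons & (RING_SIZE - 1);

                if (!ring[slot])
                        break;          /* ring drained before budget ran out */
                printf("processed entry in slot %u\n", slot);
                cons++;
                budget--;
        }
        printf("rearm doorbell at consumer index %u\n", cons);
        return 0;
}
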
@@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
        wait_queue_head_t       waitq;
        int                     (*aeq_handler)(struct bnxt_qplib_rcfw *,
                                               struct creq_func_event *);
-       atomic_t                seq_num;
+       u32                     seq_num;
 
        /* Bar region info */
        void __iomem            *cmdq_bar_reg_iomem;
@@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {
 
        /* Actual Cmd and Resp Queues */
        struct bnxt_qplib_hwq   cmdq;
-       struct bnxt_qplib_crsq  crsq;
-       struct bnxt_qplib_hwq   crsb;
+       struct bnxt_qplib_crsq  *crsqe_tbl;
 };
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
                                        (struct bnxt_qplib_rcfw *,
                                         struct creq_func_event *));
 
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-                                  struct cmdq_base *req, void **crsbe,
-                                  u8 is_block);
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+                               struct bnxt_qplib_rcfw *rcfw,
+                               u32 size);
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+                              struct bnxt_qplib_rcfw_sbuf *sbuf);
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+                                struct cmdq_base *req, struct creq_base *resp,
+                                void *sbuf, u8 is_block);
 
 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
index 6277d802ca4bc7e231e010331b5045c92cb57aad..2e4855509719cbf5ffa1725c8bee4d7e8e2c2f34 100644 (file)
@@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
 
 #define HWQ_CMP(idx, hwq)      ((idx) & ((hwq)->max_elements - 1))
 
+#define HWQ_FREE_SLOTS(hwq)    (hwq->max_elements - \
+                               ((HWQ_CMP(hwq->prod, hwq)\
+                               - HWQ_CMP(hwq->cons, hwq))\
+                               & (hwq->max_elements - 1)))
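
HWQ_CMP() masks a free-running index onto the power-of-two ring, and the new HWQ_FREE_SLOTS() derives the remaining room from the producer/consumer distance. The same arithmetic as a stand-alone snippet, using plain integers rather than struct bnxt_qplib_hwq:

#include <stdio.h>

#define RING_CMP(idx, max)      ((idx) & ((max) - 1))
#define RING_FREE_SLOTS(p, c, max) \
        ((max) - ((RING_CMP(p, max) - RING_CMP(c, max)) & ((max) - 1)))

int main(void)
{
        unsigned int max = 8;             /* must be a power of two */
        unsigned int prod = 13, cons = 9; /* raw indices keep counting up */

        printf("prod slot %u, cons slot %u, free %u\n",
               RING_CMP(prod, max), RING_CMP(cons, max),
               RING_FREE_SLOTS(prod, cons, max));
        return 0;
}
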
 enum bnxt_qplib_hwq_type {
        HWQ_TYPE_CTX,
        HWQ_TYPE_QUEUE,
index 7b31eccedf11fa8f36a29e6251127144e75a59a5..fde18cf0e406b9f78e2af96d70077622f3d02732 100644 (file)
@@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
                            struct bnxt_qplib_dev_attr *attr)
 {
        struct cmdq_query_func req;
-       struct creq_query_func_resp *resp;
+       struct creq_query_func_resp resp;
+       struct bnxt_qplib_rcfw_sbuf *sbuf;
        struct creq_query_func_resp_sb *sb;
        u16 cmd_flags = 0;
        u32 temp;
        u8 *tqm_alloc;
-       int i;
+       int i, rc = 0;
 
        RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
 
-       req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
-       resp = (struct creq_query_func_resp *)
-               bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
-                                            0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
+       sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+       if (!sbuf) {
                dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
+                       "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
+               return -ENOMEM;
        }
+
+       sb = sbuf->sb;
+       req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                         (void *)sbuf, 0);
+       if (rc)
+               goto bail;
+
        /* Extract the context from the side buffer */
        attr->max_qp = le32_to_cpu(sb->max_qp);
        attr->max_qp_rd_atom =
@@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
                sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
                BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
        attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+       /*
+        * 128 WQEs need to be reserved for the HW (8916). Prevent
+        * reporting the max number.
+        */
+       attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
        attr->max_qp_sges = sb->max_sge;
        attr->max_cq = le32_to_cpu(sb->max_cq);
        attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
                attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
                attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
        }
-       return 0;
+
+bail:
+       bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+       return rc;
 }
 
 /* SGID */
@@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
        /* Remove GID from the SGID table */
        if (update) {
                struct cmdq_delete_gid req;
-               struct creq_delete_gid_resp *resp;
+               struct creq_delete_gid_resp resp;
                u16 cmd_flags = 0;
+               int rc;
 
                RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
                if (sgid_tbl->hw_id[index] == 0xFFFF) {
@@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                        return -EINVAL;
                }
                req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
-               resp = (struct creq_delete_gid_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL,
-                                                    0);
-               if (!resp) {
-                       dev_err(&res->pdev->dev,
-                               "QPLIB: SP: DELETE_GID send failed");
-                       return -EINVAL;
-               }
-               if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
-                                                  le16_to_cpu(req.cookie))) {
-                       /* Cmd timed out */
-                       dev_err(&res->pdev->dev,
-                               "QPLIB: SP: DELETE_GID timed out");
-                       return -ETIMEDOUT;
-               }
-               if (resp->status ||
-                   le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-                       dev_err(&res->pdev->dev,
-                               "QPLIB: SP: DELETE_GID failed ");
-                       dev_err(&res->pdev->dev,
-                               "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                               resp->status, le16_to_cpu(req.cookie),
-                               le16_to_cpu(resp->cookie));
-                       return -EINVAL;
-               }
+               rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                                 (void *)&resp, NULL, 0);
+               if (rc)
+                       return rc;
        }
        memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
               sizeof(bnxt_qplib_gid_zero));
@@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                                   struct bnxt_qplib_res,
                                                   sgid_tbl);
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-       int i, free_idx, rc = 0;
+       int i, free_idx;
 
        if (!sgid_tbl) {
                dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
@@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
        }
        if (update) {
                struct cmdq_add_gid req;
-               struct creq_add_gid_resp *resp;
+               struct creq_add_gid_resp resp;
                u16 cmd_flags = 0;
                u32 temp32[4];
                u16 temp16[3];
+               int rc;
 
                RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
 
@@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                req.src_mac[1] = cpu_to_be16(temp16[1]);
                req.src_mac[2] = cpu_to_be16(temp16[2]);
 
-               resp = (struct creq_add_gid_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-               if (!resp) {
-                       dev_err(&res->pdev->dev,
-                               "QPLIB: SP: ADD_GID send failed");
-                       return -EINVAL;
-               }
-               if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
-                                                  le16_to_cpu(req.cookie))) {
-                       /* Cmd timed out */
-                       dev_err(&res->pdev->dev,
-                               "QPIB: SP: ADD_GID timed out");
-                       return -ETIMEDOUT;
-               }
-               if (resp->status ||
-                   le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-                       dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
-                       dev_err(&res->pdev->dev,
-                               "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                               resp->status, le16_to_cpu(req.cookie),
-                               le16_to_cpu(resp->cookie));
-                       return -EINVAL;
-               }
-               sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
+               rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                                 (void *)&resp, NULL, 0);
+               if (rc)
+                       return rc;
+               sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
        }
        /* Add GID to the sgid_tbl */
        memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
@@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 
        *index = free_idx;
        /* unlock */
-       return rc;
+       return 0;
 }
 
 /* pkeys */
@@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_create_ah req;
-       struct creq_create_ah_resp *resp;
+       struct creq_create_ah_resp resp;
        u16 cmd_flags = 0;
        u32 temp32[4];
        u16 temp16[3];
+       int rc;
 
        RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
 
@@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
        req.dest_mac[1] = cpu_to_le16(temp16[1]);
        req.dest_mac[2] = cpu_to_le16(temp16[2]);
 
-       resp = (struct creq_create_ah_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 1);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
-       ah->id = le32_to_cpu(resp->xid);
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                         NULL, 1);
+       if (rc)
+               return rc;
+
+       ah->id = le32_to_cpu(resp.xid);
        return 0;
 }
 
@@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_destroy_ah req;
-       struct creq_destroy_ah_resp *resp;
+       struct creq_destroy_ah_resp resp;
        u16 cmd_flags = 0;
+       int rc;
 
        /* Clean up the AH table in the device */
        RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
 
        req.ah_cid = cpu_to_le32(ah->id);
 
-       resp = (struct creq_destroy_ah_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 1);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                         NULL, 1);
+       if (rc)
+               return rc;
        return 0;
 }
 
@@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_deallocate_key req;
-       struct creq_deallocate_key_resp *resp;
+       struct creq_deallocate_key_resp resp;
        u16 cmd_flags = 0;
+       int rc;
 
        if (mrw->lkey == 0xFFFFFFFF) {
                dev_info(&res->pdev->dev,
@@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
        else
                req.key = cpu_to_le32(mrw->lkey);
 
-       resp = (struct creq_deallocate_key_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
-               dev_err(&res->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                         NULL, 0);
+       if (rc)
+               return rc;
+
        /* Free the qplib's MRW memory */
        if (mrw->hwq.max_elements)
                bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
@@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_allocate_mrw req;
-       struct creq_allocate_mrw_resp *resp;
+       struct creq_allocate_mrw_resp resp;
        u16 cmd_flags = 0;
        unsigned long tmp;
+       int rc;
 
        RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
 
@@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
        tmp = (unsigned long)mrw;
        req.mrw_handle = cpu_to_le64(tmp);
 
-       resp = (struct creq_allocate_mrw_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, 0);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed");
-               return -EINVAL;
-       }
-       if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-               /* Cmd timed out */
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
+       if (rc)
+               return rc;
+
        if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
            (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
            (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
-               mrw->rkey = le32_to_cpu(resp->xid);
+               mrw->rkey = le32_to_cpu(resp.xid);
        else
-               mrw->lkey = le32_to_cpu(resp->xid);
+               mrw->lkey = le32_to_cpu(resp.xid);
        return 0;
 }
 
@@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_deregister_mr req;
-       struct creq_deregister_mr_resp *resp;
+       struct creq_deregister_mr_resp resp;
        u16 cmd_flags = 0;
        int rc;
 
        RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
 
        req.lkey = cpu_to_le32(mrw->lkey);
-       resp = (struct creq_deregister_mr_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, block);
-       if (!resp) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
-               return -EINVAL;
-       }
-       if (block)
-               rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
-                                                   le16_to_cpu(req.cookie));
-       else
-               rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
-                                                  le16_to_cpu(req.cookie));
-       if (!rc) {
-               /* Cmd timed out */
-               dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
-               return -ETIMEDOUT;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
-               dev_err(&rcfw->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, block);
+       if (rc)
+               return rc;
 
        /* Free the qplib's MR memory */
        if (mrw->hwq.max_elements) {
@@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_register_mr req;
-       struct creq_register_mr_resp *resp;
+       struct creq_register_mr_resp resp;
        u16 cmd_flags = 0, level;
        int pg_ptrs, pages, i, rc;
        dma_addr_t **pbl_ptr;
@@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
        req.key = cpu_to_le32(mr->lkey);
        req.mr_size = cpu_to_le64(mr->total_size);
 
-       resp = (struct creq_register_mr_resp *)
-                       bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                                    NULL, block);
-       if (!resp) {
-               dev_err(&res->pdev->dev, "SP: REG_MR send failed");
-               rc = -EINVAL;
-               goto fail;
-       }
-       if (block)
-               rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
-                                                   le16_to_cpu(req.cookie));
-       else
-               rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
-                                                  le16_to_cpu(req.cookie));
-       if (!rc) {
-               /* Cmd timed out */
-               dev_err(&res->pdev->dev, "SP: REG_MR timed out");
-               rc = -ETIMEDOUT;
-               goto fail;
-       }
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
-               dev_err(&res->pdev->dev,
-                       "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               rc = -EINVAL;
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, block);
+       if (rc)
                goto fail;
-       }
+
        return 0;
 
 fail:
@@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_map_tc_to_cos req;
-       struct creq_map_tc_to_cos_resp *resp;
+       struct creq_map_tc_to_cos_resp resp;
        u16 cmd_flags = 0;
-       int tleft;
+       int rc = 0;
 
        RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
        req.cos0 = cpu_to_le16(cids[0]);
        req.cos1 = cpu_to_le16(cids[1]);
 
-       resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
-       if (!resp) {
-               dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
-               return -EINVAL;
-       }
-
-       tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
-       if (!tleft) {
-               dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
-               return -ETIMEDOUT;
-       }
-
-       if (resp->status ||
-           le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-               dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
-               dev_err(&res->pdev->dev,
-                       "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-                       resp->status, le16_to_cpu(req.cookie),
-                       le16_to_cpu(resp->cookie));
-               return -EINVAL;
-       }
-
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+                                         (void *)&resp, NULL, 0);
        return 0;
 }
index 1442a617e968e1d367a9fd9e69a0540da645ce6f..a543f959098bd11972383a9d907725c08497a8f4 100644 (file)
@@ -40,6 +40,8 @@
 #ifndef __BNXT_QPLIB_SP_H__
 #define __BNXT_QPLIB_SP_H__
 
+#define BNXT_QPLIB_RESERVED_QP_WRS     128
+
 struct bnxt_qplib_dev_attr {
        char                            fw_ver[32];
        u16                             max_sgid;
index f96a96dbcf1ff4e40b75de36122a3efd0405faae..ae0b79aeea2ec141ebc83b9960f59718035eb1bf 100644 (file)
@@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                kfree(entry);
        }
 
-       list_for_each_safe(pos, nxt, &uctx->qpids) {
+       list_for_each_safe(pos, nxt, &uctx->cqids) {
                entry = list_entry(pos, struct c4iw_qid_list, entry);
                list_del_init(&entry->entry);
                kfree(entry);
@@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
        rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
        if (!rdev->free_workq) {
                err = -ENOMEM;
-               goto err_free_status_page;
+               goto err_free_status_page_and_wr_log;
        }
 
        rdev->status_page->db_off = 0;
 
        return 0;
-err_free_status_page:
+err_free_status_page_and_wr_log:
+       if (c4iw_wr_log && rdev->wr_log)
+               kfree(rdev->wr_log);
        free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
        c4iw_ocqp_pool_destroy(rdev);
@@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
        destroy_workqueue(rdev->free_workq);
        kfree(rdev->wr_log);
+       c4iw_release_dev_ucontext(rdev, &rdev->uctx);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
+       c4iw_ocqp_pool_destroy(rdev);
        c4iw_destroy_resource(&rdev->resource);
 }
 
index 0c79983c8b1a0a4e6189fb5e02439ff3522a9098..9ecc089d4529440fa87929fb74461558db920549 100644 (file)
@@ -3692,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
        dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
        dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
-       dev->ib_dev.alloc_rdma_netdev   = mlx5_ib_alloc_rdma_netdev;
-       dev->ib_dev.free_rdma_netdev    = mlx5_ib_free_rdma_netdev;
+       if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+               dev->ib_dev.alloc_rdma_netdev   = mlx5_ib_alloc_rdma_netdev;
+               dev->ib_dev.free_rdma_netdev    = mlx5_ib_free_rdma_netdev;
+       }
        if (mlx5_core_is_pf(mdev)) {
                dev->ib_dev.get_vf_config       = mlx5_ib_get_vf_config;
                dev->ib_dev.set_vf_link_state   = mlx5_ib_set_vf_link_state;
index aa08c76a42450a2ef86d1854b45a80d7bcd4387c..d961f79b317cba526fe52ca4af6adc40c0781868 100644 (file)
 #define QEDR_MSG_QP   "  QP"
 #define QEDR_MSG_GSI  " GSI"
 
-#define QEDR_CQ_MAGIC_NUMBER   (0x11223344)
+#define QEDR_CQ_MAGIC_NUMBER   (0x11223344)
+
+#define FW_PAGE_SIZE           (RDMA_RING_PAGE_SIZE)
+#define FW_PAGE_SHIFT          (12)
 
 struct qedr_dev;
 
index 17685cfea6a2dcb435e659675805dcbc29139e7b..d6723c365c7fba36168bc11c227de8bc34eef862 100644 (file)
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
 
 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
                               struct qedr_pbl *pbl,
-                              struct qedr_pbl_info *pbl_info)
+                              struct qedr_pbl_info *pbl_info, u32 pg_shift)
 {
        int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+       u32 fw_pg_cnt, fw_pg_per_umem_pg;
        struct qedr_pbl *pbl_tbl;
        struct scatterlist *sg;
        struct regpair *pbe;
+       u64 pg_addr;
        int entry;
-       u32 addr;
 
        if (!pbl_info->num_pbes)
                return;
@@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
        shift = umem->page_shift;
 
+       fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
+
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
+               pg_addr = sg_dma_address(sg);
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-                       /* store the page address in pbe */
-                       pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-                                             (pg_cnt << shift));
-                       addr = upper_32_bits(sg_dma_address(sg) +
-                                            (pg_cnt << shift));
-                       pbe->hi = cpu_to_le32(addr);
-                       pbe_cnt++;
-                       total_num_pbes++;
-                       pbe++;
-
-                       if (total_num_pbes == pbl_info->num_pbes)
-                               return;
-
-                       /* If the given pbl is full storing the pbes,
-                        * move to next pbl.
-                        */
-                       if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
-                               pbl_tbl++;
-                               pbe = (struct regpair *)pbl_tbl->va;
-                               pbe_cnt = 0;
+                       for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+                               pbe->lo = cpu_to_le32(pg_addr);
+                               pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+
+                               pg_addr += BIT(pg_shift);
+                               pbe_cnt++;
+                               total_num_pbes++;
+                               pbe++;
+
+                               if (total_num_pbes == pbl_info->num_pbes)
+                                       return;
+
+                               /* If the given pbl is full storing the pbes,
+                                * move to next pbl.
+                                */
+                               if (pbe_cnt ==
+                                   (pbl_info->pbl_size / sizeof(u64))) {
+                                       pbl_tbl++;
+                                       pbe = (struct regpair *)pbl_tbl->va;
+                                       pbe_cnt = 0;
+                               }
+
+                               fw_pg_cnt++;
                        }
                }
        }
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
                                       u64 buf_addr, size_t buf_len,
                                       int access, int dmasync)
 {
-       int page_cnt;
+       u32 fw_pages;
        int rc;
 
        q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
                return PTR_ERR(q->umem);
        }
 
-       page_cnt = ib_umem_page_count(q->umem);
-       rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+       fw_pages = ib_umem_page_count(q->umem) <<
+           (q->umem->page_shift - FW_PAGE_SHIFT);
+
+       rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
        if (rc)
                goto err0;
 
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
                goto err0;
        }
 
-       qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+       qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+                          FW_PAGE_SHIFT);
 
        return 0;
 
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                goto err1;
 
        qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
-                          &mr->info.pbl_info);
+                          &mr->info.pbl_info, mr->umem->page_shift);
 
        rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
        if (rc) {
@@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
                case IB_WC_REG_MR:
                        qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
                        break;
+               case IB_WC_RDMA_READ:
+               case IB_WC_SEND:
+                       wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+                       break;
                default:
                        break;
                }
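
The qedr changes above decouple the firmware page size (FW_PAGE_SHIFT, 4 KiB) from the umem page size: each umem page is split into BIT(umem->page_shift - pg_shift) page-buffer entries, so a 64 KiB umem page (page_shift 16) produces 16 PBEs. A small sketch of just that arithmetic; the helper name and macro prefix are hypothetical:

    #include <linux/bitops.h>

    #define EXAMPLE_FW_PAGE_SHIFT 12    /* firmware works on 4 KiB pages */

    /* Number of firmware-sized pages backing a umem of 'umem_pages' pages. */
    static u32 example_fw_page_count(u32 umem_pages, unsigned int umem_page_shift)
    {
            /* e.g. umem_page_shift == 16 (64 KiB): each umem page maps to
             * BIT(16 - 12) == 16 firmware PBEs, i.e. shift left by 4.
             */
            return umem_pages << (umem_page_shift - EXAMPLE_FW_PAGE_SHIFT);
    }
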
index ecdba2fce0837675f4a9ae6cddb80c7b1c1c0a3d..1ac5b8551a4d37325e782e5dc438ed4915d7243f 100644 (file)
@@ -68,6 +68,7 @@
 static inline u32 rxe_crc32(struct rxe_dev *rxe,
                            u32 crc, void *next, size_t len)
 {
+       u32 retval;
        int err;
 
        SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
                return crc32_le(crc, next, len);
        }
 
-       return *(u32 *)shash_desc_ctx(shash);
+       retval = *(u32 *)shash_desc_ctx(shash);
+       barrier_data(shash_desc_ctx(shash));
+       return retval;
 }
 
 int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
index 83d709e74dfb87ab0dcec30c87f0e49cd57022d2..073e66783f1dd8a4b62f9fc59a84319b507b51b8 100644 (file)
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 
                sge = ibwr->sg_list;
                for (i = 0; i < num_sge; i++, sge++) {
-                       if (qp->is_user && copy_from_user(p, (__user void *)
-                                           (uintptr_t)sge->addr, sge->length))
-                               return -EFAULT;
-
-                       else if (!qp->is_user)
-                               memcpy(p, (void *)(uintptr_t)sge->addr,
-                                      sge->length);
+                       memcpy(p, (void *)(uintptr_t)sge->addr,
+                                       sge->length);
 
                        p += sge->length;
                }
index 0060b2f9f659d7ad1ee9a1767d0029dc8529ff4c..efe7402f48852195efae4fc70cf843d0398dda30 100644 (file)
@@ -863,7 +863,6 @@ dev_stop:
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-       napi_enable(&priv->napi);
        ipoib_ib_dev_stop(dev);
        return -1;
 }
index a115c0b7a310ed630c1c32ffd9e2c17574358f7c..1015a63de6aed6f3a8b88f3816dc5720a2d24092 100644 (file)
@@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev)
 
        ipoib_transport_dev_cleanup(dev);
 
+       netif_napi_del(&priv->napi);
+
        ipoib_cm_dev_cleanup(dev);
 
        kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@ out_rx_ring_cleanup:
        kfree(priv->rx_ring);
 
 out:
+       netif_napi_del(&priv->napi);
        return -ENOMEM;
 }
 
@@ -2237,6 +2240,7 @@ event_failed:
 
 device_init_failed:
        free_netdev(priv->dev);
+       kfree(priv);
 
 alloc_mem_failed:
        return ERR_PTR(result);
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)
 
 static void ipoib_remove_one(struct ib_device *device, void *client_data)
 {
-       struct ipoib_dev_priv *priv, *tmp;
+       struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
        struct list_head *dev_list = client_data;
 
        if (!dev_list)
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                flush_workqueue(priv->wq);
 
                unregister_netdev(priv->dev);
-               free_netdev(priv->dev);
+               if (device->free_rdma_netdev)
+                       device->free_rdma_netdev(priv->dev);
+               else
+                       free_netdev(priv->dev);
+
+               list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+                       kfree(cpriv);
+
                kfree(priv);
        }
 
index 36dc4fcaa3cdbcd2516d44fb9360c5dcccce1404..081b33deff1bcbf6f381c9993af174a3dc5e90b4 100644 (file)
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
        snprintf(intf_name, sizeof intf_name, "%s.%04x",
                 ppriv->dev->name, pkey);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
        if (!priv)
                return -ENOMEM;
 
-       if (!rtnl_trylock())
-               return restart_syscall();
-
        down_write(&ppriv->vlan_rwsem);
 
        /*
@@ -167,8 +167,10 @@ out:
 
        rtnl_unlock();
 
-       if (result)
+       if (result) {
                free_netdev(priv->dev);
+               kfree(priv);
+       }
 
        return result;
 }
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
        if (dev) {
                free_netdev(dev);
+               kfree(priv);
                return 0;
        }
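
The reordering above takes the RTNL lock before the child interface is allocated, so the later error paths can drop both the netdev and its private structure; the existing rtnl_trylock()/restart_syscall() idiom is kept. As a reminder of that idiom (a sketch, not the driver's code): if the lock is contended, back out and let the system call be restarted instead of blocking.

    static int example_under_rtnl(void)
    {
            int result = 0;

            /* Avoid blocking on RTNL here; returning restart_syscall()
             * makes the caller retry the whole system call later.
             */
            if (!rtnl_trylock())
                    return restart_syscall();

            /* ... work that must run under RTNL ... */

            rtnl_unlock();
            return result;
    }
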
 
index bb3ac5fe5846b9e2d27b71bea9bf0bff9bcb8e2d..72a391e01011c8356474765cde4ab18d2f997f10 100644 (file)
@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
 int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
 {
        struct irq_domain *root_domain =
-               irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+               irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
                                &xtensa_mx_irq_domain_ops,
                                &xtensa_mx_irq_chip);
        irq_set_default_host(root_domain);
index 472ae17709647201606a10344fd741d77e81e988..f728755fa2922019e6b117105f3a9b9dfe0fc3e9 100644 (file)
@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
 int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
 {
        struct irq_domain *root_domain =
-               irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+               irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
                                &xtensa_irq_domain_ops, &xtensa_irq_chip);
        irq_set_default_host(root_domain);
        return 0;
index 1548259297c185e8cfefb4f6df7127b97755da2b..2cfd9389ee96f8cc69a40681374a19a31aa174ed 100644 (file)
@@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
 
                spin_lock_irqsave(lock, flags);
                val = bcm6328_led_read(addr);
-               val |= (BIT(reg) << (((sel % 4) * 4) + 16));
+               val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16));
                bcm6328_led_write(addr, val);
                spin_unlock_irqrestore(lock, flags);
        }
@@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
 
                spin_lock_irqsave(lock, flags);
                val = bcm6328_led_read(addr);
-               val |= (BIT(reg) << ((sel % 4) * 4));
+               val |= (BIT(reg % 4) << ((sel % 4) * 4));
                bcm6328_led_write(addr, val);
                spin_unlock_irqrestore(lock, flags);
        }
index afa3b40992140b2cf194ef971502a517b1cc9bd2..e95ea65380c864b5aef90c9eec0bb000b4b51086 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/sched/loadavg.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
-#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = {
        .deactivate = heartbeat_trig_deactivate,
 };
 
-static int heartbeat_pm_notifier(struct notifier_block *nb,
-                                unsigned long pm_event, void *unused)
-{
-       int rc;
-
-       switch (pm_event) {
-       case PM_SUSPEND_PREPARE:
-       case PM_HIBERNATION_PREPARE:
-       case PM_RESTORE_PREPARE:
-               led_trigger_unregister(&heartbeat_led_trigger);
-               break;
-       case PM_POST_SUSPEND:
-       case PM_POST_HIBERNATION:
-       case PM_POST_RESTORE:
-               rc = led_trigger_register(&heartbeat_led_trigger);
-               if (rc)
-                       pr_err("could not re-register heartbeat trigger\n");
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_DONE;
-}
-
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
                                     unsigned long code, void *unused)
 {
@@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block heartbeat_pm_nb = {
-       .notifier_call = heartbeat_pm_notifier,
-};
-
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
@@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void)
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &heartbeat_panic_nb);
                register_reboot_notifier(&heartbeat_reboot_nb);
-               register_pm_notifier(&heartbeat_pm_nb);
        }
        return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
-       unregister_pm_notifier(&heartbeat_pm_nb);
        unregister_reboot_notifier(&heartbeat_reboot_nb);
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &heartbeat_panic_nb);
index 4e25a950ae6f5af73c7bc7bd305a51368ef7d52b..43428cec3a01cde9ca4deb920ceb1fde47289076 100644 (file)
@@ -1,5 +1,6 @@
 config MEDIA_CEC_RC
        bool "HDMI CEC RC integration"
        depends on CEC_CORE && RC_CORE
+       depends on CEC_CORE=m || RC_CORE=y
        ---help---
          Pass on CEC remote control messages to the RC framework.
index 0860fb458757df3871ccab290867fcf7c4d5c11e..999926f731c88827802029ba2a928ee457e49ffb 100644 (file)
@@ -271,16 +271,10 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
                        bool block, struct cec_msg __user *parg)
 {
        struct cec_msg msg = {};
-       long err = 0;
+       long err;
 
        if (copy_from_user(&msg, parg, sizeof(msg)))
                return -EFAULT;
-       mutex_lock(&adap->lock);
-       if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
-               err = -ENONET;
-       mutex_unlock(&adap->lock);
-       if (err)
-               return err;
 
        err = cec_receive_msg(fh, &msg, block);
        if (err)
index acef4eca269f1c1f3f1e36d6ca8c76e1523cd9b6..3251cba89e8f63052091aa5b1bdaf7f2bb74afc3 100644 (file)
@@ -223,7 +223,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
 static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
                u8 mask, u8 val)
 {
-       i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
+       i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
 }
 
 static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
index e12ec50bf0bf7531cb76ef129149d77f531fa525..90a5f8fd5eea68d6e1f007843c6bb67f7c8790b2 100644 (file)
@@ -183,9 +183,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
        static unsigned long delt;
        unsigned long deltintr;
        unsigned long flags;
+       int counter = 0;
        int iir, lsr;
 
        while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
+               if (++counter > 256) {
+                       dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
+                       break;
+               }
+
                switch (iir & UART_IIR_ID) { /* FIXME: this needs to be thinned out */
                case UART_IIR_MSI:
                        (void)inb(io + UART_MSR);
index 71bd68548c9c87d3359a458efe9069c59a81e81a..4126552c90556ede32ec1bf2aee53c4d3afc3811 100644 (file)
@@ -336,6 +336,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv)
        serio_set_drvdata(serio, rain);
        INIT_WORK(&rain->work, rain_irq_work_handler);
        mutex_init(&rain->write_lock);
+       spin_lock_init(&rain->buf_lock);
 
        err = serio_open(serio, drv);
        if (err)
index 94afbbf928072ba6c24c850bdc41c3ab0820a96d..c0175ea7e7ad186d7d62c352c917cb3d14e28649 100644 (file)
@@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
 
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 {
-       if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+       if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
                return NULL;
 
        return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
index 1842ed341af10e38902f1178f409d645b7301679..de962c2d5e000a834bc40064be3bbe426e55bf5a 100644 (file)
@@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
        int i;
        bool use_desc_chain_mode = true;
 
+       /*
+        * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
+        * reported. For some strange reason this occurs in descriptor
+        * chain mode only. So let's fall back to bounce buffer mode
+        * for command SD_IO_RW_EXTENDED.
+        */
+       if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
+               return;
+
        for_each_sg(data->sg, sg, data->sg_len, i)
                /* check for 8 byte alignment */
                if (sg->offset & 7) {
index b44a6aeb346d0404144dde0304a502268de1da91..e5386ab706ec7fa394162b338c6ff4ad1de8944c 100644 (file)
@@ -90,10 +90,13 @@ enum ad_link_speed_type {
        AD_LINK_SPEED_100MBPS,
        AD_LINK_SPEED_1000MBPS,
        AD_LINK_SPEED_2500MBPS,
+       AD_LINK_SPEED_5000MBPS,
        AD_LINK_SPEED_10000MBPS,
+       AD_LINK_SPEED_14000MBPS,
        AD_LINK_SPEED_20000MBPS,
        AD_LINK_SPEED_25000MBPS,
        AD_LINK_SPEED_40000MBPS,
+       AD_LINK_SPEED_50000MBPS,
        AD_LINK_SPEED_56000MBPS,
        AD_LINK_SPEED_100000MBPS,
 };
@@ -259,10 +262,13 @@ static inline int __check_agg_selection_timer(struct port *port)
  *     %AD_LINK_SPEED_100MBPS,
  *     %AD_LINK_SPEED_1000MBPS,
  *     %AD_LINK_SPEED_2500MBPS,
+ *     %AD_LINK_SPEED_5000MBPS,
  *     %AD_LINK_SPEED_10000MBPS
+ *     %AD_LINK_SPEED_14000MBPS,
  *     %AD_LINK_SPEED_20000MBPS
  *     %AD_LINK_SPEED_25000MBPS
  *     %AD_LINK_SPEED_40000MBPS
+ *     %AD_LINK_SPEED_50000MBPS
  *     %AD_LINK_SPEED_56000MBPS
  *     %AD_LINK_SPEED_100000MBPS
  */
@@ -296,10 +302,18 @@ static u16 __get_link_speed(struct port *port)
                        speed = AD_LINK_SPEED_2500MBPS;
                        break;
 
+               case SPEED_5000:
+                       speed = AD_LINK_SPEED_5000MBPS;
+                       break;
+
                case SPEED_10000:
                        speed = AD_LINK_SPEED_10000MBPS;
                        break;
 
+               case SPEED_14000:
+                       speed = AD_LINK_SPEED_14000MBPS;
+                       break;
+
                case SPEED_20000:
                        speed = AD_LINK_SPEED_20000MBPS;
                        break;
@@ -312,6 +326,10 @@ static u16 __get_link_speed(struct port *port)
                        speed = AD_LINK_SPEED_40000MBPS;
                        break;
 
+               case SPEED_50000:
+                       speed = AD_LINK_SPEED_50000MBPS;
+                       break;
+
                case SPEED_56000:
                        speed = AD_LINK_SPEED_56000MBPS;
                        break;
@@ -707,9 +725,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
                case AD_LINK_SPEED_2500MBPS:
                        bandwidth = nports * 2500;
                        break;
+               case AD_LINK_SPEED_5000MBPS:
+                       bandwidth = nports * 5000;
+                       break;
                case AD_LINK_SPEED_10000MBPS:
                        bandwidth = nports * 10000;
                        break;
+               case AD_LINK_SPEED_14000MBPS:
+                       bandwidth = nports * 14000;
+                       break;
                case AD_LINK_SPEED_20000MBPS:
                        bandwidth = nports * 20000;
                        break;
@@ -719,6 +743,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
                case AD_LINK_SPEED_40000MBPS:
                        bandwidth = nports * 40000;
                        break;
+               case AD_LINK_SPEED_50000MBPS:
+                       bandwidth = nports * 50000;
+                       break;
                case AD_LINK_SPEED_56000MBPS:
                        bandwidth = nports * 56000;
                        break;
index 2359478b977f0e008335e51dc8f63adc2dc35087..8ab6bdbe16820dd3e56ec517838903bedda515de 100644 (file)
@@ -4192,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev)
        struct bonding *bond = netdev_priv(bond_dev);
        if (bond->wq)
                destroy_workqueue(bond->wq);
-       free_netdev(bond_dev);
 }
 
 void bond_setup(struct net_device *bond_dev)
@@ -4212,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev)
        bond_dev->netdev_ops = &bond_netdev_ops;
        bond_dev->ethtool_ops = &bond_ethtool_ops;
 
-       bond_dev->destructor = bond_destructor;
+       bond_dev->needs_free_netdev = true;
+       bond_dev->priv_destructor = bond_destructor;
 
        SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
 
@@ -4736,7 +4736,7 @@ int bond_create(struct net *net, const char *name)
 
        rtnl_unlock();
        if (res < 0)
-               bond_destructor(bond_dev);
+               free_netdev(bond_dev);
        return res;
 }
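
Several hunks in this commit (bonding, the caif drivers, slcan, vcan/vxcan, dummy, cxgb4) follow the same conversion: the old dev->destructor callback is replaced by needs_free_netdev plus, where a driver has extra teardown work, priv_destructor. A minimal sketch of the new pattern, on the assumption (per the 4.12-era netdev core) that the core invokes priv_destructor and then frees the net_device itself when needs_free_netdev is set; the names below are illustrative:

    struct example_priv {
            void *extra;            /* driver-private allocation */
    };

    static void example_priv_destructor(struct net_device *dev)
    {
            struct example_priv *priv = netdev_priv(dev);

            /* Free only driver-private resources; the core calls
             * free_netdev() afterwards because needs_free_netdev is set.
             */
            kfree(priv->extra);
    }

    static void example_setup(struct net_device *dev)
    {
            dev->needs_free_netdev = true;
            dev->priv_destructor   = example_priv_destructor;
    }
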
 
index ddabce7594565232a5e903c23248ea884995c149..71a7c3b44fdde3c3a7ede652a7dab681d577e6ce 100644 (file)
@@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev)
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
        dev->priv_flags |= IFF_NO_QUEUE;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        dev->netdev_ops = &cfhsi_netdevops;
        for (i = 0; i < CFHSI_PRIO_LAST; ++i)
                skb_queue_head_init(&cfhsi->qhead[i]);
index c2dea4916e5d720bb29814153f302ec364fe4f61..76e1d3545105e8abb9e2ef644e03fab0d2f5b243 100644 (file)
@@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev)
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CAIF_MAX_MTU;
        dev->priv_flags |= IFF_NO_QUEUE;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        skb_queue_head_init(&serdev->head);
        serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
        serdev->common.use_frag = true;
index 3a529fbe539fb6ee098f0b85515a49bdc1b64d90..fc21afe852b9817a2bdd242fc371834c515213dd 100644 (file)
@@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev)
        dev->flags = IFF_NOARP | IFF_POINTOPOINT;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->mtu = SPI_MAX_PAYLOAD_SIZE;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        skb_queue_head_init(&cfspi->qhead);
        skb_queue_head_init(&cfspi->chead);
        cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
index 6122768c86444ec5b9c5fa67d7a63aa2690ba2f5..1794ea0420b794e76c75fafdede7e1a73ca86868 100644 (file)
@@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev)
        netdev->tx_queue_len = 100;
        netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
        netdev->mtu = CFV_DEF_MTU_SIZE;
-       netdev->destructor = free_netdev;
+       netdev->needs_free_netdev = true;
 }
 
 /* Create debugfs counters for the device */
index 611d16a7061de5cb45f5e7bf7903b45472657965..ae4ed03dc6420d2e8eb0c6c0267982ab6ca58780 100644 (file)
@@ -391,6 +391,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
        can_update_state_error_stats(dev, new_state);
        priv->state = new_state;
 
+       if (!cf)
+               return;
+
        if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
                cf->can_id |= CAN_ERR_BUSOFF;
                return;
index 0d57be5ea97bafb2e9a119d28429549a91b80012..85268be0c913df5f5dc595405ade48efbd8add0a 100644 (file)
@@ -489,7 +489,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
                                struct pucan_rx_msg *msg_list, int msg_count)
 {
        void *msg_ptr = msg_list;
-       int i, msg_size;
+       int i, msg_size = 0;
 
        for (i = 0; i < msg_count; i++) {
                msg_size = peak_canfd_handle_msg(priv, msg_ptr);
index eb7173713bbcb0c339ae6ccbf99fc8014fb17133..6a6e896e52fa09415ae72715602fd4713e28f899 100644 (file)
@@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev)
 static void slc_free_netdev(struct net_device *dev)
 {
        int i = dev->base_addr;
-       free_netdev(dev);
+
        slcan_devs[i] = NULL;
 }
 
@@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = {
 static void slc_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &slc_netdev_ops;
-       dev->destructor         = slc_free_netdev;
+       dev->needs_free_netdev  = true;
+       dev->priv_destructor    = slc_free_netdev;
 
        dev->hard_header_len    = 0;
        dev->addr_len           = 0;
@@ -761,8 +762,6 @@ static void __exit slcan_exit(void)
                if (sl->tty) {
                        printk(KERN_ERR "%s: tty discipline still running\n",
                               dev->name);
-                       /* Intentionally leak the control block. */
-                       dev->destructor = NULL;
                }
 
                unregister_netdev(dev);
index eecee7f8dfb70763aa0e7c3f90f85ae66ff85385..afcc1312dbaf8f67bce640e17dbc20c06592dc45 100644 (file)
@@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
                             sizeof(*dm),
                             1000);
 
+       kfree(dm);
+
        return rc;
 }
 
index 57913dbbae0a970f5f28051d10f064792d69333c..1ca76e03e965ce0b1185c3220a86cb306d89e3e5 100644 (file)
@@ -908,8 +908,6 @@ static int peak_usb_probe(struct usb_interface *intf,
        const struct peak_usb_adapter *peak_usb_adapter = NULL;
        int i, err = -ENOMEM;
 
-       usb_dev = interface_to_usbdev(intf);
-
        /* get corresponding PCAN-USB adapter */
        for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
                if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
@@ -920,7 +918,7 @@ static int peak_usb_probe(struct usb_interface *intf,
        if (!peak_usb_adapter) {
                /* should never come except device_id bad usage in this file */
                pr_err("%s: didn't find device id. 0x%x in devices list\n",
-                       PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct);
+                       PCAN_USB_DRIVER_NAME, usb_id_product);
                return -ENODEV;
        }
 
index facca33d53e9fd24b996dc6dfd38c925b8128fdb..a8cb33264ff1ef32e9f9d44955e07f768f1cc543 100644 (file)
@@ -152,7 +152,7 @@ static const struct net_device_ops vcan_netdev_ops = {
 static void vcan_setup(struct net_device *dev)
 {
        dev->type               = ARPHRD_CAN;
-       dev->mtu                = CAN_MTU;
+       dev->mtu                = CANFD_MTU;
        dev->hard_header_len    = 0;
        dev->addr_len           = 0;
        dev->tx_queue_len       = 0;
@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
                dev->flags |= IFF_ECHO;
 
        dev->netdev_ops         = &vcan_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 }
 
 static struct rtnl_link_ops vcan_link_ops __read_mostly = {
index 7fbb2479568160b3b57fedb187429618937d2248..cfe889e8f1723823d5a3b9db71e9ba438624a9a2 100644 (file)
@@ -150,13 +150,13 @@ static const struct net_device_ops vxcan_netdev_ops = {
 static void vxcan_setup(struct net_device *dev)
 {
        dev->type               = ARPHRD_CAN;
-       dev->mtu                = CAN_MTU;
+       dev->mtu                = CANFD_MTU;
        dev->hard_header_len    = 0;
        dev->addr_len           = 0;
        dev->tx_queue_len       = 0;
        dev->flags              = (IFF_NOARP|IFF_ECHO);
        dev->netdev_ops         = &vxcan_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 }
 
 /* forward declaration for rtnl_create_link() */
index 149244aac20aa765551b93ca25d78018b28f17f9..9905b52fe293c221b1ae9852d72016191ebbadda 100644 (file)
@@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev)
        struct dummy_priv *priv = netdev_priv(dev);
 
        kfree(priv->vfinfo);
-       free_netdev(dev);
 }
 
 static void dummy_setup(struct net_device *dev)
@@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev)
        /* Initialize the device structure. */
        dev->netdev_ops = &dummy_netdev_ops;
        dev->ethtool_ops = &dummy_ethtool_ops;
-       dev->destructor = dummy_free_netdev;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = dummy_free_netdev;
 
        /* Fill in device structure with ethernet-generic values. */
        dev->flags |= IFF_NOARP;
index 08d11cede9c972596ee683c5d255fe143b76b9b8..f5b237e0bd60e2f0e2e6fd5a95d78515285629b1 100644 (file)
@@ -61,6 +61,8 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
        tail_masked = admin_queue->sq.tail & queue_size_mask;
 
        /* In case of queue FULL */
-       cnt = admin_queue->sq.tail - admin_queue->sq.head;
+       cnt = atomic_read(&admin_queue->outstanding_cmds);
        if (cnt >= admin_queue->q_depth) {
-               pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
-                        admin_queue->sq.tail, admin_queue->sq.head,
-                        admin_queue->q_depth);
+               pr_debug("admin queue is full.\n");
                admin_queue->stats.out_of_space++;
                return ERR_PTR(-ENOSPC);
        }
@@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)
 {
-       unsigned long flags;
-       u32 start_time;
+       unsigned long flags, timeout;
        int ret;
 
-       start_time = ((u32)jiffies_to_usecs(jiffies));
+       timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
+
+       while (1) {
+               spin_lock_irqsave(&admin_queue->q_lock, flags);
+               ena_com_handle_admin_completion(admin_queue);
+               spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+               if (comp_ctx->status != ENA_CMD_SUBMITTED)
+                       break;
 
-       while (comp_ctx->status == ENA_CMD_SUBMITTED) {
-               if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
-                   ADMIN_CMD_TIMEOUT_US) {
+               if (time_is_before_jiffies(timeout)) {
                        pr_err("Wait for completion (polling) timeout\n");
                        /* ENA didn't have any completion */
                        spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
                        goto err;
                }
 
-               spin_lock_irqsave(&admin_queue->q_lock, flags);
-               ena_com_handle_admin_completion(admin_queue);
-               spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-
                msleep(100);
        }
 
@@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 {
+       u32 mask_value = 0;
+
+       if (polling)
+               mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+       writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
        ena_dev->admin_queue.polling = polling;
 }
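
The ena_com polling rewrite above drops the hand-rolled microsecond bookkeeping in favour of the standard jiffies helpers: compute an absolute deadline once, handle completions, and give up when time_is_before_jiffies() reports that the deadline has passed. A condensed sketch of that shape; work_done() and the timeout value are placeholders, not driver APIs:

    static int example_poll_for_completion(unsigned int timeout_ms)
    {
            unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

            while (1) {
                    if (work_done())        /* placeholder for the real check */
                            return 0;

                    /* true once 'deadline' lies in the past */
                    if (time_is_before_jiffies(deadline))
                            return -ETIMEDOUT;

                    msleep(100);
            }
    }
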
 
index 67b2338f8fb34100df983fc11727d4e661548b24..3ee55e2fd69465e12603890bce1b530be551a2d9 100644 (file)
@@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = {
        ENA_STAT_TX_ENTRY(tx_poll),
        ENA_STAT_TX_ENTRY(doorbells),
        ENA_STAT_TX_ENTRY(prepare_ctx_err),
-       ENA_STAT_TX_ENTRY(missing_tx_comp),
        ENA_STAT_TX_ENTRY(bad_req_id),
 };
 
@@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
        ENA_STAT_RX_ENTRY(dma_mapping_err),
        ENA_STAT_RX_ENTRY(bad_desc_num),
        ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+       ENA_STAT_RX_ENTRY(empty_rx_ring),
 };
 
 static const struct ena_stats ena_stats_ena_com_strings[] = {
index 7c1214d7885566ded4dfc05e85c2ee86b8d3c949..4f16ed38bcf3a267f84894177da1f89713a0eb3e 100644 (file)
@@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
                rxr->sgl_size = adapter->max_rx_sgl_size;
                rxr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+               rxr->empty_rx_queue = 0;
        }
 }
 
@@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
        rx_ring->per_napi_bytes = 0;
 }
 
+static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+                                       struct ena_ring *rx_ring)
+{
+       struct ena_eth_io_intr_reg intr_reg;
+
+       /* Update intr register: rx intr delay,
+        * tx intr delay and interrupt unmask
+        */
+       ena_com_update_intr_reg(&intr_reg,
+                               rx_ring->smoothed_interval,
+                               tx_ring->smoothed_interval,
+                               true);
+
+       /* It is a shared MSI-X.
+        * Tx and Rx CQ have pointer to it.
+        * So we use one of them to reach the intr reg
+        */
+       ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+}
+
 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
                                             struct ena_ring *rx_ring)
 {
@@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 {
        struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
        struct ena_ring *tx_ring, *rx_ring;
-       struct ena_eth_io_intr_reg intr_reg;
 
        u32 tx_work_done;
        u32 rx_work_done;
@@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
                        if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
                                ena_adjust_intr_moderation(rx_ring, tx_ring);
 
-                       /* Update intr register: rx intr delay,
-                        * tx intr delay and interrupt unmask
-                        */
-                       ena_com_update_intr_reg(&intr_reg,
-                                               rx_ring->smoothed_interval,
-                                               tx_ring->smoothed_interval,
-                                               true);
-
-                       /* It is a shared MSI-X.
-                        * Tx and Rx CQ have pointer to it.
-                        * So we use one of them to reach the intr reg
-                        */
-                       ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+                       ena_unmask_interrupt(tx_ring, rx_ring);
                }
 
-
                ena_update_ring_numa_node(tx_ring, rx_ring);
 
                ret = rx_work_done;
@@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter)
 
        ena_napi_enable_all(adapter);
 
+       /* Enable completion queues interrupt */
+       for (i = 0; i < adapter->num_queues; i++)
+               ena_unmask_interrupt(&adapter->tx_ring[i],
+                                    &adapter->rx_ring[i]);
+
        /* schedule napi in case we had pending packets
         * from the last time we disable napi
         */
@@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
                          "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
                          qid, rc);
                ena_com_destroy_io_queue(ena_dev, ena_qid);
+               return rc;
        }
 
        ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
                          "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
                          qid, rc);
                ena_com_destroy_io_queue(ena_dev, ena_qid);
+               return rc;
        }
 
        ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_info->tx_descs = nb_hw_desc;
        tx_info->last_jiffies = jiffies;
+       tx_info->print_once = 0;
 
        tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
                tx_ring->ring_size);
@@ -2550,13 +2565,44 @@ err:
                "Reset attempt failed. Can not reset the device\n");
 }
 
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static int check_missing_comp_in_queue(struct ena_adapter *adapter,
+                                      struct ena_ring *tx_ring)
 {
        struct ena_tx_buffer *tx_buf;
        unsigned long last_jiffies;
+       u32 missed_tx = 0;
+       int i;
+
+       for (i = 0; i < tx_ring->ring_size; i++) {
+               tx_buf = &tx_ring->tx_buffer_info[i];
+               last_jiffies = tx_buf->last_jiffies;
+               if (unlikely(last_jiffies &&
+                            time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+                       if (!tx_buf->print_once)
+                               netif_notice(adapter, tx_err, adapter->netdev,
+                                            "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
+                                            tx_ring->qid, i);
+
+                       tx_buf->print_once = 1;
+                       missed_tx++;
+
+                       if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+                               netif_err(adapter, tx_err, adapter->netdev,
+                                         "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+                                         missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+                               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+                               return -EIO;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
        struct ena_ring *tx_ring;
-       int i, j, budget;
-       u32 missed_tx;
+       int i, budget, rc;
 
        /* Make sure the driver doesn't turn the device in other process */
        smp_rmb();
@@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
        for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
                tx_ring = &adapter->tx_ring[i];
 
-               for (j = 0; j < tx_ring->ring_size; j++) {
-                       tx_buf = &tx_ring->tx_buffer_info[j];
-                       last_jiffies = tx_buf->last_jiffies;
-                       if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
-                               netif_notice(adapter, tx_err, adapter->netdev,
-                                            "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
-                                            tx_ring->qid, j);
-
-                               u64_stats_update_begin(&tx_ring->syncp);
-                               missed_tx = tx_ring->tx_stats.missing_tx_comp++;
-                               u64_stats_update_end(&tx_ring->syncp);
-
-                               /* Clear last jiffies so the lost buffer won't
-                                * be counted twice.
-                                */
-                               tx_buf->last_jiffies = 0;
-
-                               if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
-                                       netif_err(adapter, tx_err, adapter->netdev,
-                                                 "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
-                                                 missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
-                                       set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-                               }
-                       }
-               }
+               rc = check_missing_comp_in_queue(adapter, tx_ring);
+               if (unlikely(rc))
+                       return;
 
                budget--;
                if (!budget)
@@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
        adapter->last_monitored_tx_qid = i % adapter->num_queues;
 }
 
+/* trigger napi schedule after 2 consecutive detections */
+#define EMPTY_RX_REFILL 2
+/* For the rare case where the device runs out of Rx descriptors and the
+ * napi handler failed to refill new Rx descriptors (due to a lack of memory
+ * for example).
+ * This case will lead to a deadlock:
+ * The device won't send interrupts since all the new Rx packets will be dropped
+ * The napi handler won't allocate new Rx descriptors so the device won't be
+ * able to send new packets.
+ *
+ * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
+ * It is recommended to have at least 512MB, with a minimum of 128MB for
+ * constrained environments.
+ *
+ * When such a situation is detected - Reschedule napi
+ */
+static void check_for_empty_rx_ring(struct ena_adapter *adapter)
+{
+       struct ena_ring *rx_ring;
+       int i, refill_required;
+
+       if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+               return;
+
+       if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+               return;
+
+       for (i = 0; i < adapter->num_queues; i++) {
+               rx_ring = &adapter->rx_ring[i];
+
+               refill_required =
+                       ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+               if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
+                       rx_ring->empty_rx_queue++;
+
+                       if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
+                               u64_stats_update_begin(&rx_ring->syncp);
+                               rx_ring->rx_stats.empty_rx_ring++;
+                               u64_stats_update_end(&rx_ring->syncp);
+
+                               netif_err(adapter, drv, adapter->netdev,
+                                         "trigger refill for ring %d\n", i);
+
+                               napi_schedule(rx_ring->napi);
+                               rx_ring->empty_rx_queue = 0;
+                       }
+               } else {
+                       rx_ring->empty_rx_queue = 0;
+               }
+       }
+}
+
 /* Check for keep alive expiration */
 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 {
@@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data)
 
        check_for_missing_tx_completions(adapter);
 
+       check_for_empty_rx_ring(adapter);
+
        if (debug_area)
                ena_dump_stats_to_buf(adapter, debug_area);
 
@@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
        int release_bars;
 
+       if (ena_dev->mem_bar)
+               devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+
+       devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+
        release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
        pci_release_selected_regions(pdev, release_bars);
 }
@@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_free_ena_dev;
        }
 
-       ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
-                                  pci_resource_len(pdev, ENA_REG_BAR));
+       ena_dev->reg_bar = devm_ioremap(&pdev->dev,
+                                       pci_resource_start(pdev, ENA_REG_BAR),
+                                       pci_resource_len(pdev, ENA_REG_BAR));
        if (!ena_dev->reg_bar) {
                dev_err(&pdev->dev, "failed to remap regs bar\n");
                rc = -EFAULT;
@@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
 
        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-               ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
-                                             pci_resource_len(pdev, ENA_MEM_BAR));
+               ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+                                                  pci_resource_start(pdev, ENA_MEM_BAR),
+                                                  pci_resource_len(pdev, ENA_MEM_BAR));
                if (!ena_dev->mem_bar) {
                        rc = -EFAULT;
                        goto err_device_destroy;
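
The final ena_netdev.c hunks switch the BAR mappings to their device-managed counterparts (devm_ioremap()/devm_ioremap_wc()) and unmap them explicitly with devm_iounmap() before releasing the regions. A minimal sketch of the devm-managed BAR mapping pattern in a PCI probe path; the function is illustrative, not part of the driver:

    static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
    {
            void __iomem *regs;

            /* devm_ioremap() ties the mapping to the device, so it is
             * released automatically on probe failure or driver unbind.
             */
            regs = devm_ioremap(&pdev->dev,
                                pci_resource_start(pdev, bar),
                                pci_resource_len(pdev, bar));
            if (!regs)
                    dev_err(&pdev->dev, "failed to remap BAR %d\n", bar);

            return regs;
    }
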
index 0e22bce6239d0e06c73a366e0d98a2348a9b7fa9..a4d3d5e2106885b093424dc2ae3856c002653d06 100644 (file)
@@ -45,7 +45,7 @@
 
 #define DRV_MODULE_VER_MAJOR   1
 #define DRV_MODULE_VER_MINOR   1
-#define DRV_MODULE_VER_SUBMINOR 2
+#define DRV_MODULE_VER_SUBMINOR 7
 
 #define DRV_MODULE_NAME                "ena"
 #ifndef DRV_MODULE_VERSION
@@ -146,7 +146,18 @@ struct ena_tx_buffer {
        u32 tx_descs;
        /* num of buffers used by this skb */
        u32 num_of_bufs;
-       /* Save the last jiffies to detect missing tx packets */
+
+       /* Used to limit prints when detecting missing tx packets */
+       u32 print_once;
+       /* Save the last jiffies to detect missing tx packets
+        *
+        * Set to a non-zero value in ena_start_xmit() and cleared by the
+        * napi handler and the timer service routine.
+        *
+        * While this value is not protected by a lock, a given packet is
+        * not expected to be handled by ena_start_xmit() and by the
+        * napi/timer service at the same time.
+        */
        unsigned long last_jiffies;
        struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
 } ____cacheline_aligned;
@@ -170,7 +181,6 @@ struct ena_stats_tx {
        u64 napi_comp;
        u64 tx_poll;
        u64 doorbells;
-       u64 missing_tx_comp;
        u64 bad_req_id;
 };
 
@@ -184,6 +194,7 @@ struct ena_stats_rx {
        u64 dma_mapping_err;
        u64 bad_desc_num;
        u64 rx_copybreak_pkt;
+       u64 empty_rx_ring;
 };
 
 struct ena_ring {
@@ -231,6 +242,7 @@ struct ena_ring {
                struct ena_stats_tx tx_stats;
                struct ena_stats_rx rx_stats;
        };
+       int empty_rx_queue;
 } ____cacheline_aligned;
 
 struct ena_stats_dev {
index b8e3d88f08790a2e16ae3fc6b5edb8bedaaadf2f..a66aee51ab5b049c4e9f6367aebdd57cbbf5fc5a 100644 (file)
@@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
                             struct aq_hw_caps_s *aq_hw_caps,
                             u32 *regs_buff);
 
-int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
-                                struct ethtool_cmd *cmd);
-
 int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
                              unsigned int power_state);
 
index 5f49334dcad5a8c8602cc3aa2e8795b2d489bb43..f619c4cac51f0f3a8a6631091c8e022de087a719 100644 (file)
@@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                /* when transmitting in a vf, start bd must hold the ethertype
                 * for fw to enforce it
                 */
+               u16 vlan_tci = 0;
 #ifndef BNX2X_STOP_ON_ERROR
-               if (IS_VF(bp))
+               if (IS_VF(bp)) {
 #endif
-                       tx_start_bd->vlan_or_ethertype =
-                               cpu_to_le16(ntohs(eth->h_proto));
+                       /* Still need to consider inband vlan for enforced */
+                       if (__vlan_get_tag(skb, &vlan_tci)) {
+                               tx_start_bd->vlan_or_ethertype =
+                                       cpu_to_le16(ntohs(eth->h_proto));
+                       } else {
+                               tx_start_bd->bd_flags.as_bitfield |=
+                                       (X_ETH_INBAND_VLAN <<
+                                        ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+                               tx_start_bd->vlan_or_ethertype =
+                                       cpu_to_le16(vlan_tci);
+                       }
 #ifndef BNX2X_STOP_ON_ERROR
-               else
+               } else {
                        /* used by FW for packet accounting */
                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+               }
 #endif
        }
 
index bdfd53b46bc568286ac9debc70bb14563329040b..9ca994d0bab66eeec49c53275d5257350a5f4f8e 100644 (file)
@@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
        /* release VF resources */
        bnx2x_vf_free_resc(bp, vf);
 
+       vf->malicious = false;
+
        /* re-open the mailbox */
        bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
        return;
@@ -1822,9 +1824,11 @@ get_vf:
                   vf->abs_vfid, qidx);
                bnx2x_vf_handle_rss_update_eqe(bp, vf);
        case EVENT_RING_OPCODE_VF_FLR:
-       case EVENT_RING_OPCODE_MALICIOUS_VF:
                /* Do nothing for now */
                return 0;
+       case EVENT_RING_OPCODE_MALICIOUS_VF:
+               vf->malicious = true;
+               return 0;
        }
 
        return 0;
@@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
                        continue;
                }
 
+               if (vf->malicious) {
+                       DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+                              "vf %d malicious so no stats for it\n",
+                              vf->abs_vfid);
+                       continue;
+               }
+
                DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
                       "add addresses for vf %d\n", vf->abs_vfid);
                for_each_vfq(vf, j) {
@@ -3042,7 +3053,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
 {
        BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
                       sizeof(struct bnx2x_vf_mbx_msg));
-       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+       BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
                       sizeof(union pf_vf_bulletin));
 }
 
index 888d0b6632e86f2f7ab7e2f9e605be87fa4c7061..53466f6cebabc4cd3089a0b45fdaa74e8bd9d905 100644 (file)
@@ -141,6 +141,7 @@ struct bnx2x_virtf {
 #define VF_RESET       3       /* VF FLR'd, pending cleanup */
 
        bool flr_clnup_stage;   /* true during flr cleanup */
+       bool malicious;         /* true if FW indicated so, until FLR */
 
        /* dma */
        dma_addr_t fw_stat_map;
index 77ed2f628f9ca23854ae8b062ff919ce6d2e3425..ea1bfcf1870afbc5806e61dd6416669216f38ce7 100644 (file)
@@ -4525,7 +4525,7 @@ static void dummy_setup(struct net_device *dev)
        /* Initialize the device structure. */
        dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
        dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
 }
 
 static int config_mgmt_dev(struct pci_dev *pdev)
index 508923f39ccfe0cbc39ed23305af46860eef32c9..259e69a52ec52acde37849b9858c9953029679af 100644 (file)
@@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev)
 {
        struct emac_regs __iomem *p = dev->emacp;
        int n = 20;
+       bool __maybe_unused try_internal_clock = false;
 
        DBG(dev, "reset" NL);
 
@@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev)
        }
 
 #ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
        /*
         * PPC460EX/GT Embedded Processor Advanced User's Manual
         * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev)
         * of the EMAC. If none is present, select the internal clock
         * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
         * After a soft reset, select the external clock.
+        *
+        * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
+        * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if the
+        * ethernet cable is not attached. This causes the reset to time out
+        * communicate and detect the AR8035-A PHY. As a result, the emac
+        * driver bails out early and the user has no ethernet.
+        * In order to stay compatible with existing configurations, the
+        * driver will temporarily switch to the internal clock, after
+        * the first reset fails.
         */
        if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
-               if (dev->phy_address == 0xffffffff &&
-                   dev->phy_map == 0xffffffff) {
+               if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+                                          dev->phy_map == 0xffffffff)) {
                        /* No PHY: select internal loop clock before reset */
                        dcri_clrset(SDR0, SDR0_ETH_CFG,
                                    0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev)
 
 #ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
-               if (dev->phy_address == 0xffffffff &&
-                   dev->phy_map == 0xffffffff) {
+               if (!n && !try_internal_clock) {
+                       /* first attempt has timed out. */
+                       n = 20;
+                       try_internal_clock = true;
+                       goto do_retry;
+               }
+
+               if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+                                          dev->phy_map == 0xffffffff)) {
                        /* No PHY: restore external clock source after reset */
                        dcri_clrset(SDR0, SDR0_ETH_CFG,
                                    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
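
The comment and hunks above add a single retry: if the first EMAC reset times out while waiting for an external TX clock that never arrives, the driver switches SDR0_ETH_CFG to the internal clock and jumps back to do_retry. A standalone sketch of the same retry-once-with-fallback shape (all names hypothetical, the "hardware" is simulated):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the reset: pretend the external clock is missing, as on an
 * AR8035-A with no cable plugged in, so only the internal clock succeeds.
 */
static bool reset_with_clock(bool internal_clock)
{
	return internal_clock;
}

static int reset_with_fallback(void)
{
	bool try_internal_clock = false;

retry:
	if (!reset_with_clock(try_internal_clock)) {
		if (!try_internal_clock) {
			/* First attempt timed out: switch clocks, retry once. */
			try_internal_clock = true;
			goto retry;
		}
		return -1;	/* both clock sources failed */
	}
	return 0;
}

int main(void)
{
	printf("reset %s\n", reset_with_fallback() ? "failed" : "succeeded");
	return 0;
}
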
@@ -2460,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus)
        return emac_reset(dev);
 }
 
+static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
+                                   struct phy_device *phy_dev)
+{
+       phy_dev->autoneg = phy->autoneg;
+       phy_dev->speed = phy->speed;
+       phy_dev->duplex = phy->duplex;
+       phy_dev->advertising = phy->advertising;
+       return phy_start_aneg(phy_dev);
+}
+
 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
 {
        struct net_device *ndev = phy->dev;
        struct emac_instance *dev = netdev_priv(ndev);
 
-       dev->phy.autoneg = AUTONEG_ENABLE;
-       dev->phy.speed = SPEED_1000;
-       dev->phy.duplex = DUPLEX_FULL;
-       dev->phy.advertising = advertise;
        phy->autoneg = AUTONEG_ENABLE;
-       phy->speed = dev->phy.speed;
-       phy->duplex = dev->phy.duplex;
        phy->advertising = advertise;
-       return phy_start_aneg(dev->phy_dev);
+       return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
 }
 
 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
@@ -2481,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
        struct net_device *ndev = phy->dev;
        struct emac_instance *dev = netdev_priv(ndev);
 
-       dev->phy.autoneg =  AUTONEG_DISABLE;
-       dev->phy.speed = speed;
-       dev->phy.duplex = fd;
        phy->autoneg = AUTONEG_DISABLE;
        phy->speed = speed;
        phy->duplex = fd;
-       return phy_start_aneg(dev->phy_dev);
+       return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
 }
 
 static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2509,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy)
 {
        struct net_device *ndev = phy->dev;
        struct emac_instance *dev = netdev_priv(ndev);
+       struct phy_device *phy_dev = dev->phy_dev;
        int res;
 
-       res = phy_read_status(dev->phy_dev);
+       res = phy_read_status(phy_dev);
        if (res)
                return res;
 
-       dev->phy.speed = phy->speed;
-       dev->phy.duplex = phy->duplex;
-       dev->phy.pause = phy->pause;
-       dev->phy.asym_pause = phy->asym_pause;
+       phy->speed = phy_dev->speed;
+       phy->duplex = phy_dev->duplex;
+       phy->pause = phy_dev->pause;
+       phy->asym_pause = phy_dev->asym_pause;
        return 0;
 }
 
@@ -2528,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy)
        struct emac_instance *dev = netdev_priv(ndev);
 
        phy_start(dev->phy_dev);
-       dev->phy.autoneg = phy->autoneg;
-       dev->phy.speed = phy->speed;
-       dev->phy.duplex = phy->duplex;
-       dev->phy.advertising = phy->advertising;
-       dev->phy.pause = phy->pause;
-       dev->phy.asym_pause = phy->asym_pause;
-
        return phy_init_hw(dev->phy_dev);
 }
 
index a93757c255f77445e2245ee8b065d2c6ee31cf3f..c0fbeb387db46627bed908e8f4cb145ed0bcfe4b 100644 (file)
@@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
 }
 #endif
 
+static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       return -EOPNOTSUPP;
+}
+
 static const struct net_device_ops ibmvnic_netdev_ops = {
        .ndo_open               = ibmvnic_open,
        .ndo_stop               = ibmvnic_close,
@@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ibmvnic_netpoll_controller,
 #endif
+       .ndo_change_mtu         = ibmvnic_change_mtu,
 };
 
 /* ethtool functions */
index cdde3cc28fb59f16234fe5aaf501f569abb62dc4..44d9610f7a15d739c3b8b9312cd81bd9a8dc0ed0 100644 (file)
@@ -399,6 +399,7 @@ struct i40e_pf {
 #define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
 #define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
 #define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED         BIT_ULL(4)
 #define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
 #define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
 #define I40E_FLAG_IWARP_ENABLED                        BIT_ULL(10)
index 7a8eb486b9ea554421fc7f62929a973b9c6efad4..894c8e57ba004398c5a86981930504ace9027a29 100644 (file)
@@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
        I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
        I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
        I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
-       I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0),
+       I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
        I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
 };
 
@@ -4092,7 +4092,7 @@ flags_complete:
 
        /* Only allow ATR evict on hardware that is capable of handling it */
        if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
-               pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+               pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED;
 
        if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
                u16 sw_flags = 0, valid_flags = 0;
index 150caf6ca2b4bb1da5e0ea63f37fd086c058efad..a7a4b28b4144c62c35982dba952f7e77a5809f2e 100644 (file)
@@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
                    (pf->hw.aq.api_min_ver > 4))) {
                /* Supported in FW API version higher than 1.4 */
                pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
-               pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
-       } else {
-               pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
        }
 
+       /* Enable HW ATR eviction if possible */
+       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+               pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
+
        pf->eeprom_version = 0xDEAD;
        pf->lan_veb = I40E_NO_VEB;
        pf->lan_vsi = I40E_NO_VSI;
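
The i40e hunks split HW ATR eviction into two flags: I40E_FLAG_HW_ATR_EVICT_CAPABLE records what the hardware can do, while the new I40E_FLAG_HW_ATR_EVICT_ENABLED records whether the feature is actually in use; the transmit path now tests only the ENABLED bit. A tiny standalone sketch of that capable/enabled discipline (the flag names below are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define FLAG_FEATURE_CAPABLE	(1ULL << 0)	/* hardware supports it */
#define FLAG_FEATURE_ENABLED	(1ULL << 1)	/* feature is switched on */

int main(void)
{
	uint64_t flags = FLAG_FEATURE_CAPABLE;	/* discovered at probe time */

	/* Enable only where the hardware is capable. */
	if (flags & FLAG_FEATURE_CAPABLE)
		flags |= FLAG_FEATURE_ENABLED;

	/* Fast paths key off ENABLED, never off CAPABLE. */
	printf("feature %s\n",
	       (flags & FLAG_FEATURE_ENABLED) ? "active" : "inactive");
	return 0;
}
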
index cd894f4023b1b68cc4e202ff7064e63e2f8be031..77115c25d96fa88e8348bd0e891d03ca964b4d12 100644 (file)
@@ -2341,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        /* Due to lack of space, no more new filters can be programmed */
        if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
                return;
-       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
                /* HW ATR eviction will take care of removing filters on FIN
                 * and RST packets.
                 */
@@ -2403,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
-       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
                dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
 
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
index 95c23fbaa2113214d9cd34ddfa176a7d1cd8d89e..0fb38ca78900312c59a6cd0f0373dc6cd87ecf84 100644 (file)
@@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
                                           VLAN_VID_MASK));
        }
 
+       spin_unlock_bh(&vsi->mac_filter_hash_lock);
        if (vlan_id || qos)
                ret = i40e_vsi_add_pvid(vsi, vlanprio);
        else
                i40e_vsi_remove_pvid(vsi);
+       spin_lock_bh(&vsi->mac_filter_hash_lock);
 
        if (vlan_id) {
                dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
index 9b875d776b296d165c4ae3a994bd527a8dafb698..33c901622ed5b94aef2a0335aa4576962072d34c 100644 (file)
@@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
                                    dma_addr_t *dma_addr,
                                    phys_addr_t *phys_addr)
 {
-       int cpu = smp_processor_id();
+       int cpu = get_cpu();
 
        *dma_addr = mvpp2_percpu_read(priv, cpu,
                                      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
@@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
                if (sizeof(phys_addr_t) == 8)
                        *phys_addr |= (u64)phys_addr_highbits << 32;
        }
+
+       put_cpu();
 }
 
 /* Free all buffers from the pool */
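
The mvpp2 hunks in this file replace smp_processor_id() with get_cpu()/put_cpu() around the indirect per-CPU register accesses, so the task cannot be preempted and migrated to another CPU between picking the CPU id and issuing the accesses. A minimal kernel-style sketch of the pattern, using a hypothetical per-CPU structure in place of the driver's register window:

#include <linux/types.h>
#include <linux/smp.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU shadow of an index/data register pair. */
struct example_regs {
	u32 index;
	u32 data;
};

static DEFINE_PER_CPU(struct example_regs, example_regs);

static void example_indirect_write(u32 reg, u32 val)
{
	/* get_cpu() disables preemption until put_cpu(), so both accesses are
	 * guaranteed to hit the same CPU's copy; a bare smp_processor_id()
	 * gives no such guarantee in preemptible context.
	 */
	int cpu = get_cpu();
	struct example_regs *r = per_cpu_ptr(&example_regs, cpu);

	r->index = reg;
	r->data  = val;

	put_cpu();
}
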
@@ -3920,18 +3922,12 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
        return bm;
 }
 
-/* Get pool number from a BM cookie */
-static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
-{
-       return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
-}
-
 /* Release buffer to BM */
 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
                                     dma_addr_t buf_dma_addr,
                                     phys_addr_t buf_phys_addr)
 {
-       int cpu = smp_processor_id();
+       int cpu = get_cpu();
 
        if (port->priv->hw_version == MVPP22) {
                u32 val = 0;
@@ -3958,15 +3954,15 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
                           MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
        mvpp2_percpu_write(port->priv, cpu,
                           MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+
+       put_cpu();
 }
 
 /* Refill BM pool */
-static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
+static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
                              dma_addr_t dma_addr,
                              phys_addr_t phys_addr)
 {
-       int pool = mvpp2_bm_cookie_pool_get(bm);
-
        mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
 }
 
@@ -4186,8 +4182,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port)
 {
        u32 val;
 
-       return;
-
        /* Only GOP port 0 has an XLG MAC */
        if (port->gop_id == 0) {
                val = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4515,21 +4509,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
 }
 
-/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
-                                struct mvpp2_rx_desc *rx_desc)
-{
-       int cpu = smp_processor_id();
-       int pool;
-
-       pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
-               MVPP2_RXD_BM_POOL_ID_MASK) >>
-               MVPP2_RXD_BM_POOL_ID_OFFS;
-
-       return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
-              ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
-}
-
 /* Tx descriptors helper methods */
 
 /* Get pointer to next Tx descriptor to be processed (send) by HW */
@@ -4757,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
                                   struct mvpp2_rx_queue *rxq)
 {
-       int cpu = smp_processor_id();
+       int cpu = get_cpu();
 
        if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
                rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
@@ -4765,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
                           rxq->pkts_coal);
+
+       put_cpu();
 }
 
 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4945,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
 
        /* Set Rx descriptors queue starting address - indirect access */
-       cpu = smp_processor_id();
+       cpu = get_cpu();
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
        if (port->priv->hw_version == MVPP21)
                rxq_dma = rxq->descs_dma;
@@ -4954,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+       put_cpu();
 
        /* Set Offset */
        mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4980,9 +4962,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
 
        for (i = 0; i < rx_received; i++) {
                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
-               u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
+               u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+               int pool;
+
+               pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+                       MVPP2_RXD_BM_POOL_ID_OFFS;
 
-               mvpp2_pool_refill(port, bm,
+               mvpp2_pool_refill(port, pool,
                                  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
                                  mvpp2_rxdesc_cookie_get(port, rx_desc));
        }
@@ -5012,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
         * free descriptor number
         */
        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
-       cpu = smp_processor_id();
+       cpu = get_cpu();
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+       put_cpu();
 }
 
 /* Create and initialize a Tx queue */
@@ -5038,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        txq->last_desc = txq->size - 1;
 
        /* Set Tx descriptors queue starting address - indirect access */
-       cpu = smp_processor_id();
+       cpu = get_cpu();
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
                           txq->descs_dma);
@@ -5063,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
                           MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
                           MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+       put_cpu();
 
        /* WRR / EJP configuration - indirect access */
        tx_port_num = mvpp2_egress_port(port);
@@ -5133,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
 
        /* Set Tx descriptors queue starting address and size */
-       cpu = smp_processor_id();
+       cpu = get_cpu();
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+       put_cpu();
 }
 
 /* Cleanup Tx ports */
@@ -5146,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
        int delay, pending, cpu;
        u32 val;
 
-       cpu = smp_processor_id();
+       cpu = get_cpu();
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
        val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
        val |= MVPP2_TXQ_DRAIN_EN_MASK;
@@ -5173,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
 
        val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+       put_cpu();
 
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -5420,7 +5410,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
 
 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
 static int mvpp2_rx_refill(struct mvpp2_port *port,
-                          struct mvpp2_bm_pool *bm_pool, u32 bm)
+                          struct mvpp2_bm_pool *bm_pool, int pool)
 {
        dma_addr_t dma_addr;
        phys_addr_t phys_addr;
@@ -5432,7 +5422,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
        if (!buf)
                return -ENOMEM;
 
-       mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
+       mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
 
        return 0;
 }
@@ -5490,7 +5480,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
                unsigned int frag_size;
                dma_addr_t dma_addr;
                phys_addr_t phys_addr;
-               u32 bm, rx_status;
+               u32 rx_status;
                int pool, rx_bytes, err;
                void *data;
 
@@ -5502,8 +5492,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
                phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
                data = (void *)phys_to_virt(phys_addr);
 
-               bm = mvpp2_bm_cookie_build(port, rx_desc);
-               pool = mvpp2_bm_cookie_pool_get(bm);
+               pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+                       MVPP2_RXD_BM_POOL_ID_OFFS;
                bm_pool = &port->priv->bm_pools[pool];
 
                /* In case of an error, release the requested buffer pointer
@@ -5516,7 +5506,7 @@ err_drop_frame:
                        dev->stats.rx_errors++;
                        mvpp2_rx_error(port, rx_desc);
                        /* Return the buffer to the pool */
-                       mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
+                       mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
                        continue;
                }
 
@@ -5531,7 +5521,7 @@ err_drop_frame:
                        goto err_drop_frame;
                }
 
-               err = mvpp2_rx_refill(port, bm_pool, bm);
+               err = mvpp2_rx_refill(port, bm_pool, pool);
                if (err) {
                        netdev_err(port->dev, "failed to refill BM pools\n");
                        goto err_drop_frame;
index 2fd044b238750fedc61384fda55bf25206d25b51..944fc17424642189d82b5540aaa3751e7ea2c9e2 100644 (file)
@@ -458,13 +458,15 @@ struct mlx5e_mpw_info {
 
 struct mlx5e_rx_am_stats {
        int ppms; /* packets per msec */
+       int bpms; /* bytes per msec */
        int epms; /* events per msec */
 };
 
 struct mlx5e_rx_am_sample {
-       ktime_t         time;
-       unsigned int    pkt_ctr;
-       u16             event_ctr;
+       ktime_t time;
+       u32     pkt_ctr;
+       u32     byte_ctr;
+       u16     event_ctr;
 };
 
 struct mlx5e_rx_am { /* Adaptive Moderation */
index 02dd3a95ed8f013d0d4795d5054bd79ae4ca1201..acf32fe952cdef6f994e2937433ac4b1b7962e84 100644 (file)
@@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
        mlx5e_am_step(am);
 }
 
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+       (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
 static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
                                  struct mlx5e_rx_am_stats *prev)
 {
-       int diff;
-
-       if (!prev->ppms)
-               return curr->ppms ? MLX5E_AM_STATS_BETTER :
+       if (!prev->bpms)
+               return curr->bpms ? MLX5E_AM_STATS_BETTER :
                                    MLX5E_AM_STATS_SAME;
 
-       diff = curr->ppms - prev->ppms;
-       if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
-               return (diff > 0) ? MLX5E_AM_STATS_BETTER :
-                                   MLX5E_AM_STATS_WORSE;
+       if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+               return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
+                                                  MLX5E_AM_STATS_WORSE;
 
-       if (!prev->epms)
-               return curr->epms ? MLX5E_AM_STATS_WORSE :
-                                   MLX5E_AM_STATS_SAME;
+       if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+               return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
+                                                  MLX5E_AM_STATS_WORSE;
 
-       diff = curr->epms - prev->epms;
-       if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
-               return (diff < 0) ? MLX5E_AM_STATS_BETTER :
-                                   MLX5E_AM_STATS_WORSE;
+       if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+               return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
+                                                  MLX5E_AM_STATS_WORSE;
 
        return MLX5E_AM_STATS_SAME;
 }
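
The new IS_SIGNIFICANT_DIFF() macro treats two rates as different only when they diverge by more than 10% of the reference value, and the comparison now prefers bytes/msec over packets/msec. A quick standalone check of the arithmetic (the macro body matches the hunk; the test harness is hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define IS_SIGNIFICANT_DIFF(val, ref) \
	(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */

int main(void)
{
	/* 1150 vs 1000 is a 15% difference -> significant (prints 1). */
	printf("%d\n", IS_SIGNIFICANT_DIFF(1150, 1000));
	/* 1080 vs 1000 is an 8% difference -> not significant (prints 0). */
	printf("%d\n", IS_SIGNIFICANT_DIFF(1080, 1000));
	return 0;
}
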
@@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq,
 {
        s->time      = ktime_get();
        s->pkt_ctr   = rq->stats.packets;
+       s->byte_ctr  = rq->stats.bytes;
        s->event_ctr = rq->cq.event_ctr;
 }
 
 #define MLX5E_AM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
 
 static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
                                struct mlx5e_rx_am_sample *end,
@@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
 {
        /* u32 holds up to 71 minutes, should be enough */
        u32 delta_us = ktime_us_delta(end->time, start->time);
-       unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+       u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+       u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+                            start->byte_ctr);
 
        if (!delta_us)
                return;
 
-       curr_stats->ppms =            (npkts * USEC_PER_MSEC) / delta_us;
-       curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+       curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+       curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+       curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
+                                       delta_us);
 }
 
 void mlx5e_rx_am_work(struct work_struct *work)
@@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq)
 
        switch (am->state) {
        case MLX5E_AM_MEASURE_IN_PROGRESS:
-               nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+               nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
+                                 am->start_sample.event_ctr);
                if (nevents < MLX5E_AM_NEVENTS)
                        break;
                mlx5e_am_sample(rq, &end_sample);
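
BIT_GAP() computes the forward distance between two free-running counters modulo 2^bits, so the sampled deltas stay correct when the u32 packet/byte counters or the u16 event counter wrap around. A standalone demonstration with the same macros (BIT_ULL and BITS_PER_BYTE are redefined here only so the sketch compiles outside the kernel):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE		8
#define BIT_ULL(n)		(1ULL << (n))
#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BIT_GAP(bits, end, start) \
	((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))

int main(void)
{
	uint16_t start = 65500, end = 100;	/* counter wrapped past 65535 */

	/* Plain subtraction would go negative after integer promotion;
	 * reducing the difference modulo 2^16 recovers the true gap of 136.
	 */
	printf("%llu\n", BIT_GAP(BITS_PER_TYPE(uint16_t), end, start));
	return 0;
}
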
index 53e4992d6511f578c5bb31e993afb0986789a979..f81c3aa60b469bef9223fd4313b9d136803ff2b1 100644 (file)
@@ -417,20 +417,13 @@ struct mlx5e_stats {
 };
 
 static const struct counter_desc mlx5e_pme_status_desc[] = {
-       { "module_plug", 0 },
        { "module_unplug", 8 },
 };
 
 static const struct counter_desc mlx5e_pme_error_desc[] = {
-       { "module_pwr_budget_exd", 0 },  /* power budget exceed */
-       { "module_long_range", 8 },      /* long range for non MLNX cable */
-       { "module_bus_stuck", 16 },      /* bus stuck (I2C or data shorted) */
-       { "module_no_eeprom", 24 },      /* no eeprom/retry time out */
-       { "module_enforce_part", 32 },   /* enforce part number list */
-       { "module_unknown_id", 40 },     /* unknown identifier */
-       { "module_high_temp", 48 },      /* high temperature */
+       { "module_bus_stuck", 16 },       /* bus stuck (I2C or data shorted) */
+       { "module_high_temp", 48 },       /* high temperature */
        { "module_bad_shorted", 56 },    /* bad or shorted cable/module */
-       { "module_unknown_status", 64 },
 };
 
 #endif /* __MLX5_EN_STATS_H__ */
index 0e487e8ca634bce0108979823e755918fe27c3fb..8f5125ccd8d4f430a2c0e5d60273e7002ca527bd 100644 (file)
@@ -862,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace
        ft_attr.level   = level;
        ft_attr.prio    = prio;
 
-       return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0);
+       return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
 }
 
 struct mlx5_flow_table*
index 44f59b1d6f0f27f7bb4f818b11f341af28ba09dc..f27f84ffbc850487557ad0184960d7c872abb160 100644 (file)
@@ -275,10 +275,8 @@ static void poll_health(unsigned long data)
        struct mlx5_core_health *health = &dev->priv.health;
        u32 count;
 
-       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-               mod_timer(&health->timer, get_next_poll_jiffies());
-               return;
-       }
+       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               goto out;
 
        count = ioread32be(health->health_counter);
        if (count == health->prev)
@@ -290,8 +288,6 @@ static void poll_health(unsigned long data)
        if (health->miss_counter == MAX_MISSES) {
                dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
                print_health_info(dev);
-       } else {
-               mod_timer(&health->timer, get_next_poll_jiffies());
        }
 
        if (in_fatal(dev) && !health->sick) {
@@ -305,6 +301,9 @@ static void poll_health(unsigned long data)
                                "new health works are not permitted at this stage\n");
                spin_unlock(&health->wq_lock);
        }
+
+out:
+       mod_timer(&health->timer, get_next_poll_jiffies());
 }
 
 void mlx5_start_health_poll(struct mlx5_core_dev *dev)
index af945edfee1905dbe676218cb53123535a37171f..4f577a5abf884645910203aace25ad9605e171d8 100644 (file)
@@ -537,8 +537,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
-       /* If the HCA supports 4K UARs use it */
-       if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+       /* Enable 4K UAR only when HCA supports it and page size is bigger
+        * than 4K.
+        */
+       if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
                MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
 
        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
index 483241b4b05db2add64ff928ccc9419fe733355a..a672f6a860dc4e7a49b2175283d2720c610d4453 100644 (file)
@@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
                                qed_wr(p_hwfn,
                                       p_ptt,
                                       s_storm_defs[storm_id].cm_ctx_wr_addr,
-                                      BIT(9) | lid);
+                                      (i << 9) | lid);
                                *(dump_buf + offset) = qed_rd(p_hwfn,
                                                              p_ptt,
                                                              rd_reg_addr);
index aa6476439aee7f4c65784af4a451a8ffe5173561..e0ef02f9503bae027268b1b058f9875b6c1365a7 100644 (file)
@@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
 {
        /* Context type from W/B descriptor must be zero */
        if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
-               return -EINVAL;
+               return 0;
 
        /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
        if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
-               return 0;
+               return 1;
 
-       return 1;
+       return 0;
 }
 
 static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
@@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
                }
        }
 exit:
-       return ret;
+       if (likely(ret == 0))
+               return 1;
+
+       return 0;
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
index 12236daf7bb6d5358fdafe50e37227e19b95bc33..d16d11bfc046467c41edab070bd3015bd932d338 100644 (file)
@@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                return;
 
        /* check tx tstamp status */
-       if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+       if (priv->hw->desc->get_tx_timestamp_status(p)) {
                /* get the valid tstamp */
                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);
 
-               netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+               netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
@@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                return;
 
        /* Check if timestamp is available */
-       if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+       if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
                /* For GMAC4, the valid timestamp is from CTX next desc. */
                if (priv->plat->has_gmac4)
                        ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
                else
                        ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
-               netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+               netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else  {
-               netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+               netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
        }
 }
 
@@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
-                       snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       if (priv->plat->has_gmac4)
+                               snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+                       else
+                               snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
-                       snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       if (priv->plat->has_gmac4)
+                               snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+                       else
+                               snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
-                       snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       if (priv->plat->has_gmac4)
+                               snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+                       else
+                               snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
index 48fb72fc423c9f7c29aa713376867c90c32932af..f4b31d69f60eb7291191a9ae714ef4377db508bb 100644 (file)
@@ -59,7 +59,8 @@
 /* Enable Snapshot for Messages Relevant to Master */
 #define        PTP_TCR_TSMSTRENA       BIT(15)
 /* Select PTP packets for Taking Snapshots */
-#define        PTP_TCR_SNAPTYPSEL_1    GENMASK(17, 16)
+#define        PTP_TCR_SNAPTYPSEL_1    BIT(16)
+#define        PTP_GMAC4_TCR_SNAPTYPSEL_1      GENMASK(17, 16)
 /* Enable MAC address for PTP Frame Filtering */
 #define        PTP_TCR_TSENMACADDR     BIT(18)
 
index 6ebb0f559a427fdb4d27d9b668b46d7151650043..199459bd69612478e596dc97248d2a40defd6db8 100644 (file)
@@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev)
 
        dev->netdev_ops = &geneve_netdev_ops;
        dev->ethtool_ops = &geneve_ethtool_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
 
        SET_NETDEV_DEVTYPE(dev, &geneve_type);
 
index 7b652bb7ebe407b35c054009b521b173fd9fa361..ca110cd2a4e42cdebed760bdad103627b556e2f7 100644 (file)
@@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = {
 static void gtp_link_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &gtp_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 
        dev->hard_header_len = 0;
        dev->addr_len = 0;
index 922bf440e9f1cb2d20ba2d2419f73972b09714c3..021a8ec411ab8316d6080592e2469e37c912bc31 100644 (file)
@@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev)
 {
        /* Finish setting up the DEVICE info. */
        dev->netdev_ops         = &sp_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
        dev->mtu                = SIXP_MTU;
        dev->hard_header_len    = AX25_MAX_HEADER_LEN;
        dev->header_ops         = &ax25_header_ops;
index f62e7f325cf92edeca25c40a1d11e53166017049..78a6414c5fd994445c7e530065bd6cfa7e5ad213 100644 (file)
@@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = {
 static void bpq_setup(struct net_device *dev)
 {
        dev->netdev_ops      = &bpq_netdev_ops;
-       dev->destructor      = free_netdev;
+       dev->needs_free_netdev = true;
 
        memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
        memcpy(dev->dev_addr,  &ax25_defaddr, AX25_ADDR_LEN);
index 262b2ea576a38e4bb7d1c442f2dfcbc2c40fa302..6066f1bcaf2d55e7be1708f1a96e592849611169 100644 (file)
@@ -171,6 +171,8 @@ struct rndis_device {
        spinlock_t request_lock;
        struct list_head req_list;
 
+       struct work_struct mcast_work;
+
        u8 hw_mac_adr[ETH_ALEN];
        u8 rss_key[NETVSC_HASH_KEYLEN];
        u16 ind_table[ITAB_NUM];
@@ -201,6 +203,7 @@ int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 int rndis_filter_device_add(struct hv_device *dev,
                            struct netvsc_device_info *info);
+void rndis_filter_update(struct netvsc_device *nvdev);
 void rndis_filter_device_remove(struct hv_device *dev,
                                struct netvsc_device *nvdev);
 int rndis_filter_set_rss_param(struct rndis_device *rdev,
@@ -211,7 +214,6 @@ int rndis_filter_receive(struct net_device *ndev,
                         struct vmbus_channel *channel,
                         void *data, u32 buflen);
 
-int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
 
 void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
@@ -696,7 +698,6 @@ struct net_device_context {
        /* list protection */
        spinlock_t lock;
 
-       struct work_struct work;
        u32 msg_enable; /* debug level */
 
        u32 tx_checksum_mask;
index 4421a6d0037579bbff80ee909d42510947f77690..82d6c022ca859735c412eadad487556eb34b6f33 100644 (file)
@@ -56,37 +56,12 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void do_set_multicast(struct work_struct *w)
-{
-       struct net_device_context *ndevctx =
-               container_of(w, struct net_device_context, work);
-       struct hv_device *device_obj = ndevctx->device_ctx;
-       struct net_device *ndev = hv_get_drvdata(device_obj);
-       struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
-       struct rndis_device *rdev;
-
-       if (!nvdev)
-               return;
-
-       rdev = nvdev->extension;
-       if (rdev == NULL)
-               return;
-
-       if (ndev->flags & IFF_PROMISC)
-               rndis_filter_set_packet_filter(rdev,
-                       NDIS_PACKET_TYPE_PROMISCUOUS);
-       else
-               rndis_filter_set_packet_filter(rdev,
-                       NDIS_PACKET_TYPE_BROADCAST |
-                       NDIS_PACKET_TYPE_ALL_MULTICAST |
-                       NDIS_PACKET_TYPE_DIRECTED);
-}
-
 static void netvsc_set_multicast_list(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
+       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 
-       schedule_work(&net_device_ctx->work);
+       rndis_filter_update(nvdev);
 }
 
 static int netvsc_open(struct net_device *net)
@@ -123,8 +98,6 @@ static int netvsc_close(struct net_device *net)
 
        netif_tx_disable(net);
 
-       /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
-       cancel_work_sync(&net_device_ctx->work);
        ret = rndis_filter_close(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -1028,7 +1001,7 @@ static const struct {
 static int netvsc_get_sset_count(struct net_device *dev, int string_set)
 {
        struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
+       struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
 
        if (!nvdev)
                return -ENODEV;
@@ -1158,11 +1131,22 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void netvsc_poll_controller(struct net_device *net)
+static void netvsc_poll_controller(struct net_device *dev)
 {
-       /* As netvsc_start_xmit() works synchronous we don't have to
-        * trigger anything here.
-        */
+       struct net_device_context *ndc = netdev_priv(dev);
+       struct netvsc_device *ndev;
+       int i;
+
+       rcu_read_lock();
+       ndev = rcu_dereference(ndc->nvdev);
+       if (ndev) {
+               for (i = 0; i < ndev->num_chn; i++) {
+                       struct netvsc_channel *nvchan = &ndev->chan_table[i];
+
+                       napi_schedule(&nvchan->napi);
+               }
+       }
+       rcu_read_unlock();
 }
 #endif
 
@@ -1552,7 +1536,6 @@ static int netvsc_probe(struct hv_device *dev,
        hv_set_drvdata(dev, net);
 
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
-       INIT_WORK(&net_device_ctx->work, do_set_multicast);
 
        spin_lock_init(&net_device_ctx->lock);
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1622,7 +1605,6 @@ static int netvsc_remove(struct hv_device *dev)
        netif_device_detach(net);
 
        cancel_delayed_work_sync(&ndev_ctx->dwork);
-       cancel_work_sync(&ndev_ctx->work);
 
        /*
         * Call to the vsc driver to let it know that the device is being
index f9d5b0b8209a7ffffa07984ba6d942ddb011610b..cb79cd081f427d6a22c2d48427779593965e793c 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "hyperv_net.h"
 
+static void rndis_set_multicast(struct work_struct *w);
 
 #define RNDIS_EXT_LEN PAGE_SIZE
 struct rndis_request {
@@ -76,6 +77,7 @@ static struct rndis_device *get_rndis_device(void)
        spin_lock_init(&device->request_lock);
 
        INIT_LIST_HEAD(&device->req_list);
+       INIT_WORK(&device->mcast_work, rndis_set_multicast);
 
        device->state = RNDIS_DEV_UNINITIALIZED;
 
@@ -815,7 +817,8 @@ static int rndis_filter_query_link_speed(struct rndis_device *dev)
        return ret;
 }
 
-int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
+static int rndis_filter_set_packet_filter(struct rndis_device *dev,
+                                         u32 new_filter)
 {
        struct rndis_request *request;
        struct rndis_set_request *set;
@@ -846,6 +849,28 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
        return ret;
 }
 
+static void rndis_set_multicast(struct work_struct *w)
+{
+       struct rndis_device *rdev
+               = container_of(w, struct rndis_device, mcast_work);
+
+       if (rdev->ndev->flags & IFF_PROMISC)
+               rndis_filter_set_packet_filter(rdev,
+                                              NDIS_PACKET_TYPE_PROMISCUOUS);
+       else
+               rndis_filter_set_packet_filter(rdev,
+                                              NDIS_PACKET_TYPE_BROADCAST |
+                                              NDIS_PACKET_TYPE_ALL_MULTICAST |
+                                              NDIS_PACKET_TYPE_DIRECTED);
+}
+
+void rndis_filter_update(struct netvsc_device *nvdev)
+{
+       struct rndis_device *rdev = nvdev->extension;
+
+       schedule_work(&rdev->mcast_work);
+}
+
 static int rndis_filter_init_device(struct rndis_device *dev)
 {
        struct rndis_request *request;
@@ -973,6 +998,9 @@ static int rndis_filter_close_device(struct rndis_device *dev)
        if (dev->state != RNDIS_DEV_DATAINITIALIZED)
                return 0;
 
+       /* Make sure rndis_set_multicast doesn't re-enable filter! */
+       cancel_work_sync(&dev->mcast_work);
+
        ret = rndis_filter_set_packet_filter(dev, 0);
        if (ret == -ENODEV)
                ret = 0;
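
The netvsc/rndis hunks move the multicast filter update into a work item owned by the rndis_device: INIT_WORK() when the device is created, schedule_work() from the (atomic) ndo_set_rx_mode path via rndis_filter_update(), and cancel_work_sync() before the packet filter is cleared on close so a late update cannot re-enable it. A compact kernel-style sketch of that lifecycle with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct filter_work;
	/* ... filter state ... */
};

static void example_filter_work(struct work_struct *w)
{
	struct example_dev *edev =
		container_of(w, struct example_dev, filter_work);

	/* Program the hardware filter here; this may sleep, which is why it
	 * runs from a work item rather than from ndo_set_rx_mode itself.
	 */
	(void)edev;
}

static void example_init(struct example_dev *edev)
{
	INIT_WORK(&edev->filter_work, example_filter_work);
}

static void example_request_update(struct example_dev *edev)
{
	schedule_work(&edev->filter_work);	/* from ndo_set_rx_mode */
}

static void example_close(struct example_dev *edev)
{
	/* Ensure a pending update cannot run after the filter is cleared. */
	cancel_work_sync(&edev->filter_work);
}
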
index 312fce7302d3252903282599223063e7d97bb863..144ea5ae8ab4abda588cb6b23292c9f3ccf06c9c 100644 (file)
@@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev)
                __skb_queue_purge(&txp->tq);
        }
        kfree(dp->tx_private);
-       free_netdev(dev);
 }
 
 static void ifb_setup(struct net_device *dev)
@@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev)
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
        eth_hw_addr_random(dev);
-       dev->destructor = ifb_dev_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = ifb_dev_free;
 }
 
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
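
For drivers that free their own per-device state (ifb here, and loopback, slip, team, tun, macsec and veth below), the conversion drops the trailing free_netdev() from the old destructor, points dev->priv_destructor at the remaining cleanup and sets needs_free_netdev; the core then runs priv_destructor first and frees the netdev afterwards. A hedged kernel-style sketch with hypothetical names:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>

/* Hypothetical per-device state allocated by the driver. */
struct example_priv {
	void *stats;
};

static void example_dev_free(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Driver-private cleanup only; no free_netdev() call any more. */
	kfree(priv->stats);
}

static void example_priv_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_destructor = example_dev_free;	/* runs first on teardown */
	dev->needs_free_netdev = true;			/* then the core frees dev */
}
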
index 618ed88fad0fc1d4e227f0e84fde74462b2bc496..7c7680c8f0e32e149167f7943dc02bdc9646da23 100644 (file)
@@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev)
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        dev->header_ops = &ipvlan_header_ops;
        dev->ethtool_ops = &ipvlan_ethtool_ops;
 }
index 224f65cb576bbf106a4779ef5f60b75f34903b1c..30612497643c08caa8a3bf352b13f784f729f725 100644 (file)
@@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev)
 {
        dev_net(dev)->loopback_dev = NULL;
        free_percpu(dev->lstats);
-       free_netdev(dev);
 }
 
 static const struct net_device_ops loopback_ops = {
@@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev)
        dev->ethtool_ops        = &loopback_ethtool_ops;
        dev->header_ops         = &eth_header_ops;
        dev->netdev_ops         = &loopback_ops;
-       dev->destructor         = loopback_dev_free;
+       dev->needs_free_netdev  = true;
+       dev->priv_destructor    = loopback_dev_free;
 }
 
 /* Setup and register the loopback device. */
index cdc347be68f23196d6ba820d21475260bc5bae3b..79411675f0e66376ecf2fa640b51c7172d8085d9 100644 (file)
@@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev)
        free_percpu(macsec->secy.tx_sc.stats);
 
        dev_put(real_dev);
-       free_netdev(dev);
 }
 
 static void macsec_setup(struct net_device *dev)
@@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev)
        dev->max_mtu = ETH_MAX_MTU;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->netdev_ops = &macsec_netdev_ops;
-       dev->destructor = macsec_free_netdev;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = macsec_free_netdev;
        SET_NETDEV_DEVTYPE(dev, &macsec_type);
 
        eth_zero_addr(dev->broadcast);
index 346ad2ff39989d3da4cacc6ec1965ca882c475cd..67bf7ebae5c6dba9b9ea28c0c9c0801093f55eb7 100644 (file)
@@ -1092,7 +1092,7 @@ void macvlan_common_setup(struct net_device *dev)
        netif_keep_dst(dev);
        dev->priv_flags        |= IFF_UNICAST_FLT;
        dev->netdev_ops         = &macvlan_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
        dev->header_ops         = &macvlan_hard_header_ops;
        dev->ethtool_ops        = &macvlan_ethtool_ops;
 }
index 06ee6395117f9df71a07d22326bc6c15c1e87f2e..0e27920c2b6b6219e70871ad13031bf56711907e 100644 (file)
@@ -358,7 +358,7 @@ static ssize_t enabled_store(struct config_item *item,
                if (err)
                        goto out_unlock;
 
-               pr_info("netconsole: network logging started\n");
+               pr_info("network logging started\n");
        } else {        /* false */
                /* We need to disable the netconsole before cleaning it up
                 * otherwise we might end up in write_msg() with
index b91603835d2680aa08911da7870ea176c5dd0792..c4b3362da4a2e33184b02dff0df3082b4d7e45e4 100644 (file)
@@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev)
 
        dev->netdev_ops = &nlmon_ops;
        dev->ethtool_ops = &nlmon_ethtool_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
 
        dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
                        NETIF_F_HIGHDMA | NETIF_F_LLTX;
index c360dd6ead2213b112282ff508b963354cf15d72..3ab6c58d4be6fd42812bfd783715c13ef42e13c4 100644 (file)
@@ -127,6 +127,7 @@ config MDIO_THUNDER
        tristate "ThunderX SOCs MDIO buses"
        depends on 64BIT
        depends on PCI
+       depends on !(MDIO_DEVICE=y && PHYLIB=m)
        select MDIO_CAVIUM
        help
          This driver supports the MDIO interfaces found on Cavium
index 7524caa0f29d9806e11826c7ecfd57842bff1822..eebb0e1c70ff51a40189fd9690217c34ce9bb4cb 100644 (file)
@@ -54,6 +54,8 @@ static const char *phy_speed_to_str(int speed)
                return "5Gbps";
        case SPEED_10000:
                return "10Gbps";
+       case SPEED_14000:
+               return "14Gbps";
        case SPEED_20000:
                return "20Gbps";
        case SPEED_25000:
index 1da31dc47f863845d50823b69e51f2abab5bf3f0..74b907206aa749d894531005716a0c6dd3d0ec1f 100644 (file)
@@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev)
 static void sl_free_netdev(struct net_device *dev)
 {
        int i = dev->base_addr;
-       free_netdev(dev);
+
        slip_devs[i] = NULL;
 }
 
@@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = {
 static void sl_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &sl_netdev_ops;
-       dev->destructor         = sl_free_netdev;
+       dev->needs_free_netdev  = true;
+       dev->priv_destructor    = sl_free_netdev;
 
        dev->hard_header_len    = 0;
        dev->addr_len           = 0;
@@ -1369,8 +1370,6 @@ static void __exit slip_exit(void)
                if (sl->tty) {
                        printk(KERN_ERR "%s: tty discipline still running\n",
                               dev->name);
-                       /* Intentionally leak the control block. */
-                       dev->destructor = NULL;
                }
 
                unregister_netdev(dev);
index 6c5d5ef46f75aa9a9089ac80bbee30a7f579b016..fba8c136aa7c1513b288e641cfe8cd6dca304ccd 100644 (file)
@@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev)
        struct team *team = netdev_priv(dev);
 
        free_percpu(team->pcpu_stats);
-       free_netdev(dev);
 }
 
 static int team_open(struct net_device *dev)
@@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev)
 
        dev->netdev_ops = &team_netdev_ops;
        dev->ethtool_ops = &team_ethtool_ops;
-       dev->destructor = team_destructor;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = team_destructor;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->priv_flags |= IFF_TEAM;
index bbd707b9ef7a6a305804ed0d56c3fc0e1db7d565..9ee7d4275640919f2293182cb47dca41412975ba 100644 (file)
@@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev)
        free_percpu(tun->pcpu_stats);
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
-       free_netdev(dev);
 }
 
 static void tun_setup(struct net_device *dev)
@@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev)
        tun->group = INVALID_GID;
 
        dev->ethtool_ops = &tun_ethtool_ops;
-       dev->destructor = tun_free_netdev;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = tun_free_netdev;
        /* We prefer our own queue length */
        dev->tx_queue_len = TUN_READQ_SIZE;
 }
index eb52de8205f0d48044bcb6c1302ce7bda779128e..c7a350bbaaa7c881a831ec8ed7b4b59dd83c6de9 100644 (file)
@@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev)
        dev->addr_len           = 1;
        dev->tx_queue_len       = 3;
 
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 }
 
 /*
index 8f923a147fa93117296312c59fdc6761fef50f3c..32a22f4e8356422ba144a3e3485810537afaad2c 100644 (file)
@@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev)
        dev->addr_len        = 0;
        dev->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
        dev->netdev_ops      = &qmimux_netdev_ops;
-       dev->destructor      = free_netdev;
+       dev->needs_free_netdev = true;
 }
 
 static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id)
@@ -1192,6 +1192,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
+       {QMI_FIXED_INTF(0x1199, 0x9063, 8)},    /* Sierra Wireless EM7305 */
+       {QMI_FIXED_INTF(0x1199, 0x9063, 10)},   /* Sierra Wireless EM7305 */
        {QMI_FIXED_INTF(0x1199, 0x9071, 8)},    /* Sierra Wireless MC74xx */
        {QMI_FIXED_INTF(0x1199, 0x9071, 10)},   /* Sierra Wireless MC74xx */
        {QMI_FIXED_INTF(0x1199, 0x9079, 8)},    /* Sierra Wireless EM74xx */
@@ -1206,6 +1208,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+       {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},    /* Telewell TW-3G HSPA+ */
+       {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},    /* Telewell TW-3G HSPA+ */
        {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
        {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},    /* Olivetti Olicard 100 */
        {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},    /* Olivetti Olicard 120 */
index ddc62cb69be828a730e6ed32ecc9ee951fef8d3b..1a419a45e2a2e77e07e1746c9035230da465424d 100644 (file)
@@ -4368,6 +4368,8 @@ static u8 rtl_get_version(struct usb_interface *intf)
                break;
        }
 
+       dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
+
        return version;
 }
 
index 38f0f03a29c8898131110a620e73da4776cb7020..0156fe8cac172a909cfe4ed5b9567572132b888d 100644 (file)
@@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev)
 static void veth_dev_free(struct net_device *dev)
 {
        free_percpu(dev->vstats);
-       free_netdev(dev);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev)
                               NETIF_F_HW_VLAN_STAG_TX |
                               NETIF_F_HW_VLAN_CTAG_RX |
                               NETIF_F_HW_VLAN_STAG_RX);
-       dev->destructor = veth_dev_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = veth_dev_free;
        dev->max_mtu = ETH_MAX_MTU;
 
        dev->hw_features = VETH_FEATURES;
index db882493875cd97d30ac5a2b26a764a5b65d9e2e..022c0b5f9844242bc08b766eabf208fb41d9e03d 100644 (file)
 #include <net/addrconf.h>
 #include <net/l3mdev.h>
 #include <net/fib_rules.h>
+#include <net/netns/generic.h>
 
 #define DRV_NAME       "vrf"
 #define DRV_VERSION    "1.0"
 
 #define FIB_RULE_PREF  1000       /* default preference for FIB rules */
-static bool add_fib_rules = true;
+
+static unsigned int vrf_net_id;
 
 struct net_vrf {
        struct rtable __rcu     *rth;
@@ -1348,7 +1350,7 @@ static void vrf_setup(struct net_device *dev)
        dev->netdev_ops = &vrf_netdev_ops;
        dev->l3mdev_ops = &vrf_l3mdev_ops;
        dev->ethtool_ops = &vrf_ethtool_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
 
        /* Fill in device structure with ethernet-generic values. */
        eth_hw_addr_random(dev);
@@ -1394,6 +1396,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
 {
        struct net_vrf *vrf = netdev_priv(dev);
+       bool *add_fib_rules;
+       struct net *net;
        int err;
 
        if (!data || !data[IFLA_VRF_TABLE])
@@ -1409,13 +1413,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
        if (err)
                goto out;
 
-       if (add_fib_rules) {
+       net = dev_net(dev);
+       add_fib_rules = net_generic(net, vrf_net_id);
+       if (*add_fib_rules) {
                err = vrf_add_fib_rules(dev);
                if (err) {
                        unregister_netdevice(dev);
                        goto out;
                }
-               add_fib_rules = false;
+               *add_fib_rules = false;
        }
 
 out:
@@ -1498,16 +1504,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
        .notifier_call = vrf_device_event,
 };
 
+/* Initialize per network namespace state */
+static int __net_init vrf_netns_init(struct net *net)
+{
+       bool *add_fib_rules = net_generic(net, vrf_net_id);
+
+       *add_fib_rules = true;
+
+       return 0;
+}
+
+static struct pernet_operations vrf_net_ops __net_initdata = {
+       .init = vrf_netns_init,
+       .id   = &vrf_net_id,
+       .size = sizeof(bool),
+};
+
 static int __init vrf_init_module(void)
 {
        int rc;
 
        register_netdevice_notifier(&vrf_notifier_block);
 
-       rc = rtnl_link_register(&vrf_link_ops);
+       rc = register_pernet_subsys(&vrf_net_ops);
        if (rc < 0)
                goto error;
 
+       rc = rtnl_link_register(&vrf_link_ops);
+       if (rc < 0) {
+               unregister_pernet_subsys(&vrf_net_ops);
+               goto error;
+       }
+
        return 0;
 
 error:
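
The vrf hunks turn the global add_fib_rules flag into per-network-namespace state: a pernet_operations with .id and .size makes register_pernet_subsys() allocate a bool per namespace, .init seeds it, and net_generic() retrieves it later. A condensed kernel-style sketch of the same registration pattern (names are hypothetical):

#include <linux/init.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int example_net_id;	/* filled in by register_pernet_subsys() */

static int __net_init example_netns_init(struct net *net)
{
	bool *flag = net_generic(net, example_net_id);

	*flag = true;	/* every new namespace starts with the flag set */
	return 0;
}

static struct pernet_operations example_net_ops = {
	.init = example_netns_init,
	.id   = &example_net_id,
	.size = sizeof(bool),	/* per-netns storage handed out by net_generic() */
};

static int __init example_module_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}
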
index 7f0136f2dd9d6167acc9b125fb03d8c2c5f9524d..c28bdce14fd5e32b287419227d7d33bf7835c409 100644 (file)
@@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev)
 
        dev->netdev_ops = &vsockmon_ops;
        dev->ethtool_ops = &vsockmon_ethtool_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
 
        dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
                        NETIF_F_HIGHDMA | NETIF_F_LLTX;
index a6b5052c1d36bb99260dd4232842fa9e8df2621c..5fa798a5c9a695ac3796d2ee4a3c1eae59a1cf9f 100644 (file)
@@ -2611,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev)
        eth_hw_addr_random(dev);
        ether_setup(dev);
 
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
        dev->features   |= NETIF_F_LLTX;
index 65ee2a6f248cfcbd2761272431c37627eafaced5..a0d76f70c4289d03e7b8c25700c58b218a934bd0 100644 (file)
@@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev)
        dev->flags              = 0;
        dev->header_ops         = &dlci_header_ops;
        dev->netdev_ops         = &dlci_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 
        dlp->receive            = dlci_receive;
 
index eb915281197efc98a0fae96cc9dcdd2691a21ee3..78596e42a3f3f27623284c86a0d791a2bf4a65b6 100644 (file)
@@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
                return -EIO;
        }
 
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        *get_dev_p(pvc, type) = dev;
        if (!used) {
                state(hdlc)->dce_changed = 1;
index 9df9ed62beff0cb3b2c0be7dbe1f15e646da74de..63f749078a1f1051f3f46bec13f1f60c7c64ea5f 100644 (file)
@@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = {
 static void lapbeth_setup(struct net_device *dev)
 {
        dev->netdev_ops      = &lapbeth_netdev_ops;
-       dev->destructor      = free_netdev;
+       dev->needs_free_netdev = true;
        dev->type            = ARPHRD_X25;
        dev->hard_header_len = 3;
        dev->mtu             = 1000;
index 91ee542de3d79ec0903d56f2d45463af83b01a95..b90c77ef792ef8908173aa2f762d15089dce55fd 100644 (file)
@@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev)
        struct ath6kl *ar = ath6kl_priv(dev);
 
        dev->netdev_ops = &ath6kl_netdev_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
 
        dev->needed_headroom = ETH_HLEN;
index cd1d6730eab73d514db5a1b88d442607c7834e3d..617199c0e5a0e644576b3c73be0808878d4c102b 100644 (file)
@@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev)
 
        if (vif)
                brcmf_free_vif(vif);
-       free_netdev(ndev);
 }
 
 static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
index a3d82368f1a9f9722e62920e44ef52f7ee8602ef..511d190c6cca1e864e50c7b1b6ae9caa45307e00 100644 (file)
@@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
                if (!ndev)
                        return ERR_PTR(-ENOMEM);
 
-               ndev->destructor = brcmf_cfg80211_free_netdev;
+               ndev->needs_free_netdev = true;
+               ndev->priv_destructor = brcmf_cfg80211_free_netdev;
                ifp = netdev_priv(ndev);
                ifp->ndev = ndev;
                /* store mapping ifidx to bsscfgidx */
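
Most of the driver hunks in this series make the same conversion seen here: the old dev->destructor callback is split into the needs_free_netdev flag, which tells the core to call free_netdev() itself, and an optional priv_destructor that releases only driver-private state. A minimal sketch of a setup routine using the two fields is below; example_setup and example_priv_destructor are hypothetical names.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Releases driver-private resources only.  With needs_free_netdev set,
 * the networking core calls free_netdev() after this runs, so the
 * destructor must not free the net_device itself. */
static void example_priv_destructor(struct net_device *dev)
{
	/* e.g. drop references held in netdev_priv(dev) */
}

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->needs_free_netdev = true;			  /* core frees the netdev */
	dev->priv_destructor   = example_priv_destructor; /* optional hook */
}
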
index 544fc09dcb62435dc7316b2d8463c76d386ed91e..1372b20f931e0cca990a158e0ef8fdfc71fd7f88 100644 (file)
@@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
        dev->mem_end = mdev->mem_end;
 
        hostap_setup_dev(dev, local, type);
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
 
        sprintf(dev->name, "%s%s", prefix, name);
        if (!rtnl_locked)
index 002b25cff5b65e18460a2da08637886274bfdfa5..c854a557998b4266c5d63c55c0463497e8dde715 100644 (file)
@@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = {
 static void hwsim_mon_setup(struct net_device *dev)
 {
        dev->netdev_ops = &hwsim_netdev_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        ether_setup(dev);
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->type = ARPHRD_IEEE80211_RADIOTAP;
index dd87b9ff64c371911a74308be55e93e3a46cdd43..39b6b5e3f6e0e4e9ec6eb81e458250485e94c487 100644 (file)
@@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
                              struct net_device *dev)
 {
        dev->netdev_ops = &mwifiex_netdev_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        /* Initialize private structure */
        priv->current_key_index = 0;
        priv->media_connected = false;
index 74cf5fffb1e1b93fdb1a6d44002d7727544195e7..c80e37a69305c18f2879957c92fa3308bb45b175 100644 (file)
@@ -896,7 +896,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
 {
        if (pci_dev_is_disconnected(dev)) {
                *val = ~0;
-               return -ENODEV;
+               return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
 }
@@ -906,7 +906,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
 {
        if (pci_dev_is_disconnected(dev)) {
                *val = ~0;
-               return -ENODEV;
+               return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
 }
@@ -917,7 +917,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where,
 {
        if (pci_dev_is_disconnected(dev)) {
                *val = ~0;
-               return -ENODEV;
+               return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
 }
@@ -926,7 +926,7 @@ EXPORT_SYMBOL(pci_read_config_dword);
 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
 {
        if (pci_dev_is_disconnected(dev))
-               return -ENODEV;
+               return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_byte);
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(pci_write_config_byte);
 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
 {
        if (pci_dev_is_disconnected(dev))
-               return -ENODEV;
+               return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_word);
@@ -943,7 +943,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
                                         u32 val)
 {
        if (pci_dev_is_disconnected(dev))
-               return -ENODEV;
+               return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_dword);
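
The PCI config accessors conventionally report PCIBIOS_* codes, with PCIBIOS_SUCCESSFUL defined as 0, rather than negative errno values, which is why the disconnected-device short cut above now returns PCIBIOS_DEVICE_NOT_FOUND instead of -ENODEV. A small caller sketch under that assumption; example_read_vendor is a hypothetical helper.

#include <linux/pci.h>

static u16 example_read_vendor(struct pci_dev *pdev)
{
	u16 vendor;

	/* Compare against PCIBIOS_SUCCESSFUL (0); a PCIBIOS_* error such as
	 * PCIBIOS_DEVICE_NOT_FOUND means the read did not complete. */
	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor) !=
	    PCIBIOS_SUCCESSFUL)
		return 0xffff;	/* treat like an all-ones read from a missing device */

	return vendor;
}
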
index 175edad42d2f8fe100b762bbbe39bc042dceabe4..2942066607e0e9a6393139bfab65b5c13f6b09c5 100644 (file)
@@ -5,6 +5,7 @@
 config PCI_EPF_TEST
        tristate "PCI Endpoint Test driver"
        depends on PCI_ENDPOINT
+       select CRC32
        help
           Enable this configuration option to enable the test driver
           for PCI Endpoint.
index ef29f18b195164745bb0a67d9a0852e5b3a676bc..4cc2f4ea0a25043abbfd205465cd3cedeefffa2b 100644 (file)
        } \
 }
 
-#ifdef CONFIG_PM_SLEEP
 static u8 suspend_prep_ok;
 static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
 static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
-#endif
 
 struct telemetry_susp_stats {
        u32 shlw_swake_ctr;
@@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = {
        .release        = single_release,
 };
 
-#ifdef CONFIG_PM_SLEEP
 static int pm_suspend_prep_cb(void)
 {
        struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
@@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this,
 static struct notifier_block pm_notifier = {
        .notifier_call = pm_notification,
 };
-#endif /* CONFIG_PM_SLEEP */
 
 static int __init telemetry_debugfs_init(void)
 {
@@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void)
        if (err < 0)
                return -EINVAL;
 
-
-#ifdef CONFIG_PM_SLEEP
        register_pm_notifier(&pm_notifier);
-#endif /* CONFIG_PM_SLEEP */
 
        debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL);
-       if (!debugfs_conf->telemetry_dbg_dir)
-               return -ENOMEM;
+       if (!debugfs_conf->telemetry_dbg_dir) {
+               err = -ENOMEM;
+               goto out_pm;
+       }
 
        f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
                                debugfs_conf->telemetry_dbg_dir, NULL,
@@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void)
 out:
        debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
        debugfs_conf->telemetry_dbg_dir = NULL;
+out_pm:
+       unregister_pm_notifier(&pm_notifier);
 
        return err;
 }
@@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void)
 {
        debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
        debugfs_conf->telemetry_dbg_dir = NULL;
+       unregister_pm_notifier(&pm_notifier);
 }
 
 late_initcall(telemetry_debugfs_init);
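
The telemetry_debugfs fix above pairs register_pm_notifier() with unregister_pm_notifier() on both the init failure path (the new out_pm label) and module exit, so the notifier never outlives the module. A minimal sketch of that unwind idiom, with hypothetical names and only two setup steps:

#include <linux/debugfs.h>
#include <linux/suspend.h>

static int example_pm_cb(struct notifier_block *nb, unsigned long mode, void *unused)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_cb,
};

static struct dentry *example_dbg_dir;

static int __init example_init(void)
{
	int err;

	err = register_pm_notifier(&example_pm_nb);
	if (err)
		return err;

	example_dbg_dir = debugfs_create_dir("example", NULL);
	if (!example_dbg_dir) {
		err = -ENOMEM;
		goto out_pm;		/* undo earlier steps in reverse order */
	}

	return 0;

out_pm:
	unregister_pm_notifier(&example_pm_nb);
	return err;
}
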
index e72abbc18ee31afb2e20808364e7830089f94d15..a66a317f3e4fedb6d7caaf2276c3fe3721391a58 100644 (file)
@@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
 {
        return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
 }
-MDEV_TYPE_ATTR_RO(name);
+static MDEV_TYPE_ATTR_RO(name);
 
 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                               char *buf)
 {
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
 }
-MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(device_api);
 
 static ssize_t available_instances_show(struct kobject *kobj,
                                        struct device *dev, char *buf)
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj,
 
        return sprintf(buf, "%d\n", atomic_read(&private->avail));
 }
-MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(available_instances);
 
 static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
@@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = {
        .attrs = mdev_types_attrs,
 };
 
-struct attribute_group *mdev_type_groups[] = {
+static struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group,
        NULL,
 };
@@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
                                      &events, &private->nb);
 }
 
-void vfio_ccw_mdev_release(struct mdev_device *mdev)
+static void vfio_ccw_mdev_release(struct mdev_device *mdev)
 {
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));
@@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
        }
 }
 
-int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
+static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
 {
        if (info->index != VFIO_CCW_IO_IRQ_INDEX)
                return -EINVAL;
index 9be4596d8a089c7ab8fa35703ebd8c8252608e63..ea099910b4e99466db57f1c2868cc889234e99d3 100644 (file)
@@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev)
        struct ap_driver *ap_drv = to_ap_drv(dev->driver);
        int rc;
 
+       /* Add queue/card to list of active queues/cards */
+       spin_lock_bh(&ap_list_lock);
+       if (is_card_dev(dev))
+               list_add(&to_ap_card(dev)->list, &ap_card_list);
+       else
+               list_add(&to_ap_queue(dev)->list,
+                        &to_ap_queue(dev)->card->queues);
+       spin_unlock_bh(&ap_list_lock);
+
        ap_dev->drv = ap_drv;
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
-       if (rc)
+
+       if (rc) {
+               spin_lock_bh(&ap_list_lock);
+               if (is_card_dev(dev))
+                       list_del_init(&to_ap_card(dev)->list);
+               else
+                       list_del_init(&to_ap_queue(dev)->list);
+               spin_unlock_bh(&ap_list_lock);
                ap_dev->drv = NULL;
+       }
+
        return rc;
 }
 
@@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev)
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
 
+       if (ap_drv->remove)
+               ap_drv->remove(ap_dev);
+
+       /* Remove queue/card from list of active queues/cards */
        spin_lock_bh(&ap_list_lock);
        if (is_card_dev(dev))
                list_del_init(&to_ap_card(dev)->list);
        else
                list_del_init(&to_ap_queue(dev)->list);
        spin_unlock_bh(&ap_list_lock);
-       if (ap_drv->remove)
-               ap_drv->remove(ap_dev);
+
        return 0;
 }
 
@@ -1056,10 +1077,6 @@ static void ap_scan_bus(struct work_struct *unused)
                                }
                                /* get it and thus adjust reference counter */
                                get_device(&ac->ap_dev.device);
-                               /* Add card device to card list */
-                               spin_lock_bh(&ap_list_lock);
-                               list_add(&ac->list, &ap_card_list);
-                               spin_unlock_bh(&ap_list_lock);
                        }
                        /* now create the new queue device */
                        aq = ap_queue_create(qid, type);
@@ -1070,10 +1087,6 @@ static void ap_scan_bus(struct work_struct *unused)
                        aq->ap_dev.device.parent = &ac->ap_dev.device;
                        dev_set_name(&aq->ap_dev.device,
                                     "%02x.%04x", id, dom);
-                       /* Add queue device to card queue list */
-                       spin_lock_bh(&ap_list_lock);
-                       list_add(&aq->list, &ac->queues);
-                       spin_unlock_bh(&ap_list_lock);
                        /* Start with a device reset */
                        spin_lock_bh(&aq->lock);
                        ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
@@ -1081,9 +1094,6 @@ static void ap_scan_bus(struct work_struct *unused)
                        /* Register device */
                        rc = device_register(&aq->ap_dev.device);
                        if (rc) {
-                               spin_lock_bh(&ap_list_lock);
-                               list_del_init(&aq->list);
-                               spin_unlock_bh(&ap_list_lock);
                                put_device(&aq->ap_dev.device);
                                continue;
                        }
index cfa161ccc74e92112e48a041cf7f24b99c9204e8..836efac968137aaa297b82476a96a7ee306c1f6a 100644 (file)
@@ -160,7 +160,14 @@ static struct device_type ap_card_type = {
 
 static void ap_card_device_release(struct device *dev)
 {
-       kfree(to_ap_card(dev));
+       struct ap_card *ac = to_ap_card(dev);
+
+       if (!list_empty(&ac->list)) {
+               spin_lock_bh(&ap_list_lock);
+               list_del_init(&ac->list);
+               spin_unlock_bh(&ap_list_lock);
+       }
+       kfree(ac);
 }
 
 struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
index 480c58a637694e3c8c1f885b7eefdeb309e88102..0f1a5d02acb0e151092504754900a3d889514e17 100644 (file)
@@ -584,7 +584,14 @@ static struct device_type ap_queue_type = {
 
 static void ap_queue_device_release(struct device *dev)
 {
-       kfree(to_ap_queue(dev));
+       struct ap_queue *aq = to_ap_queue(dev);
+
+       if (!list_empty(&aq->list)) {
+               spin_lock_bh(&ap_list_lock);
+               list_del_init(&aq->list);
+               spin_unlock_bh(&ap_list_lock);
+       }
+       kfree(aq);
 }
 
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
index dba94b486f057822ba45d29bea1589adb4026ea5..fa732bd8672961ce89c911f5e60212db1e2da386 100644 (file)
@@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev)
                privptr->conn = NULL; privptr->fsm = NULL;
                /* privptr gets freed by free_netdev() */
        }
-       free_netdev(dev);
 }
 
 /**
@@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev)
        dev->mtu                 = NETIUCV_MTU_DEFAULT;
        dev->min_mtu             = 576;
        dev->max_mtu             = NETIUCV_MTU_MAX;
-       dev->destructor          = netiucv_free_netdevice;
+       dev->needs_free_netdev   = true;
+       dev->priv_destructor     = netiucv_free_netdevice;
        dev->hard_header_len     = NETIUCV_HDRLEN;
        dev->addr_len            = 0;
        dev->type                = ARPHRD_SLIP;
index dc6ecd8243659012629e5a2c1206861eec647eb3..ff10d1f0a7e452fb1f526395794caf0bd1ef26c4 100644 (file)
@@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val)
        if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
                i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
 
-       mutex_lock(&chip->state_lock);
        ret = i2c_smbus_write_byte_data(chip->client,
                                        AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
-       if (ret < 0) {
-               mutex_unlock(&chip->state_lock);
+       if (ret < 0)
                return ret;
-       }
 
        chip->filter_rate_setup = i;
-       mutex_unlock(&chip->state_lock);
 
        return ret;
 }
index cfe37eb026d6d4418664b767c7b61473d79b1020..859d0d6051cdf6748d298a8f547aff141e5c55c6 100644 (file)
@@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = {
 static void mon_setup(struct net_device *dev)
 {
        dev->netdev_ops = &mon_netdev_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        ether_setup(dev);
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->type = ARPHRD_IEEE80211;
index 36c3189fc4b7f6f981ca683dd53267a5f9e541c3..bd4352fe2de315a8b1f636a73d09974f0bcd6628 100644 (file)
@@ -2667,7 +2667,8 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
        mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
        strncpy(mon_ndev->name, name, IFNAMSIZ);
        mon_ndev->name[IFNAMSIZ - 1] = 0;
-       mon_ndev->destructor = rtw_ndev_destructor;
+       mon_ndev->needs_free_netdev = true;
+       mon_ndev->priv_destructor = rtw_ndev_destructor;
 
        mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
 
index f83cfc76505c441392f8384bca9cfda850d1b273..021589913681075bbcaeb50cf97add2bf4228629 100644 (file)
@@ -1207,8 +1207,6 @@ void rtw_ndev_destructor(struct net_device *ndev)
 
        if (ndev->ieee80211_ptr)
                kfree((u8 *)ndev->ieee80211_ptr);
-
-       free_netdev(ndev);
 }
 
 void rtw_dev_unload(struct adapter *padapter)
index 02db59e8b593309e6e4b161d977178947cd24c57..aa16d1ab955b43ae57c1a317beb365b2cfd2d476 100644 (file)
@@ -160,7 +160,7 @@ static int isFileReadable(char *path)
                oldfs = get_fs(); set_fs(get_ds());
 
                if (1!=readFile(fp, &buf, 1))
-                       ret = PTR_ERR(fp);
+                       ret = -EINVAL;
 
                set_fs(oldfs);
                filp_close(fp, NULL);
index 49d685ad0da90d1a1282dd9d25f31ad64db22087..45b554032332e882e9bc99cb5489044053bd663c 100644 (file)
@@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
        list_del(&f->list);
        if (f->unbind)
                f->unbind(c, f);
+
+       if (f->bind_deactivated)
+               usb_function_activate(f);
 }
 EXPORT_SYMBOL_GPL(usb_remove_function);
 
@@ -956,12 +959,8 @@ static void remove_config(struct usb_composite_dev *cdev,
 
                f = list_first_entry(&config->functions,
                                struct usb_function, list);
-               list_del(&f->list);
-               if (f->unbind) {
-                       DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
-                       f->unbind(config, f);
-                       /* may free memory for "f" */
-               }
+
+               usb_remove_function(config, f);
        }
        list_del(&config->list);
        if (config->unbind) {
index b4058f0000e4878efae4a475f834d06f16679fdd..6a1ce6a551587f232207612404df80807af215cd 100644 (file)
@@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev)
        dev->tx_queue_len       = 1;
 
        dev->netdev_ops         = &pn_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
        dev->header_ops         = &phonet_header_ops;
 }
 
index b9ca0a26cbd93e540a2df8f78544adb81e298805..684900fcfe24c3c5ab206568f24da857434a654f 100644 (file)
@@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd)
 
        /* closing ep0 === shutdown all */
 
-       if (dev->gadget_registered)
+       if (dev->gadget_registered) {
                usb_gadget_unregister_driver (&gadgetfs_driver);
+               dev->gadget_registered = false;
+       }
 
        /* at this point "good" hardware has disconnected the
         * device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@ static void
 gadgetfs_suspend (struct usb_gadget *gadget)
 {
        struct dev_data         *dev = get_gadget_data (gadget);
+       unsigned long           flags;
 
        INFO (dev, "suspended from state %d\n", dev->state);
-       spin_lock (&dev->lock);
+       spin_lock_irqsave(&dev->lock, flags);
        switch (dev->state) {
        case STATE_DEV_SETUP:           // VERY odd... host died??
        case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
        default:
                break;
        }
-       spin_unlock (&dev->lock);
+       spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static struct usb_gadget_driver gadgetfs_driver = {
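
The gadgetfs_suspend() hunk converts the plain spin_lock()/spin_unlock() pair to spin_lock_irqsave()/spin_unlock_irqrestore(). The irqsave variants disable interrupts while the lock is held and restore whatever interrupt state the caller had, which makes the same locking sequence safe whether the lock is also taken from interrupt context or the function is entered with interrupts already off. A minimal sketch of the idiom, with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_state;

static void example_update_state(int new_state)
{
	unsigned long flags;

	/* Safe in any context: interrupts are disabled while the lock is
	 * held and the previous interrupt state is restored afterwards. */
	spin_lock_irqsave(&example_lock, flags);
	example_state = new_state;
	spin_unlock_irqrestore(&example_lock, flags);
}
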
index ccabb51cb98da69b4b72d46a59846f3b97e1b8f6..7635fd7cc328caa371751d7a6ac15a5bb9ebdbae 100644 (file)
@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
                /* Report reset and disconnect events to the driver */
                if (dum->driver && (disconnect || reset)) {
                        stop_activity(dum);
-                       spin_unlock(&dum->lock);
                        if (reset)
                                usb_gadget_udc_reset(&dum->gadget, dum->driver);
                        else
                                dum->driver->disconnect(&dum->gadget);
-                       spin_lock(&dum->lock);
                }
        } else if (dum_hcd->active != dum_hcd->old_active) {
-               if (dum_hcd->old_active && dum->driver->suspend) {
-                       spin_unlock(&dum->lock);
+               if (dum_hcd->old_active && dum->driver->suspend)
                        dum->driver->suspend(&dum->gadget);
-                       spin_lock(&dum->lock);
-               } else if (!dum_hcd->old_active &&  dum->driver->resume) {
-                       spin_unlock(&dum->lock);
+               else if (!dum_hcd->old_active &&  dum->driver->resume)
                        dum->driver->resume(&dum->gadget);
-                       spin_lock(&dum->lock);
-               }
        }
 
        dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
        struct dummy_hcd        *dum_hcd = gadget_to_dummy_hcd(g);
        struct dummy            *dum = dum_hcd->dum;
 
+       spin_lock_irq(&dum->lock);
        dum->driver = NULL;
+       spin_unlock_irq(&dum->lock);
 
        return 0;
 }
index 6cf07857eacaa19b873b18c299079fd07507a5cf..f2cbd7f8005e149e028a11409aaa6aedc0716c36 100644 (file)
@@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
                nuke(&dev->ep[i]);
 
        /* report disconnect; the driver is already quiesced */
-       if (driver) {
-               spin_unlock(&dev->lock);
+       if (driver)
                driver->disconnect(&dev->gadget);
-               spin_lock(&dev->lock);
-       }
 
        usb_reinit(dev);
 }
@@ -3348,8 +3345,6 @@ next_endpoints:
                BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
 {
        struct net2280_ep       *ep;
        u32                     tmp, num, mask, scratch;
@@ -3390,14 +3385,12 @@ __acquires(dev->lock)
                        if (disconnect || reset) {
                                stop_activity(dev, dev->driver);
                                ep0_start(dev);
-                               spin_unlock(&dev->lock);
                                if (reset)
                                        usb_gadget_udc_reset
                                                (&dev->gadget, dev->driver);
                                else
                                        (dev->driver->disconnect)
                                                (&dev->gadget);
-                               spin_lock(&dev->lock);
                                return;
                        }
                }
index 1f1687e888d623c33ffe2413fb5584f02c6b733e..fddf2731f798ea3a05a5b18bae7749c064eb4af7 100644 (file)
@@ -2119,11 +2119,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 {
        u32 temp, port_offset, port_count;
        int i;
-       u8 major_revision;
+       u8 major_revision, minor_revision;
        struct xhci_hub *rhub;
 
        temp = readl(addr);
        major_revision = XHCI_EXT_PORT_MAJOR(temp);
+       minor_revision = XHCI_EXT_PORT_MINOR(temp);
 
        if (major_revision == 0x03) {
                rhub = &xhci->usb3_rhub;
@@ -2137,7 +2138,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
                return;
        }
        rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
-       rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+       if (rhub->min_rev < minor_revision)
+               rhub->min_rev = minor_revision;
 
        /* Port offset and count in the third dword, see section 7.2 */
        temp = readl(addr + 2);
index fcf1f3f63e7af3d60a62675beddca912ac853428..1bcf971141c09a69f3cd1674cca282bcc6ec8d46 100644 (file)
@@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
                        pdev->device == 0x1042)
                xhci->quirks |= XHCI_BROKEN_STREAMS;
+       if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+                       pdev->device == 0x1142)
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
        if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
                xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
index 687ebb053438b343d63c77881aeebcdc0ad7f857..41d7979d81c53df3d459d39939798659386444a8 100644 (file)
@@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
 
        for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
             i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
-               if (PIXEL_CLOCK)
+               if (PIXEL_CLOCK != 0)
                        edt[num++] = block - edid;
 
        /* Yikes, EDID data is totally useless */
index ec2e7e3536859cae3294d484c5bb56b5330c6db5..449fceaf79d5505a0e2c409adeb968dbaad4413c 100644 (file)
@@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface,
        dev_dbg(dev->gdev, "%s %s - serial #%s\n",
                usbdev->manufacturer, usbdev->product, usbdev->serial);
        dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
-               usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
-               usbdev->descriptor.bcdDevice, dev);
+               le16_to_cpu(usbdev->descriptor.idVendor),
+               le16_to_cpu(usbdev->descriptor.idProduct),
+               le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
        dev_dbg(dev->gdev, "console enable=%d\n", console);
        dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
 
index 6a3c353de7c35468e68f271126dca1c0b447ae82..05ef657235df2eb72c68e8a892d70e3edadd17d8 100644 (file)
@@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
        char *bufptr;
        struct urb *urb;
 
-       pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n",
-               info->node, dev->blank_mode, blank_mode);
+       pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n",
+                info->node, dev->blank_mode, blank_mode);
 
        if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
            (blank_mode != FB_BLANK_POWERDOWN)) {
@@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface,
        pr_info("%s %s - serial #%s\n",
                usbdev->manufacturer, usbdev->product, usbdev->serial);
        pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
-               usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
-               usbdev->descriptor.bcdDevice, dev);
+               le16_to_cpu(usbdev->descriptor.idVendor),
+               le16_to_cpu(usbdev->descriptor.idProduct),
+               le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
        pr_info("console enable=%d\n", console);
        pr_info("fb_defio enable=%d\n", fb_defio);
        pr_info("shadow enable=%d\n", shadow);
index f9718f012aae9be85a9568a2ce7be0740f08f5f6..badee04ef496cefd02b8f3f2faebb21fafb839b5 100644 (file)
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
 }
 static void viafb_remove_proc(struct viafb_shared *shared)
 {
-       struct proc_dir_entry *viafb_entry = shared->proc_entry,
-               *iga1_entry = shared->iga1_proc_entry,
-               *iga2_entry = shared->iga2_proc_entry;
+       struct proc_dir_entry *viafb_entry = shared->proc_entry;
 
        if (!viafb_entry)
                return;
 
-       remove_proc_entry("output_devices", iga2_entry);
+       remove_proc_entry("output_devices", shared->iga2_proc_entry);
        remove_proc_entry("iga2", viafb_entry);
-       remove_proc_entry("output_devices", iga1_entry);
+       remove_proc_entry("output_devices", shared->iga1_proc_entry);
        remove_proc_entry("iga1", viafb_entry);
        remove_proc_entry("supported_output_devices", viafb_entry);
 
index a97fdc156a03512bf36df40dd1b4278df845115f..baacc18668611b778b9270256a8962b439e4b78e 100644 (file)
@@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
 {
        SHASH_DESC_ON_STACK(shash, tfm);
        u32 *ctx = (u32 *)shash_desc_ctx(shash);
+       u32 retval;
        int err;
 
        shash->tfm = tfm;
@@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
        err = crypto_shash_update(shash, address, length);
        BUG_ON(err);
 
-       return *ctx;
+       retval = *ctx;
+       barrier_data(ctx);
+       return retval;
 }
index 987044bca1c27176ab4d6fca34e5787ae3f9404c..59cb307b15fbea58eca82535b7a7b30ec6db5409 100644 (file)
@@ -131,6 +131,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
        }
 
        if (new_mode != old_mode) {
+               newattrs.ia_ctime = current_time(inode);
                newattrs.ia_mode = new_mode;
                newattrs.ia_valid = ATTR_MODE;
                ret = __ceph_setattr(inode, &newattrs);
index e8f11fa565c53ac58fddf402f6ade6320d47d490..7df550c13d7f3e25c2f0f4d6259bd92255a44f58 100644 (file)
@@ -91,6 +91,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
                ceph_mdsc_put_request(req);
                if (!inode)
                        return ERR_PTR(-ESTALE);
+               if (inode->i_nlink == 0) {
+                       iput(inode);
+                       return ERR_PTR(-ESTALE);
+               }
        }
 
        return d_obtain_alias(inode);
index dcce79b844064447af8e542fe34188a4f58e22d9..4de6cdddf05928a29051e9996e75245732fee618 100644 (file)
@@ -2022,7 +2022,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
                    attr->ia_size > inode->i_size) {
                        i_size_write(inode, attr->ia_size);
                        inode->i_blocks = calc_inode_blocks(attr->ia_size);
-                       inode->i_ctime = attr->ia_ctime;
                        ci->i_reported_size = attr->ia_size;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
@@ -2044,7 +2043,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
                     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
                     only ? "ctime only" : "ignored");
-               inode->i_ctime = attr->ia_ctime;
                if (only) {
                        /*
                         * if kernel wants to dirty ctime but nothing else,
@@ -2067,7 +2065,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
        if (dirtied) {
                inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
                                                           &prealloc_cf);
-               inode->i_ctime = current_time(inode);
+               inode->i_ctime = attr->ia_ctime;
        }
 
        release &= issued;
@@ -2085,6 +2083,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
                req->r_inode_drop = release;
                req->r_args.setattr.mask = cpu_to_le32(mask);
                req->r_num_caps = 1;
+               req->r_stamp = attr->ia_ctime;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
        }
        dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
index f38e56fa97129d646285fa2b433e8130c7b85312..0c05df44cc6c8888d56d32f53d54fba109f8508c 100644 (file)
@@ -1687,7 +1687,6 @@ struct ceph_mds_request *
 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 {
        struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
-       struct timespec ts;
 
        if (!req)
                return ERR_PTR(-ENOMEM);
@@ -1706,8 +1705,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
        init_completion(&req->r_safe_completion);
        INIT_LIST_HEAD(&req->r_unsafe_item);
 
-       ktime_get_real_ts(&ts);
-       req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
+       req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran);
 
        req->r_op = op;
        req->r_direct_mode = mode;
index 8b2a994042ddeb2d1b8c00bbee3436e79fff52ad..a66f6624d89943997d1d35ead214aee85857e5d7 100644 (file)
@@ -138,6 +138,14 @@ struct config_item *config_item_get(struct config_item *item)
 }
 EXPORT_SYMBOL(config_item_get);
 
+struct config_item *config_item_get_unless_zero(struct config_item *item)
+{
+       if (item && kref_get_unless_zero(&item->ci_kref))
+               return item;
+       return NULL;
+}
+EXPORT_SYMBOL(config_item_get_unless_zero);
+
 static void config_item_cleanup(struct config_item *item)
 {
        struct config_item_type *t = item->ci_type;
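
The new config_item_get_unless_zero() above is the configfs counterpart of kref_get_unless_zero(): it takes a reference only if the item's refcount has not already dropped to zero, and returns NULL otherwise, so a racing lookup cannot resurrect an object that is being freed. A minimal sketch of the same pattern on a hypothetical object:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref ref;		/* initialised with kref_init() at creation */
};

/* Returns obj with an extra reference, or NULL if the last reference is
 * already gone and the object is on its way to being freed. */
static struct example_obj *example_get_unless_zero(struct example_obj *obj)
{
	if (obj && kref_get_unless_zero(&obj->ref))
		return obj;
	return NULL;
}

static void example_release(struct kref *ref)
{
	kfree(container_of(ref, struct example_obj, ref));
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->ref, example_release);
}
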
index a6ab012a2c6acf9815bb8d4d1e29fb4568084b08..c8aabba502f6d7f019890bcdeba2fcdd7a56717f 100644 (file)
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
        ret = -ENOMEM;
        sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
        if (sl) {
-               sl->sl_target = config_item_get(item);
                spin_lock(&configfs_dirent_lock);
                if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
                        spin_unlock(&configfs_dirent_lock);
-                       config_item_put(item);
                        kfree(sl);
                        return -ENOENT;
                }
+               sl->sl_target = config_item_get(item);
                list_add(&sl->sl_list, &target_sd->s_links);
                spin_unlock(&configfs_dirent_lock);
                ret = configfs_create_link(sl, parent_item->ci_dentry,
index cddf39777835d0d27a71862b244cec37528b195b..a9f995f6859eb19ea3c5cddac19d86512ccf1d1e 100644 (file)
@@ -1494,7 +1494,7 @@ static void check_and_drop(void *_data)
 {
        struct detach_data *data = _data;
 
-       if (!data->mountpoint && !data->select.found)
+       if (!data->mountpoint && list_empty(&data->select.dispose))
                __d_drop(data->select.start);
 }
 
@@ -1536,17 +1536,15 @@ void d_invalidate(struct dentry *dentry)
 
                d_walk(dentry, &data, detach_and_collect, check_and_drop);
 
-               if (data.select.found)
+               if (!list_empty(&data.select.dispose))
                        shrink_dentry_list(&data.select.dispose);
+               else if (!data.mountpoint)
+                       return;
 
                if (data.mountpoint) {
                        detach_mounts(data.mountpoint);
                        dput(data.mountpoint);
                }
-
-               if (!data.mountpoint && !data.select.found)
-                       break;
-
                cond_resched();
        }
 }
index 2185c7a040a12cf8b6e190df97b58bc7eecc6299..fd2e651bad6d3c620b83f9dc32e25dfad22a4e7f 100644 (file)
@@ -1078,6 +1078,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
 {
        SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
        u32 *ctx = (u32 *)shash_desc_ctx(shash);
+       u32 retval;
        int err;
 
        shash->tfm = sbi->s_chksum_driver;
@@ -1087,7 +1088,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
        err = crypto_shash_update(shash, address, length);
        BUG_ON(err);
 
-       return *ctx;
+       retval = *ctx;
+       barrier_data(ctx);
+       return retval;
 }
 
 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
index 8bd3e4d448b9f07a8aa148c8d08c092f45471a7c..5a4438445bf788e65ade26b741911861d7ea2084 100644 (file)
@@ -3488,6 +3488,8 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
                return err;
        }
 
+       put_mnt_ns(old_mnt_ns);
+
        /* Update the pwd and root */
        set_fs_pwd(fs, &root);
        set_fs_root(fs, &root);
index 47c1d4484df9e6b69333215efc10d7a5db64210f..19d4d88fa285b39493d4b7793a69a23876a0988e 100644 (file)
@@ -1285,7 +1285,7 @@ static size_t compat_writev(struct file *file,
        if (!(file->f_mode & FMODE_CAN_WRITE))
                goto out;
 
-       ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+       ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
 
 out:
        if (ret > 0)
index d642cc0a8271b06b6fea356d7b2d8893111f465d..0315fea1d589e104ac4f10bf4db3f9f705c3f7ad 100644 (file)
@@ -400,10 +400,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        /*
         * There is not enough space for user on the device
         */
-       if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-               mutex_unlock(&UFS_SB(sb)->s_lock);
-               UFSD("EXIT (FAILED)\n");
-               return 0;
+       if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
+               if (!capable(CAP_SYS_RESOURCE)) {
+                       mutex_unlock(&UFS_SB(sb)->s_lock);
+                       UFSD("EXIT (FAILED)\n");
+                       return 0;
+               }
        }
 
        if (goal >= uspi->s_size) 
@@ -421,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                if (result) {
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
+                       *err = 0;
                        write_seqlock(&UFS_I(inode)->meta_lock);
                        ufs_cpu_to_data_ptr(sb, p, result);
-                       write_sequnlock(&UFS_I(inode)->meta_lock);
-                       *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max(UFS_I(inode)->i_lastfrag, fragment + count);
+                       write_sequnlock(&UFS_I(inode)->meta_lock);
                }
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
@@ -439,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        result = ufs_add_fragments(inode, tmp, oldcount, newcount);
        if (result) {
                *err = 0;
+               read_seqlock_excl(&UFS_I(inode)->meta_lock);
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                                fragment + count);
+               read_sequnlock_excl(&UFS_I(inode)->meta_lock);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                mutex_unlock(&UFS_SB(sb)->s_lock);
@@ -474,16 +478,16 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                   uspi->s_sbbase + tmp,
                                   uspi->s_sbbase + result, locked_page);
+               *err = 0;
                write_seqlock(&UFS_I(inode)->meta_lock);
                ufs_cpu_to_data_ptr(sb, p, result);
-               write_sequnlock(&UFS_I(inode)->meta_lock);
-               *err = 0;
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                                fragment + count);
-               mutex_unlock(&UFS_SB(sb)->s_lock);
+               write_sequnlock(&UFS_I(inode)->meta_lock);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
index da553ffec85b459f0675c25033f173bbaeccb772..9f4590261134085cbabcea1ee9a72382239ba479 100644 (file)
@@ -401,13 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
        u64 phys64 = 0;
        unsigned frag = fragment & uspi->s_fpbmask;
 
-       if (!create) {
-               phys64 = ufs_frag_map(inode, offsets, depth);
-               if (phys64)
-                       map_bh(bh_result, sb, phys64 + frag);
-               return 0;
-       }
+       phys64 = ufs_frag_map(inode, offsets, depth);
+       if (!create)
+               goto done;
 
+       if (phys64) {
+               if (fragment >= UFS_NDIR_FRAGMENT)
+                       goto done;
+               read_seqlock_excl(&UFS_I(inode)->meta_lock);
+               if (fragment < UFS_I(inode)->i_lastfrag) {
+                       read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+                       goto done;
+               }
+               read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+       }
         /* This code entered only while writing ....? */
 
        mutex_lock(&UFS_I(inode)->truncate_mutex);
@@ -451,6 +458,11 @@ out:
        }
        mutex_unlock(&UFS_I(inode)->truncate_mutex);
        return err;
+
+done:
+       if (phys64)
+               map_bh(bh_result, sb, phys64 + frag);
+       return 0;
 }
 
 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -874,7 +886,6 @@ static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
        ctx->to = from + count;
 }
 
-#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 
 static void ufs_trunc_direct(struct inode *inode)
@@ -1112,19 +1123,24 @@ static void ufs_truncate_blocks(struct inode *inode)
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned offsets[4];
-       int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+       int depth;
        int depth2;
        unsigned i;
        struct ufs_buffer_head *ubh[3];
        void *p;
        u64 block;
 
-       if (!depth)
-               return;
+       if (inode->i_size) {
+               sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
+               depth = ufs_block_to_path(inode, last, offsets);
+               if (!depth)
+                       return;
+       } else {
+               depth = 1;
+       }
 
-       /* find the last non-zero in offsets[] */
        for (depth2 = depth - 1; depth2; depth2--)
-               if (offsets[depth2])
+               if (offsets[depth2] != uspi->s_apb - 1)
                        break;
 
        mutex_lock(&ufsi->truncate_mutex);
@@ -1133,9 +1149,8 @@ static void ufs_truncate_blocks(struct inode *inode)
                offsets[0] = UFS_IND_BLOCK;
        } else {
                /* get the blocks that should be partially emptied */
-               p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+               p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
                for (i = 0; i < depth2; i++) {
-                       offsets[i]++;   /* next branch is fully freed */
                        block = ufs_data_ptr_to_cpu(sb, p);
                        if (!block)
                                break;
@@ -1146,7 +1161,7 @@ static void ufs_truncate_blocks(struct inode *inode)
                                write_sequnlock(&ufsi->meta_lock);
                                break;
                        }
-                       p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+                       p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
                }
                while (i--)
                        free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
@@ -1161,7 +1176,9 @@ static void ufs_truncate_blocks(struct inode *inode)
                        free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
                }
        }
+       read_seqlock_excl(&ufsi->meta_lock);
        ufsi->i_lastfrag = DIRECT_FRAGMENT;
+       read_sequnlock_excl(&ufsi->meta_lock);
        mark_inode_dirty(inode);
        mutex_unlock(&ufsi->truncate_mutex);
 }
index 878cc6264f1af4a87bf68bb71f2cb031bdb4b1a1..d5300adbfd79bafd05217cd7088d1221a4501187 100644 (file)
@@ -480,7 +480,7 @@ static void ufs_setup_cstotal(struct super_block *sb)
        usb3 = ubh_get_usb_third(uspi);
 
        if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-            (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
+            (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) ||
            mtype == UFS_MOUNT_UFSTYPE_UFS2) {
                /*we have statistic in different place, then usual*/
                uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
@@ -596,9 +596,7 @@ static void ufs_put_cstotal(struct super_block *sb)
        usb2 = ubh_get_usb_second(uspi);
        usb3 = ubh_get_usb_third(uspi);
 
-       if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-            (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
-           mtype == UFS_MOUNT_UFSTYPE_UFS2) {
+       if (mtype == UFS_MOUNT_UFSTYPE_UFS2) {
                /*we have statistic in different place, then usual*/
                usb2->fs_un.fs_u2.cs_ndir =
                        cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
@@ -608,16 +606,26 @@ static void ufs_put_cstotal(struct super_block *sb)
                        cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
                usb3->fs_un1.fs_u2.cs_nffree =
                        cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
-       } else {
-               usb1->fs_cstotal.cs_ndir =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
-               usb1->fs_cstotal.cs_nbfree =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
-               usb1->fs_cstotal.cs_nifree =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
-               usb1->fs_cstotal.cs_nffree =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+               goto out;
+       }
+
+       if (mtype == UFS_MOUNT_UFSTYPE_44BSD &&
+            (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) {
+               /* store stats in both old and new places */
+               usb2->fs_un.fs_u2.cs_ndir =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
+               usb2->fs_un.fs_u2.cs_nbfree =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
+               usb3->fs_un1.fs_u2.cs_nifree =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
+               usb3->fs_un1.fs_u2.cs_nffree =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
        }
+       usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
+       usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
+       usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
+       usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+out:
        ubh_mark_buffer_dirty(USPI_UBH(uspi));
        ufs_print_super_stuff(sb, usb1, usb2, usb3);
        UFSD("EXIT\n");
@@ -996,6 +1004,13 @@ again:
                flags |=  UFS_ST_SUN;
        }
 
+       if ((flags & UFS_ST_MASK) == UFS_ST_44BSD &&
+           uspi->s_postblformat == UFS_42POSTBLFMT) {
+               if (!silent)
+                       pr_err("this is not a 44bsd filesystem");
+               goto failed;
+       }
+
        /*
         * Check ufs magic number
         */
@@ -1143,8 +1158,8 @@ magic_found:
        uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
 
        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
-               uspi->s_u2_size  = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
-               uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
+               uspi->s_size  = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
+               uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
        } else {
                uspi->s_size  =  fs32_to_cpu(sb, usb1->fs_size);
                uspi->s_dsize =  fs32_to_cpu(sb, usb1->fs_dsize);
@@ -1193,6 +1208,9 @@ magic_found:
        uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
        uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
 
+       uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
+                                             uspi->s_minfree, 100);
+
        /*
         * Compute another frequently used values
         */
@@ -1382,19 +1400,17 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
        mutex_lock(&UFS_SB(sb)->s_lock);
        usb3 = ubh_get_usb_third(uspi);
        
-       if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+       if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                buf->f_type = UFS2_MAGIC;
-               buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
-       } else {
+       else
                buf->f_type = UFS_MAGIC;
-               buf->f_blocks = uspi->s_dsize;
-       }
-       buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-               uspi->cs_total.cs_nffree;
+
+       buf->f_blocks = uspi->s_dsize;
+       buf->f_bfree = ufs_freefrags(uspi);
        buf->f_ffree = uspi->cs_total.cs_nifree;
        buf->f_bsize = sb->s_blocksize;
-       buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
-               ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+       buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks)
+               ? (buf->f_bfree - uspi->s_root_blocks) : 0;
        buf->f_files = uspi->s_ncg * uspi->s_ipg;
        buf->f_namelen = UFS_MAXNAMLEN;
        buf->f_fsid.val[0] = (u32)id;
index 0cbd5d340b6705b9c6e22ff0a60a49891c302860..823d55a37586037f7ed02f9be5e1dbee890123d4 100644 (file)
@@ -733,10 +733,8 @@ struct ufs_sb_private_info {
        __u32   s_dblkno;       /* offset of first data after cg */
        __u32   s_cgoffset;     /* cylinder group offset in cylinder */
        __u32   s_cgmask;       /* used to calc mod fs_ntrak */
-       __u32   s_size;         /* number of blocks (fragments) in fs */
-       __u32   s_dsize;        /* number of data blocks in fs */
-       __u64   s_u2_size;      /* ufs2: number of blocks (fragments) in fs */
-       __u64   s_u2_dsize;     /*ufs2:  number of data blocks in fs */
+       __u64   s_size;         /* number of blocks (fragments) in fs */
+       __u64   s_dsize;        /* number of data blocks in fs */
        __u32   s_ncg;          /* number of cylinder groups */
        __u32   s_bsize;        /* size of basic blocks */
        __u32   s_fsize;        /* size of fragments */
@@ -793,6 +791,7 @@ struct ufs_sb_private_info {
        __u32   s_maxsymlinklen;/* upper limit on fast symlinks' size */
        __s32   fs_magic;       /* filesystem magic */
        unsigned int s_dirblksize;
+       __u64   s_root_blocks;
 };
 
 /*
index f41ad0a6106f28a2165c94068f6c7c3d71a67b3e..02497a492eb25085fc7fcefc446ad0e9cd0f967a 100644 (file)
@@ -243,9 +243,8 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
 struct page *ufs_get_locked_page(struct address_space *mapping,
                                 pgoff_t index)
 {
-       struct page *page;
-
-       page = find_lock_page(mapping, index);
+       struct inode *inode = mapping->host;
+       struct page *page = find_lock_page(mapping, index);
        if (!page) {
                page = read_mapping_page(mapping, index, NULL);
 
@@ -253,7 +252,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
                        printk(KERN_ERR "ufs_change_blocknr: "
                               "read_mapping_page error: ino %lu, index: %lu\n",
                               mapping->host->i_ino, index);
-                       goto out;
+                       return page;
                }
 
                lock_page(page);
@@ -262,8 +261,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
                        /* Truncate got there first */
                        unlock_page(page);
                        put_page(page);
-                       page = NULL;
-                       goto out;
+                       return NULL;
                }
 
                if (!PageUptodate(page) || PageError(page)) {
@@ -272,11 +270,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 
                        printk(KERN_ERR "ufs_change_blocknr: "
                               "can not read page: ino %lu, index: %lu\n",
-                              mapping->host->i_ino, index);
+                              inode->i_ino, index);
 
-                       page = ERR_PTR(-EIO);
+                       return ERR_PTR(-EIO);
                }
        }
-out:
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        return page;
 }
index 398019fb144816875f2c717c2252823a4dd76b99..9fc7119a1551f8ff82a02e9d89d05d6e60024c55 100644 (file)
@@ -350,16 +350,11 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
 #define ubh_blkmap(ubh,begin,bit) \
        ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
 
-/*
- * Determine the number of available frags given a
- * percentage to hold in reserve.
- */
 static inline u64
-ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
+ufs_freefrags(struct ufs_sb_private_info *uspi)
 {
        return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-               uspi->cs_total.cs_nffree -
-               (uspi->s_dsize * (percentreserved) / 100);
+               uspi->cs_total.cs_nffree;
 }
 
 /*
index f7555fc25877435e13b65cbe597ae9bdb11c6528..1d622f276e3a2c0fba23d979d7ae18cf6a899f30 100644 (file)
@@ -340,9 +340,28 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        bool must_wait, return_to_userland;
        long blocking_state;
 
-       BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
        ret = VM_FAULT_SIGBUS;
+
+       /*
+        * We don't do userfault handling for the final child pid update.
+        *
+        * We also don't do userfault handling during
+        * coredumping. hugetlbfs has the special
+        * follow_hugetlb_page() to skip missing pages in the
+        * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
+        * the no_page_table() helper in follow_page_mask(), but the
+        * shmem_vm_ops->fault method is invoked even during
+        * coredumping without mmap_sem and it ends up here.
+        */
+       if (current->flags & (PF_EXITING|PF_DUMPCORE))
+               goto out;
+
+       /*
+        * Coredumping runs without mmap_sem so we can only check that
+        * the mmap_sem is held, if PF_DUMPCORE was not set.
+        */
+       WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
        ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
        if (!ctx)
                goto out;
@@ -360,12 +379,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        if (unlikely(ACCESS_ONCE(ctx->released)))
                goto out;
 
-       /*
-        * We don't do userfault handling for the final child pid update.
-        */
-       if (current->flags & PF_EXITING)
-               goto out;
-
        /*
         * Check that we can return VM_FAULT_RETRY.
         *
index 07b77b73b0240c5cca4187d29e8e403cbf4f0714..16d6a578fc160528651d522359eb6e9ccecac78d 100644 (file)
@@ -117,7 +117,7 @@ static inline void
 __xfs_buf_ioacct_dec(
        struct xfs_buf  *bp)
 {
-       ASSERT(spin_is_locked(&bp->b_lock));
+       lockdep_assert_held(&bp->b_lock);
 
        if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
                bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
index f61c84f8e31a363ce144424336359630647eac07..990210fcb9c326f923c0c25a86557a0aba3bb40f 100644 (file)
@@ -66,7 +66,6 @@ xfs_inode_alloc(
 
        XFS_STATS_INC(mp, vn_active);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
-       ASSERT(!spin_is_locked(&ip->i_flags_lock));
        ASSERT(!xfs_isiflocked(ip));
        ASSERT(ip->i_ino == 0);
 
@@ -190,7 +189,7 @@ xfs_perag_set_reclaim_tag(
 {
        struct xfs_mount        *mp = pag->pag_mount;
 
-       ASSERT(spin_is_locked(&pag->pag_ici_lock));
+       lockdep_assert_held(&pag->pag_ici_lock);
        if (pag->pag_ici_reclaimable++)
                return;
 
@@ -212,7 +211,7 @@ xfs_perag_clear_reclaim_tag(
 {
        struct xfs_mount        *mp = pag->pag_mount;
 
-       ASSERT(spin_is_locked(&pag->pag_ici_lock));
+       lockdep_assert_held(&pag->pag_ici_lock);
        if (--pag->pag_ici_reclaimable)
                return;
 
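
The xfs_buf.c and xfs_icache.c hunks above all make the same substitution: spin_is_locked() is not usable in assertions because it always evaluates to 0 on uniprocessor (!CONFIG_SMP) builds, so the checks could trip even with the lock held, while lockdep_assert_held() compiles away without lockdep and, when enabled, also verifies that the current context owns the lock. A minimal sketch of the pattern follows; the structure and function names are illustrative, not part of these hunks.

	/* Sketch only -- the before/after assertion pattern. */
	struct foo {
		spinlock_t	lock;
		int		count;
	};

	static void foo_update_locked(struct foo *f)
	{
		/* Unreliable: spin_is_locked() is always 0 on !CONFIG_SMP,
		 * so ASSERT(spin_is_locked(&f->lock)) can fire spuriously.
		 */

		/* Preferred: a no-op unless lockdep is enabled, and it also
		 * checks that *this* task actually holds the lock.
		 */
		lockdep_assert_held(&f->lock);

		f->count++;
	}
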
index d92543f3bbfdcaaeac47252729af30ba428eb371..bdc55c0da19cd06c65e589d9071f65c2eb5c2332 100644 (file)
@@ -374,6 +374,20 @@ struct acpi_table_desc {
        u16 validation_count;
 };
 
+/*
+ * Maximum value of the validation_count field in struct acpi_table_desc.
+ * When reached, validation_count cannot be changed any more and the table will
+ * be permanently regarded as validated.
+ *
+ * This is to prevent situations in which unbalanced table get/put operations
+ * may cause premature table unmapping in the OS to happen.
+ *
+ * The maximum validation count can be defined to any value, but should be
+ * greater than the maximum number of OS early stage mapping slots to avoid
+ * leaking early stage table mappings to the late stage.
+ */
+#define ACPI_MAX_TABLE_VALIDATIONS          ACPI_UINT16_MAX
+
 /* Masks for Flags field above */
 
 #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL  (0)        /* Virtual address, external maintained */
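
The comment block above describes a saturating reference count: once validation_count reaches ACPI_MAX_TABLE_VALIDATIONS, further get/put calls leave it untouched and the table stays mapped permanently, so an unbalanced put can no longer unmap a table that early-stage code still relies on. A sketch of that behaviour follows; the helper names are illustrative, not the ACPICA ones.

	/* Sketch only -- saturating get/put on a table descriptor. */
	static void table_get(struct acpi_table_desc *desc)
	{
		if (desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS)
			desc->validation_count++;
		/* At the maximum the count is frozen: permanently validated. */
	}

	static void table_put(struct acpi_table_desc *desc)
	{
		if (desc->validation_count > 0 &&
		    desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS)
			desc->validation_count--;
	}
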
index ab92c4ea138b7c665c45b765c0e1f8a4958339ca..b74a3edcb3da82903568981a5b49fbbf1f4269cb 100644 (file)
@@ -586,6 +586,8 @@ struct request_queue {
 
        size_t                  cmd_size;
        void                    *rq_alloc_data;
+
+       struct work_struct      release_work;
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
index 2319b8c108e87b9e87c11cc4c9aa314d24eb0364..c9670904968329ce3032825d55b28a72cb8b223c 100644 (file)
@@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item,
                                       const char *name,
                                       struct config_item_type *type);
 
-extern struct config_item * config_item_get(struct config_item *);
+extern struct config_item *config_item_get(struct config_item *);
+extern struct config_item *config_item_get_unless_zero(struct config_item *);
 extern void config_item_put(struct config_item *);
 
 struct config_item_type {
index 5e9c74cf889481ddbf6ae3a9caef166bd6ae6dee..9bbf21a516e4aa339aff03e9c2091043bb265fba 100644 (file)
@@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; }
 static inline int dmi_name_in_serial(const char *s) { return 0; }
 #define dmi_available 0
 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
-       void *private_data) { return -1; }
+       void *private_data) { return -ENXIO; }
 static inline bool dmi_match(enum dmi_field f, const char *str)
        { return false; }
 static inline void dmi_memdev_name(u16 handle, const char **bank,
index 3f39d27decf4d72e734734edec4403b00ca95657..4ed952c17fc7757965c26d2eba1c56c39d5b4ccc 100644 (file)
@@ -914,8 +914,7 @@ struct xfrmdev_ops {
  *
  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  *     Called when a user wants to change the Maximum Transfer Unit
- *     of a device. If not defined, any request to change MTU will
- *     will return an error.
+ *     of a device.
  *
  * void (*ndo_tx_timeout)(struct net_device *dev);
  *     Callback used when the transmitter has not made any progress
@@ -1596,8 +1595,8 @@ enum netdev_priv_flags {
  *     @rtnl_link_state:       This enum represents the phases of creating
  *                             a new link
  *
- *     @destructor:            Called from unregister,
- *                             can be used to call free_netdev
+ *     @needs_free_netdev:     Should unregister perform free_netdev?
+ *     @priv_destructor:       Called from unregister
  *     @npinfo:                XXX: need comments on this one
  *     @nd_net:                Network namespace this network device is inside
  *
@@ -1858,7 +1857,8 @@ struct net_device {
                RTNL_LINK_INITIALIZING,
        } rtnl_link_state:16;
 
-       void (*destructor)(struct net_device *dev);
+       bool needs_free_netdev;
+       void (*priv_destructor)(struct net_device *dev);
 
 #ifdef CONFIG_NETPOLL
        struct netpoll_info __rcu       *npinfo;
@@ -4261,6 +4261,11 @@ static inline const char *netdev_name(const struct net_device *dev)
        return dev->name;
 }
 
+static inline bool netdev_unregistering(const struct net_device *dev)
+{
+       return dev->reg_state == NETREG_UNREGISTERING;
+}
+
 static inline const char *netdev_reg_state(const struct net_device *dev)
 {
        switch (dev->reg_state) {
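
The netdevice.h hunk above splits the old dev->destructor into two pieces, and the long run of driver conversions below follows from it: needs_free_netdev tells the core to call free_netdev() itself, while priv_destructor only releases driver-private state. A sketch of the resulting setup pattern, with made-up driver names used purely for illustration:

	/* Sketch only -- converting a driver to the split destructor fields. */
	static void example_dev_free(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		/* Release private resources only; do NOT call free_netdev()
		 * here -- the core does that when needs_free_netdev is set.
		 */
		free_percpu(priv->stats);
	}

	static void example_setup(struct net_device *dev)
	{
		dev->netdev_ops		= &example_netdev_ops;
		dev->needs_free_netdev	= true;			/* core frees the netdev   */
		dev->priv_destructor	= example_dev_free;	/* driver frees priv state */
	}
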
index 413335c8cb529a8506a2f934577c3413512d8c97..298f996969df632ba41b6c68f3d815678c89e079 100644 (file)
@@ -106,6 +106,16 @@ static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
 {
 }
 
+static inline void cec_notifier_register(struct cec_notifier *n,
+                        struct cec_adapter *adap,
+                        void (*callback)(struct cec_adapter *adap, u16 pa))
+{
+}
+
+static inline void cec_notifier_unregister(struct cec_notifier *n)
+{
+}
+
 #endif
 
 #endif
index bfa88d4d67e1d6663da4952a6a097e32f5e573a8..201f060978da2d5f8f49ad3fb31ac514c55e60c2 100644 (file)
@@ -206,7 +206,7 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
 #define cec_phys_addr_exp(pa) \
        ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
 
-#if IS_ENABLED(CONFIG_CEC_CORE)
+#if IS_REACHABLE(CONFIG_CEC_CORE)
 struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
                void *priv, const char *name, u32 caps, u8 available_las);
 int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
index d179d7767f519829a50395f47749360f88cd3d2d..7d4a594d5d58147e3cf4b56eb992268a619badf3 100644 (file)
@@ -1486,8 +1486,10 @@ enum ethtool_link_mode_bit_indices {
  * it was forced up into this mode or autonegotiated.
  */
 
-/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */
-/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */
+/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal.
+ * Update drivers/net/phy/phy.c:phy_speed_to_str() and
+ * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values.
+ */
 #define SPEED_10               10
 #define SPEED_100              100
 #define SPEED_1000             1000
index 61b7d36dfe34394f7cbed64217a8a9a1e7f55cfe..156ee4cab82e5e73bc2705a4707c04c1cf788b98 100644 (file)
@@ -343,6 +343,7 @@ enum ovs_key_attr {
 #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
 
 enum ovs_tunnel_key_attr {
+       /* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */
        OVS_TUNNEL_KEY_ATTR_ID,                 /* be64 Tunnel ID */
        OVS_TUNNEL_KEY_ATTR_IPV4_SRC,           /* be32 src IP address. */
        OVS_TUNNEL_KEY_ATTR_IPV4_DST,           /* be32 dst IP address. */
index 070be980c37a57d91f86099d2be1b81db80176bc..425170d4439be5926a63ef9be50c71c1dd5878a9 100644 (file)
@@ -1312,8 +1312,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                        ret = __irq_set_trigger(desc,
                                                new->flags & IRQF_TRIGGER_MASK);
 
-                       if (ret)
+                       if (ret) {
+                               irq_release_resources(desc);
                                goto out_mask;
+                       }
                }
 
                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
index 803c3bc274c4660bb672c2522331db967883aee4..326d4f88e2b1dbda470c7730c6ae1a67cc095f99 100644 (file)
@@ -5605,7 +5605,7 @@ void idle_task_exit(void)
        BUG_ON(cpu_online(smp_processor_id()));
 
        if (mm != &init_mm) {
-               switch_mm_irqs_off(mm, &init_mm, current);
+               switch_mm(mm, &init_mm, current);
                finish_arch_post_lock_switch();
        }
        mmdrop(mm);
index 622eed1b7658301a94a645c4426598b0096a8ab9..076a2e31951ccbb538e8004a85cf1f247818d57e 100644 (file)
@@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
        if (sg_policy->next_freq == next_freq)
                return;
 
-       if (sg_policy->next_freq > next_freq)
-               next_freq = (sg_policy->next_freq + next_freq) >> 1;
-
        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;
 
index d711093218415d77ead6405004dd9e41323ad924..c77e4b1d51c09d1fc948d0bb105cd97b50fc6a42 100644 (file)
@@ -3563,7 +3563,7 @@ static inline void check_schedstat_required(void)
                        trace_sched_stat_runtime_enabled())  {
                printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
                             "stat_blocked and stat_runtime require the "
-                            "kernel parameter schedstats=enabled or "
+                            "kernel parameter schedstats=enable or "
                             "kernel.sched_schedstats=1\n");
        }
 #endif
index 5cb5b0008d9710c95c1af7ab8312b5e308b2c5d0..ee2f4202d82aa2acec5e438218a2405a4315f166 100644 (file)
@@ -387,7 +387,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
 
-       start = ktime_add(start, base->gettime());
+       start = ktime_add_safe(start, base->gettime());
        alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -475,7 +475,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
                overrun++;
        }
 
-       alarm->node.expires = ktime_add(alarm->node.expires, interval);
+       alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
        return overrun;
 }
 EXPORT_SYMBOL_GPL(alarm_forward);
@@ -660,13 +660,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 
        /* start the timer */
        timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval);
+
+       /*
+        * Rate limit to the tick as a hot fix to prevent DOS. Will be
+        * mopped up later.
+        */
+       if (timr->it.alarm.interval < TICK_NSEC)
+               timr->it.alarm.interval = TICK_NSEC;
+
        exp = timespec64_to_ktime(new_setting->it_value);
        /* Convert (if necessary) to absolute time */
        if (flags != TIMER_ABSTIME) {
                ktime_t now;
 
                now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
-               exp = ktime_add(now, exp);
+               exp = ktime_add_safe(now, exp);
        }
 
        alarm_start(&timr->it.alarm.alarmtimer, exp);
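
The alarmtimer hunks above switch ktime_add() to ktime_add_safe() because the interval and expiry values come from userspace: a huge value added to the current time can overflow the signed 64-bit ktime_t and wrap negative, making the timer appear already expired, whereas ktime_add_safe() saturates near KTIME_MAX. A small sketch of the difference, with values chosen purely for illustration:

	/* Sketch only -- why the saturating add matters for user-supplied times. */
	ktime_t now      = ktime_get();
	ktime_t interval = KTIME_MAX;	/* hostile, or simply enormous, request */

	ktime_t wrapped   = ktime_add(now, interval);      /* can wrap negative      */
	ktime_t saturated = ktime_add_safe(now, interval); /* clamped near KTIME_MAX */
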
index 987e496bb51a9cc84c92bedc62cf8d69e8e85668..b398c2ea69b290cdaec1769b7d11cbc501646652 100644 (file)
@@ -37,9 +37,11 @@ static int tick_broadcast_forced;
 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 
 #ifdef CONFIG_TICK_ONESHOT
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 #else
+static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
 #endif
@@ -867,7 +869,7 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
 /**
  * tick_broadcast_setup_oneshot - setup the broadcast device
  */
-void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
        int cpu = smp_processor_id();
 
index f738251000fe6b07de4f3d54ee78d8f78408c5d7..be0ac01f2e1225b6d4bb814029854dfdba3cd5cb 100644 (file)
@@ -126,7 +126,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 
 /* Functions related to oneshot broadcasting */
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
-extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
 extern int tick_broadcast_oneshot_active(void);
@@ -134,7 +133,6 @@ extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 #else /* !(BROADCAST && ONESHOT): */
-static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
index 74a54b7f25626e8c6d224af2b7384f7dcbf2e72f..9f79547d1b9782237a563f6a2e838655f6adf6dc 100644 (file)
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
        SHASH_DESC_ON_STACK(shash, tfm);
-       u32 *ctx = (u32 *)shash_desc_ctx(shash);
+       u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
        int err;
 
        shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
        err = crypto_shash_update(shash, address, length);
        BUG_ON(err);
 
-       return *ctx;
+       ret = *ctx;
+       barrier_data(ctx);
+       return ret;
 }
 
 EXPORT_SYMBOL(crc32c);
index a84909cf20d36b3d84f00d8529127f78f6b5981d..88c6167f194db0ec07d707329569c2d4a9d34876 100644 (file)
@@ -1426,8 +1426,11 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
         */
        if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
                page = pmd_page(*vmf->pmd);
+               if (!get_page_unless_zero(page))
+                       goto out_unlock;
                spin_unlock(vmf->ptl);
                wait_on_page_locked(page);
+               put_page(page);
                goto out;
        }
 
@@ -1459,9 +1462,12 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 
        /* Migration could have started since the pmd_trans_migrating check */
        if (!page_locked) {
+               page_nid = -1;
+               if (!get_page_unless_zero(page))
+                       goto out_unlock;
                spin_unlock(vmf->ptl);
                wait_on_page_locked(page);
-               page_nid = -1;
+               put_page(page);
                goto out;
        }
 
index 342fac9ba89b0da3e207b1fdaef2be71c9837a24..ecc183fd94f36f35e91b69c6a06930bb456a7e86 100644 (file)
@@ -1184,7 +1184,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * page_remove_rmap() in try_to_unmap_one(). So to determine page status
         * correctly, we save a copy of the page flags at this time.
         */
-       page_flags = p->flags;
+       if (PageHuge(p))
+               page_flags = hpage->flags;
+       else
+               page_flags = p->flags;
 
        /*
         * unpoison always clear PG_hwpoison inside page lock
index ac6318a064d35e6dcc5385d1dc8062ff6e46554c..3405b4ee1757e3deb196c93e49b9641dad8d56fb 100644 (file)
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;
+
+               if (!(idx % SWAP_CLUSTER_MAX))
+                       cond_resched();
        }
        return 0;
 not_enough_page:
index 6063581f705c48b97a78a087d8c67127af9c8d68..ce0618bfa8d0643d0e65263ad65a9e7a7a4c92ed 100644 (file)
@@ -115,9 +115,9 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
        unsigned long pressure = 0;
 
        /*
-        * reclaimed can be greater than scanned in cases
-        * like THP, where the scanned is 1 and reclaimed
-        * could be 512
+        * reclaimed can be greater than scanned for things such as reclaimed
+        * slab pages. shrink_node() just adds reclaimed pages without a
+        * related increment to scanned pages.
         */
        if (reclaimed >= scanned)
                goto out;
index 953b6728bd00c8ca7a4a20f2d2036c6f8f27f8e3..abc5f400fc71f2f57f3a029d1c196890ee4e39e0 100644 (file)
@@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev)
 
        free_percpu(vlan->vlan_pcpu_stats);
        vlan->vlan_pcpu_stats = NULL;
-       free_netdev(dev);
 }
 
 void vlan_setup(struct net_device *dev)
@@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev)
        netif_keep_dst(dev);
 
        dev->netdev_ops         = &vlan_netdev_ops;
-       dev->destructor         = vlan_dev_free;
+       dev->needs_free_netdev  = true;
+       dev->priv_destructor    = vlan_dev_free;
        dev->ethtool_ops        = &vlan_ethtool_ops;
 
        dev->min_mtu            = 0;
index 013e970eff393e0550aa250f7e72c27301071552..000ca2f113ab857b4969a95b8fe1134806b05f15 100644 (file)
@@ -1064,8 +1064,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 
                skb_new->protocol = eth_type_trans(skb_new, soft_iface);
 
-               soft_iface->stats.rx_packets++;
-               soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
+               batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+               batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+                                  skb->len + ETH_HLEN + hdr_size);
 
                netif_rx(skb_new);
                batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
index e1ebe14ee2a6e21cc8d6b4a42552cae4bd15061f..ae9f4d37d34f07182c3d2527186a0999fe5b639d 100644 (file)
@@ -987,7 +987,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
                                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                                           "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
                                           orig_addr_gw);
-                               return NET_RX_DROP;
+                               goto free_skb;
                        }
                }
 
index b25789abf7b9e10aec7af1dfc41a5c9ff805284a..10f7edfb176ebd49c680ff4132db87aa00d3f04e 100644 (file)
@@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev)
         * netdev and its private data (bat_priv)
         */
        rcu_barrier();
-
-       free_netdev(dev);
 }
 
 /**
@@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev)
        ether_setup(dev);
 
        dev->netdev_ops = &batadv_netdev_ops;
-       dev->destructor = batadv_softif_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = batadv_softif_free;
        dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
        dev->priv_flags |= IFF_NO_QUEUE;
 
index 608959989f8eddbfc9b97279a7fba61fd7381d04..ab3b654b05cc87e19a965d0419efb7d45bd4b94e 100644 (file)
@@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev)
 
        dev->netdev_ops         = &netdev_ops;
        dev->header_ops         = &header_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 }
 
 static struct device_type bt_type = {
index 430b53e7d941def09220a1c97a2e82d288304595..f0f3447e8aa48ff02c3f11f6da0e65e79cb28e90 100644 (file)
@@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->netdev_ops = &br_netdev_ops;
-       dev->destructor = free_netdev;
+       dev->needs_free_netdev = true;
        dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
        dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
index adcad344c843985435958890583761e865a25374..21f18ea2fce440c1a0c8dcddc335e327dbf9bced 100644 (file)
@@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 
        lock_sock(sk);
 
+       err = -EINVAL;
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               goto out;
+
        err = -EAFNOSUPPORT;
        if (uaddr->sa_family != AF_CAIF)
                goto out;
index 59ce1fcc220ce0a71fb57733be7cc91e6b8ac7fc..71b6ab240dea26b228be35ba24ec3f43772f19f7 100644 (file)
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
 {
        struct sk_buff *skb;
 
-       if (likely(in_interrupt()))
-               skb = alloc_skb(len + pfx, GFP_ATOMIC);
-       else
-               skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+       skb = alloc_skb(len + pfx, GFP_ATOMIC);
        if (unlikely(skb == NULL))
                return NULL;
 
index 1816fc9f1ee779f85874e886a9ba6db721c60680..fe3c53efb949ef29a3dc1f6278d9abc05b50f579 100644 (file)
@@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev)
 {
        struct chnl_net *priv = netdev_priv(dev);
        caif_free_client(&priv->chnl);
-       free_netdev(dev);
 }
 
 static void ipcaif_net_setup(struct net_device *dev)
 {
        struct chnl_net *priv;
        dev->netdev_ops = &netdev_ops;
-       dev->destructor = chnl_net_destructor;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = chnl_net_destructor;
        dev->flags |= IFF_NOARP;
        dev->flags |= IFF_POINTOPOINT;
        dev->mtu = GPRS_PDP_MTU;
index b6406fe33c76d1e7ca5f439e9b7409ad3a680e6d..88edac0f3e366398d0c1e0de023b90b0669498f9 100644 (file)
@@ -872,8 +872,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
 
 static int can_pernet_init(struct net *net)
 {
-       net->can.can_rcvlists_lock =
-               __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock);
+       spin_lock_init(&net->can.can_rcvlists_lock);
        net->can.can_rx_alldev_list =
                kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
 
index fca407b4a6ea178d9224949bc57f89a26c97c5c1..6d60149287a1868cd65fcc55a4e29edd7611def3 100644 (file)
@@ -1253,8 +1253,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
        if (!new_ifalias)
                return -ENOMEM;
        dev->ifalias = new_ifalias;
+       memcpy(dev->ifalias, alias, len);
+       dev->ifalias[len] = 0;
 
-       strlcpy(dev->ifalias, alias, len+1);
        return len;
 }
 
@@ -4948,6 +4949,19 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(__skb_gro_checksum_complete);
 
+static void net_rps_send_ipi(struct softnet_data *remsd)
+{
+#ifdef CONFIG_RPS
+       while (remsd) {
+               struct softnet_data *next = remsd->rps_ipi_next;
+
+               if (cpu_online(remsd->cpu))
+                       smp_call_function_single_async(remsd->cpu, &remsd->csd);
+               remsd = next;
+       }
+#endif
+}
+
 /*
  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
@@ -4963,14 +4977,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
                local_irq_enable();
 
                /* Send pending IPI's to kick RPS processing on remote cpus. */
-               while (remsd) {
-                       struct softnet_data *next = remsd->rps_ipi_next;
-
-                       if (cpu_online(remsd->cpu))
-                               smp_call_function_single_async(remsd->cpu,
-                                                          &remsd->csd);
-                       remsd = next;
-               }
+               net_rps_send_ipi(remsd);
        } else
 #endif
                local_irq_enable();
@@ -7501,6 +7508,8 @@ out:
 err_uninit:
        if (dev->netdev_ops->ndo_uninit)
                dev->netdev_ops->ndo_uninit(dev);
+       if (dev->priv_destructor)
+               dev->priv_destructor(dev);
        goto out;
 }
 EXPORT_SYMBOL(register_netdevice);
@@ -7708,8 +7717,10 @@ void netdev_run_todo(void)
                WARN_ON(rcu_access_pointer(dev->ip6_ptr));
                WARN_ON(dev->dn_ptr);
 
-               if (dev->destructor)
-                       dev->destructor(dev);
+               if (dev->priv_destructor)
+                       dev->priv_destructor(dev);
+               if (dev->needs_free_netdev)
+                       free_netdev(dev);
 
                /* Report a network device has been unregistered */
                rtnl_lock();
@@ -8192,7 +8203,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
        struct sk_buff **list_skb;
        struct sk_buff *skb;
        unsigned int cpu;
-       struct softnet_data *sd, *oldsd;
+       struct softnet_data *sd, *oldsd, *remsd = NULL;
 
        local_irq_disable();
        cpu = smp_processor_id();
@@ -8233,6 +8244,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();
 
+#ifdef CONFIG_RPS
+       remsd = oldsd->rps_ipi_list;
+       oldsd->rps_ipi_list = NULL;
+#endif
+       /* send out pending IPI's on offline CPU */
+       net_rps_send_ipi(remsd);
+
        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
                netif_rx_ni(skb);
index 6192f11beec9077de964e2aeff4f78547f08b8da..13ba4a090c410e40853178df4ffca85f02b71b03 100644 (file)
@@ -469,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
                spin_lock_bh(&dst_garbage.lock);
                dst = dst_garbage.list;
                dst_garbage.list = NULL;
+               /* The code in dst_ifdown places a hold on the loopback device.
+                * If the gc entry processing is set to expire after a lengthy
+                * interval, this hold can cause netdev_wait_allrefs() to hang
+                * out and wait for a long time -- until the loopback
+                * interface is released.  If we're really unlucky, it'll emit
+                * pr_emerg messages to console too.  Reset the interval here,
+                * so dst cleanups occur in a more timely fashion.
+                */
+               if (dst_garbage.timer_inc > DST_GC_INC) {
+                       dst_garbage.timer_inc = DST_GC_INC;
+                       dst_garbage.timer_expires = DST_GC_MIN;
+                       mod_delayed_work(system_wq, &dst_gc_work,
+                                        dst_garbage.timer_expires);
+               }
                spin_unlock_bh(&dst_garbage.lock);
 
                if (last)
index 9e2c0a7cb3256e8cb2af1d65aaf293db96b9a418..5e61456f6bc795cfb75db2d530eb3b1872c82989 100644 (file)
@@ -1124,6 +1124,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
        struct ifla_vf_mac vf_mac;
        struct ifla_vf_info ivi;
 
+       memset(&ivi, 0, sizeof(ivi));
+
        /* Not all SR-IOV capable drivers support the
         * spoofcheck and "RSS query enable" query.  Preset to
         * -1 so the user space tool can detect that the driver
@@ -1132,7 +1134,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
        ivi.spoofchk = -1;
        ivi.rss_query_en = -1;
        ivi.trusted = -1;
-       memset(ivi.mac, 0, sizeof(ivi.mac));
        /* The default value for VF link state is "auto"
         * IFLA_VF_LINK_STATE_AUTO which equals zero
         */
index 1ed81ac6dd1a28b79dff9f9f4e8c1d0f99306a33..aa8ffecc46a439fa129dfea9c1f4d4e525ad3796 100644 (file)
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-       if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+       if (skb->len < sizeof(*nlh) ||
+           nlh->nlmsg_len < sizeof(*nlh) ||
+           skb->len < nlh->nlmsg_len)
                return;
 
        if (!netlink_capable(skb, CAP_NET_ADMIN))
index c73160fb11e7c666f8ac8ed4103b7312ff769a58..0a0a392dc2bd64b8c4202cc1361d828f9f984dd1 100644 (file)
@@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
        del_timer_sync(&hsr->announce_timer);
 
        synchronize_rcu();
-       free_netdev(hsr_dev);
 }
 
 static const struct net_device_ops hsr_device_ops = {
@@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev)
        SET_NETDEV_DEVTYPE(dev, &hsr_type);
        dev->priv_flags |= IFF_NO_QUEUE;
 
-       dev->destructor = hsr_dev_destroy;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = hsr_dev_destroy;
 
        dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
                           NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
index 4ebe2aa3e7d3e944295e9d53890e3cb9b7a90139..04b5450c5a5572e875f7900a3676fd80259b9b4b 100644 (file)
@@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
        unsigned long irqflags;
 
        frame->is_supervision = is_supervision_frame(port->hsr, skb);
-       frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
-                                      frame->is_supervision);
+       frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
        if (frame->node_src == NULL)
                return -1; /* Unknown node and !is_supervision, or no mem */
 
index 7ea925816f79d8a0e547a0feb1257fda23097ba4..284a9b820df8db51a0dbee9737db5a8127eeaad7 100644 (file)
@@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 
 /* Get the hsr_node from which 'skb' was sent.
  */
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                              bool is_sup)
 {
+       struct list_head *node_db = &port->hsr->node_db;
        struct hsr_node *node;
        struct ethhdr *ethhdr;
        u16 seq_out;
@@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
                 */
                seq_out = hsr_get_skb_sequence_nr(skb) - 1;
        } else {
-               WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+               /* this is called also for frames from master port and
+                * so warn only for non master ports
+                */
+               if (port->type != HSR_PT_MASTER)
+                       WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
                seq_out = HSR_SEQNR_START;
        }
 
index 438b40f98f5a986e50180c351d55c9dbb0b66977..4e04f0e868e95044ac27f77ca11d004d928326cd 100644 (file)
@@ -18,7 +18,7 @@ struct hsr_node;
 
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
                              u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                              bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
                          struct hsr_port *port);
index d7efbf0dad20f8c4d3c42d039eff528c678bc04c..0a866f3322901e357091955bf24ac16fc5f6b94f 100644 (file)
@@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev)
 
        ldev->netdev_ops        = &lowpan_netdev_ops;
        ldev->header_ops        = &lowpan_header_ops;
-       ldev->destructor        = free_netdev;
+       ldev->needs_free_netdev = true;
        ldev->features          |= NETIF_F_NETNS_LOCAL;
 }
 
index 43318b5f56474bc15253e74e156962dd2c8df01f..9144fa7df2ad51372ba1c1debf54a41460a8cbec 100644 (file)
@@ -657,8 +657,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        /* Needed by both icmp_global_allow and icmp_xmit_lock */
        local_bh_disable();
 
-       /* Check global sysctl_icmp_msgs_per_sec ratelimit */
-       if (!icmpv4_global_allow(net, type, code))
+       /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
+        * the incoming dev is loopback.  If the outgoing dev changes to not be
+        * loopback, the peer ratelimit still works (in icmpv4_xrlim_allow)
+        */
+       if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+             !icmpv4_global_allow(net, type, code))
                goto out_bh_enable;
 
        sk = icmp_xmit_lock(net);
index 44fd86de2823dd17de16276a8ec01b190e69b8b4..8f6b5bbcbf69f54f354d3678cf6e5f8374602edb 100644 (file)
@@ -2071,21 +2071,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 static void ip_mc_clear_src(struct ip_mc_list *pmc)
 {
-       struct ip_sf_list *psf, *nextpsf;
+       struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
 
-       for (psf = pmc->tomb; psf; psf = nextpsf) {
+       spin_lock_bh(&pmc->lock);
+       tomb = pmc->tomb;
+       pmc->tomb = NULL;
+       sources = pmc->sources;
+       pmc->sources = NULL;
+       pmc->sfmode = MCAST_EXCLUDE;
+       pmc->sfcount[MCAST_INCLUDE] = 0;
+       pmc->sfcount[MCAST_EXCLUDE] = 1;
+       spin_unlock_bh(&pmc->lock);
+
+       for (psf = tomb; psf; psf = nextpsf) {
                nextpsf = psf->sf_next;
                kfree(psf);
        }
-       pmc->tomb = NULL;
-       for (psf = pmc->sources; psf; psf = nextpsf) {
+       for (psf = sources; psf; psf = nextpsf) {
                nextpsf = psf->sf_next;
                kfree(psf);
        }
-       pmc->sources = NULL;
-       pmc->sfmode = MCAST_EXCLUDE;
-       pmc->sfcount[MCAST_INCLUDE] = 0;
-       pmc->sfcount[MCAST_EXCLUDE] = 1;
 }
 
 /* Join a multicast group
index b878ecbc0608fb433ae858b70e6e0101aa20fc4e..b436d077563174c22b48a81a6a856f30dd831a5e 100644 (file)
@@ -967,7 +967,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
        gro_cells_destroy(&tunnel->gro_cells);
        dst_cache_destroy(&tunnel->dst_cache);
        free_percpu(dev->tstats);
-       free_netdev(dev);
 }
 
 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1155,7 +1154,8 @@ int ip_tunnel_init(struct net_device *dev)
        struct iphdr *iph = &tunnel->parms.iph;
        int err;
 
-       dev->destructor = ip_tunnel_dev_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = ip_tunnel_dev_free;
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
index 551de4d023a8edbf74835b43cb32d9173eedae36..8ae425cad81858b3a79719d47b43f8baa346a06a 100644 (file)
@@ -101,8 +101,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
 static void ipmr_free_table(struct mr_table *mrt);
 
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-                         struct sk_buff *skb, struct mfc_cache *cache,
-                         int local);
+                         struct net_device *dev, struct sk_buff *skb,
+                         struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev)
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
        dev->features           |= NETIF_F_NETNS_LOCAL;
 }
 
@@ -988,7 +988,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 
                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
-                       ip_mr_forward(net, mrt, skb, c, 0);
+                       ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
                }
        }
 }
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 
 /* Queue a packet for resolution. It gets locked cache entry! */
 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
-                                struct sk_buff *skb)
+                                struct sk_buff *skb, struct net_device *dev)
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct mfc_cache *c;
@@ -1130,6 +1130,10 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
+               if (dev) {
+                       skb->dev = dev;
+                       skb->skb_iif = dev->ifindex;
+               }
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
                err = 0;
        }
@@ -1828,10 +1832,10 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-                         struct sk_buff *skb, struct mfc_cache *cache,
-                         int local)
+                         struct net_device *dev, struct sk_buff *skb,
+                         struct mfc_cache *cache, int local)
 {
-       int true_vifi = ipmr_find_vif(mrt, skb->dev);
+       int true_vifi = ipmr_find_vif(mrt, dev);
        int psend = -1;
        int vif, ct;
 
@@ -1853,13 +1857,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
        }
 
        /* Wrong interface: drop packet and (maybe) send PIM assert. */
-       if (mrt->vif_table[vif].dev != skb->dev) {
-               struct net_device *mdev;
-
-               mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev);
-               if (mdev == skb->dev)
-                       goto forward;
-
+       if (mrt->vif_table[vif].dev != dev) {
                if (rt_is_output_route(skb_rtable(skb))) {
                        /* It is our own packet, looped back.
                         * Very complicated situation...
@@ -2053,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb)
                read_lock(&mrt_lock);
                vif = ipmr_find_vif(mrt, dev);
                if (vif >= 0) {
-                       int err2 = ipmr_cache_unresolved(mrt, vif, skb);
+                       int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
                        read_unlock(&mrt_lock);
 
                        return err2;
@@ -2064,7 +2062,7 @@ int ip_mr_input(struct sk_buff *skb)
        }
 
        read_lock(&mrt_lock);
-       ip_mr_forward(net, mrt, skb, cache, local);
+       ip_mr_forward(net, mrt, dev, skb, cache, local);
        read_unlock(&mrt_lock);
 
        if (local)
@@ -2238,7 +2236,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
                iph->saddr = saddr;
                iph->daddr = daddr;
                iph->version = 0;
-               err = ipmr_cache_unresolved(mrt, vif, skb2);
+               err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
                read_unlock(&mrt_lock);
                rcu_read_unlock();
                return err;
index 230b5aac9f03eadb775eea9cb3d9b4cce571cc32..8d7b113958b1332be11545069fafa8072a64e6a2 100644 (file)
@@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
        local_bh_disable();
 
        /* Check global sysctl_icmp_msgs_per_sec ratelimit */
-       if (!icmpv6_global_allow(type))
+       if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
                goto out_bh_enable;
 
        mip6_addr_swap(skb);
index 2fd5ca151dcfca6034b0b0b27a3fe9abc7899e75..77f7f8c7d93d67483f241616123380f9d8e6ba84 100644 (file)
@@ -62,6 +62,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
 {
        u32 *v = (u32 *)loc.v32;
 
+       __ila_hash_secret_init();
        return jhash_2words(v[0], v[1], hashrnd);
 }
 
index 0c5b4caa19491eb04bc755032611c76f03008acb..64eea3962733a323fbae25b3c7b6de658a8cfdea 100644 (file)
@@ -991,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev)
 
        dst_cache_destroy(&t->dst_cache);
        free_percpu(dev->tstats);
-       free_netdev(dev);
 }
 
 static void ip6gre_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops = &ip6gre_netdev_ops;
-       dev->destructor = ip6gre_dev_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = ip6gre_dev_free;
 
        dev->type = ARPHRD_IP6GRE;
 
@@ -1148,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net)
        return 0;
 
 err_reg_dev:
-       ip6gre_dev_free(ign->fb_tunnel_dev);
+       free_netdev(ign->fb_tunnel_dev);
 err_alloc_dev:
        return err;
 }
@@ -1300,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
-       dev->destructor = ip6gre_dev_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = ip6gre_dev_free;
 
        dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
index 9b37f9747fc6a6fbabb0740188bc98b5c95c41c4..c3581973f5d7265a574ae69416a516526ed64e44 100644 (file)
@@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev)
        gro_cells_destroy(&t->gro_cells);
        dst_cache_destroy(&t->dst_cache);
        free_percpu(dev->tstats);
-       free_netdev(dev);
 }
 
 static int ip6_tnl_create2(struct net_device *dev)
@@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
        return t;
 
 failed_free:
-       ip6_dev_free(dev);
+       free_netdev(dev);
 failed:
        return ERR_PTR(err);
 }
@@ -1777,7 +1776,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &ip6_tnl_netdev_ops;
-       dev->destructor = ip6_dev_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = ip6_dev_free;
 
        dev->type = ARPHRD_TUNNEL6;
        dev->flags |= IFF_NOARP;
@@ -2224,7 +2224,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
        return 0;
 
 err_register:
-       ip6_dev_free(ip6n->fb_tnl_dev);
+       free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
        return err;
 }
index d67ef56454b25a088768e88fcd1f878a8c498f12..837ea1eefe7f8cc85924a9604d58cd702af94667 100644 (file)
@@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
 static void vti6_dev_free(struct net_device *dev)
 {
        free_percpu(dev->tstats);
-       free_netdev(dev);
 }
 
 static int vti6_tnl_create2(struct net_device *dev)
@@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
        return t;
 
 failed_free:
-       vti6_dev_free(dev);
+       free_netdev(dev);
 failed:
        return NULL;
 }
@@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &vti6_netdev_ops;
-       dev->destructor = vti6_dev_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = vti6_dev_free;
 
        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
@@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net)
        return 0;
 
 err_register:
-       vti6_dev_free(ip6n->fb_tnl_dev);
+       free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
        return err;
 }
index 374997d26488ea38db7ed42ff7e6a55ede249021..2ecb39b943b5002e63bfa5b045919fbfd9fd64f6 100644 (file)
@@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev)
        dev->mtu                = 1500 - sizeof(struct ipv6hdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
        dev->features           |= NETIF_F_NETNS_LOCAL;
 }
 
index cc8e3ae9ca736490c3f689297d190f930fdd2ac9..e88bcb8ff0fd73b8377f5e0d3c3d1fa524ef1da8 100644 (file)
@@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
        u64 buff64[SNMP_MIB_MAX];
        int i;
 
-       memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+       memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
 
        snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
        for (i = 0; itemlist[i].name; i++)
index dc61b0b5e64edf7bd69ab905573e38415abf2346..7cebd954d5bb4263017f8f2d577ecfb88ca7c093 100644 (file)
@@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
        if ((rt->dst.dev == dev || !dev) &&
            rt != adn->net->ipv6.ip6_null_entry &&
            (rt->rt6i_nsiblings == 0 ||
+            (dev && netdev_unregistering(dev)) ||
             !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
                return -1;
 
index 61e5902f068732b10f734c7937c7539d418820d7..2378503577b0c8823049b7d17f857466481077b3 100644 (file)
@@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        return nt;
 
 failed_free:
-       ipip6_dev_free(dev);
+       free_netdev(dev);
 failed:
        return NULL;
 }
@@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev)
 
        dst_cache_destroy(&tunnel->dst_cache);
        free_percpu(dev->tstats);
-       free_netdev(dev);
 }
 
 #define SIT_FEATURES (NETIF_F_SG          | \
@@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
        dev->netdev_ops         = &ipip6_netdev_ops;
-       dev->destructor         = ipip6_dev_free;
+       dev->needs_free_netdev  = true;
+       dev->priv_destructor    = ipip6_dev_free;
 
        dev->type               = ARPHRD_SIT;
        dev->hard_header_len    = LL_MAX_HEADER + t_hlen;
index 74d09f91709e6121ed80b8f089283615a5ffce5e..3be852808a9d1f7a2767f482b334279a95e90642 100644 (file)
@@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->netdev_ops         = &irlan_eth_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
        dev->min_mtu            = 0;
        dev->max_mtu            = ETH_MAX_MTU;
 
index 8b21af7321b928b4dcc5d7af3a6667380e9a949a..4de2ec94b08cbf5aba016da754b799c46d5f378a 100644 (file)
@@ -114,12 +114,13 @@ static void l2tp_eth_get_stats64(struct net_device *dev,
 {
        struct l2tp_eth *priv = netdev_priv(dev);
 
-       stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
-       stats->tx_packets = atomic_long_read(&priv->tx_packets);
-       stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
-       stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
-       stats->rx_packets = atomic_long_read(&priv->rx_packets);
-       stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+       stats->tx_bytes   = (unsigned long) atomic_long_read(&priv->tx_bytes);
+       stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
+       stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
+       stats->rx_bytes   = (unsigned long) atomic_long_read(&priv->rx_bytes);
+       stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
+       stats->rx_errors  = (unsigned long) atomic_long_read(&priv->rx_errors);
+
 }
 
 static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -141,7 +142,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
        dev->priv_flags         &= ~IFF_TX_SKB_SHARING;
        dev->features           |= NETIF_F_LLTX;
        dev->netdev_ops         = &l2tp_eth_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 }
 
 static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
index 6c2e6060cd549e3c4c39e78cc20ca9302472c4ac..4a388fe8c2d1363b300becd61664d333b128e845 100644 (file)
@@ -902,6 +902,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        default:
                return -EINVAL;
        }
+       sdata->u.ap.req_smps = sdata->smps_mode;
+
        sdata->needed_rx_chains = sdata->local->rx_chains;
 
        sdata->vif.bss_conf.beacon_int = params->beacon_interval;
index 665501ac358f8d83630f2727fe6249dcfeeb9689..5e002f62c235fbae4c5154ef9eab65c0e5f842b8 100644 (file)
@@ -1531,7 +1531,7 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
                return true;
        /* can't handle non-legacy preamble yet */
        if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
-           status->encoding != RX_ENC_LEGACY)
+           status->encoding == RX_ENC_LEGACY)
                return true;
        return false;
 }
index 8fae1a72e6a7c7ea4f71ec3a3beb215b987a715f..f5f50150ba1cd7f53689a4e5efac3235f04679cf 100644 (file)
@@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
 static void ieee80211_if_free(struct net_device *dev)
 {
        free_percpu(dev->tstats);
-       free_netdev(dev);
 }
 
 static void ieee80211_if_setup(struct net_device *dev)
@@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev)
        ether_setup(dev);
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->netdev_ops = &ieee80211_dataif_ops;
-       dev->destructor = ieee80211_if_free;
+       dev->needs_free_netdev = true;
+       dev->priv_destructor = ieee80211_if_free;
 }
 
 static void ieee80211_if_setup_no_queue(struct net_device *dev)
@@ -1816,6 +1816,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                ret = dev_alloc_name(ndev, ndev->name);
                if (ret < 0) {
                        ieee80211_if_free(ndev);
+                       free_netdev(ndev);
                        return ret;
                }
 
@@ -1905,7 +1906,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = register_netdevice(ndev);
                if (ret) {
-                       ieee80211_if_free(ndev);
+                       free_netdev(ndev);
                        return ret;
                }
        }
index 0ea9712bd99ea698f40b068d5ee31a69133224a3..cc8e6ea1b27e95e36412d840cd394039f5df4d5d 100644 (file)
@@ -601,7 +601,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_supported_band *sband;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
-       u32 rate_flags, rates = 0;
+       u32 rates = 0;
 
        sdata_assert_lock(sdata);
 
@@ -612,7 +612,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                return;
        }
        chan = chanctx_conf->def.chan;
-       rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
        rcu_read_unlock();
        sband = local->hw.wiphy->bands[chan->band];
        shift = ieee80211_vif_get_shift(&sdata->vif);
@@ -636,9 +635,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                 */
                rates_len = 0;
                for (i = 0; i < sband->n_bitrates; i++) {
-                       if ((rate_flags & sband->bitrates[i].flags)
-                           != rate_flags)
-                               continue;
                        rates |= BIT(i);
                        rates_len++;
                }
@@ -2818,7 +2814,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
                                u32 *rates, u32 *basic_rates,
                                bool *have_higher_than_11mbit,
                                int *min_rate, int *min_rate_index,
-                               int shift, u32 rate_flags)
+                               int shift)
 {
        int i, j;
 
@@ -2846,8 +2842,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
                        int brate;
 
                        br = &sband->bitrates[j];
-                       if ((rate_flags & br->flags) != rate_flags)
-                               continue;
 
                        brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
                        if (brate == rate) {
@@ -4398,40 +4392,32 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        return -ENOMEM;
        }
 
-       if (new_sta || override) {
-               err = ieee80211_prep_channel(sdata, cbss);
-               if (err) {
-                       if (new_sta)
-                               sta_info_free(local, new_sta);
-                       return -EINVAL;
-               }
-       }
-
+       /*
+        * Set up the information for the new channel before setting the
+        * new channel. We can't - completely race-free - change the basic
+        * rates bitmap and the channel (sband) that it refers to, but if
+        * we set it up before we at least avoid calling into the driver's
+        * bss_info_changed() method with invalid information (since we do
+        * call that from changing the channel - only for IDLE and perhaps
+        * some others, but ...).
+        *
+        * So to avoid that, just set up all the new information before the
+        * channel, but tell the driver to apply it only afterwards, since
+        * it might need the new channel for that.
+        */
        if (new_sta) {
                u32 rates = 0, basic_rates = 0;
                bool have_higher_than_11mbit;
                int min_rate = INT_MAX, min_rate_index = -1;
-               struct ieee80211_chanctx_conf *chanctx_conf;
                const struct cfg80211_bss_ies *ies;
                int shift = ieee80211_vif_get_shift(&sdata->vif);
-               u32 rate_flags;
-
-               rcu_read_lock();
-               chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-               if (WARN_ON(!chanctx_conf)) {
-                       rcu_read_unlock();
-                       sta_info_free(local, new_sta);
-                       return -EINVAL;
-               }
-               rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
-               rcu_read_unlock();
 
                ieee80211_get_rates(sband, bss->supp_rates,
                                    bss->supp_rates_len,
                                    &rates, &basic_rates,
                                    &have_higher_than_11mbit,
                                    &min_rate, &min_rate_index,
-                                   shift, rate_flags);
+                                   shift);
 
                /*
                 * This used to be a workaround for basic rates missing
@@ -4489,8 +4475,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        sdata->vif.bss_conf.sync_dtim_count = 0;
                }
                rcu_read_unlock();
+       }
 
-               /* tell driver about BSSID, basic rates and timing */
+       if (new_sta || override) {
+               err = ieee80211_prep_channel(sdata, cbss);
+               if (err) {
+                       if (new_sta)
+                               sta_info_free(local, new_sta);
+                       return -EINVAL;
+               }
+       }
+
+       if (new_sta) {
+               /*
+                * tell driver about BSSID, basic rates and timing
+                * this was set up above, before setting the channel
+                */
                ieee80211_bss_info_change_notify(sdata,
                        BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
                        BSS_CHANGED_BEACON_INT);
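
The comment added in this hunk spells out the ordering it establishes: compute the new BSS configuration first, switch the channel, and only then notify the driver, so bss_info_changed() never sees a basic-rates bitmap that refers to the old band. A minimal sketch of that sequence; the struct and helper names below are made up for illustration and are not the mac80211 API:

    static int prep_connection_sketch(struct vif *vif, const struct bss *bss)
    {
            struct bss_conf conf;

            build_bss_conf(&conf, bss);             /* 1: compute new state, driver not told yet */

            if (switch_channel(vif, bss->chan))     /* 2: move to the new channel/band */
                    return -EINVAL;

            notify_driver_bss_change(vif, &conf);   /* 3: apply the prepared config after the switch */
            return 0;
    }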
index 1f75280ba26c78b3ad9864d0b63305cbba43f8fe..3674fe3d67dc74ba9042e91920f43376325c6b6a 100644 (file)
@@ -1613,12 +1613,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         */
        if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
+           !ieee80211_is_back_req(hdr->frame_control) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
            (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
             rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-           /* PM bit is only checked in frames where it isn't reserved,
+           /*
+            * PM bit is only checked in frames where it isn't reserved,
             * in AP mode it's reserved in non-bufferable management frames
             * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+            * BAR frames should be ignored as specified in
+            * IEEE 802.11-2012 10.2.1.2.
             */
            (!ieee80211_is_mgmt(hdr->frame_control) ||
             ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
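
The new !ieee80211_is_back_req() condition keeps BlockAck request (BAR) frames from toggling the peer's power-save state, per IEEE 802.11-2012 10.2.1.2. The helper is a frame-control test of roughly this shape (a sketch from the usual ieee80211_is_*() pattern; the authoritative definition lives in include/linux/ieee80211.h):

    static inline bool is_back_req(__le16 fc)
    {
            /* control-type frame with the BlockAckReq subtype */
            return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
                   cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
    }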
index c1ef22df865fe77bf7cf0a42d560f54db98a3edd..cc19614ff4e60bed9a351b8f8aa73792103665a0 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/unaligned.h>
 #include <net/mac80211.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 
 #include "ieee80211_i.h"
 #include "michael.h"
@@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
        data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
        key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
        michael_mic(key, hdr, data, data_len, mic);
-       if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+       if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
                goto mic_fail;
 
        /* remove Michael MIC from payload */
@@ -1048,7 +1049,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
                bip_aad(skb, aad);
                ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
                                   skb->data + 24, skb->len - 24, mic);
-               if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+               if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_cmac.icverrors++;
                        return RX_DROP_UNUSABLE;
                }
@@ -1098,7 +1099,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
                bip_aad(skb, aad);
                ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
                                       skb->data + 24, skb->len - 24, mic);
-               if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+               if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_cmac.icverrors++;
                        return RX_DROP_UNUSABLE;
                }
@@ -1202,7 +1203,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
                if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
                                       skb->data + 24, skb->len - 24,
                                       mic) < 0 ||
-                   memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+                   crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_gmac.icverrors++;
                        return RX_DROP_UNUSABLE;
                }
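
All four hunks in this file swap memcmp() for crypto_memneq() when verifying a received MIC. memcmp() may return as soon as the first byte differs, so its run time can leak how much of a forged MIC matched; a comparison used for authentication has to touch every byte regardless of where the mismatch is. A self-contained user-space illustration of the idea (the kernel's crypto_memneq() additionally defends against compiler optimisations, which this sketch does not attempt):

    #include <stddef.h>

    static int ct_memneq(const void *a, const void *b, size_t len)
    {
            const unsigned char *pa = a, *pb = b;
            unsigned char diff = 0;
            size_t i;

            for (i = 0; i < len; i++)
                    diff |= pa[i] ^ pb[i];  /* accumulate differences, never branch early */

            return diff != 0;               /* non-zero means "not equal", like crypto_memneq() */
    }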
index 06019dba4b10e3e0d079ee617707ae5412269144..bd88a9b80773e20e5de1f5a66ae7a3471387135e 100644 (file)
@@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev)
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 
        mac802154_llsec_destroy(&sdata->sec);
-
-       free_netdev(dev);
 }
 
 static void ieee802154_if_setup(struct net_device *dev)
@@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                                        sdata->dev->dev_addr);
 
                sdata->dev->header_ops = &mac802154_header_ops;
-               sdata->dev->destructor = mac802154_wpan_free;
+               sdata->dev->needs_free_netdev = true;
+               sdata->dev->priv_destructor = mac802154_wpan_free;
                sdata->dev->netdev_ops = &mac802154_wpan_ops;
                sdata->dev->ml_priv = &mac802154_mlme_wpan;
                wpan_dev->promiscuous_mode = false;
@@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
 
                break;
        case NL802154_IFTYPE_MONITOR:
-               sdata->dev->destructor = free_netdev;
+               sdata->dev->needs_free_netdev = true;
                sdata->dev->netdev_ops = &mac802154_monitor_ops;
                wpan_dev->promiscuous_mode = true;
                break;
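
This file's hunks, and the two files that follow, move from the old all-in-one ->destructor hook to the newer split: the driver's ->priv_destructor releases only its private state, and ->needs_free_netdev tells the core to call free_netdev() itself. A sketch of the convention for a hypothetical driver (kernel-only API; the foo_* names are invented for illustration):

    static void foo_destructor(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            foo_release_resources(priv);    /* private state only; no free_netdev() here */
    }

    static void foo_setup(struct net_device *dev)
    {
            dev->needs_free_netdev = true;          /* core frees the net_device itself   */
            dev->priv_destructor = foo_destructor;  /* driver frees only its own state    */
    }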
index 89193a634da45bb78498ecd28ca39c455e32f2e4..04a3128adcf0adcc2603e45c9054cdd2fd0e7a0b 100644 (file)
@@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev)
        struct vport *vport = ovs_internal_dev_get_vport(dev);
 
        ovs_vport_free(vport);
-       free_netdev(dev);
 }
 
 static void
@@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
                              IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
-       netdev->destructor = internal_dev_destructor;
+       netdev->needs_free_netdev = true;
+       netdev->priv_destructor = internal_dev_destructor;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->rtnl_link_ops = &internal_dev_link_ops;
 
index 21c28b51be9439b20369b48077ac8392db7c3150..2c9337946e3038f130d198bb51d061a648ef1ed4 100644 (file)
@@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev)
        dev->tx_queue_len       = 10;
 
        dev->netdev_ops         = &gprs_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->needs_free_netdev  = true;
 }
 
 /*
index 164b5ac094be6d8bbd7a04aa8aa962a3c693ab44..7dc5892671c818db55b024b10d56ec2252c2f826 100644 (file)
@@ -94,8 +94,10 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
                k++;
        }
 
-       if (n)
+       if (n) {
+               err = -EINVAL;
                goto err_out;
+       }
 
        return keys_ex;
 
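
The fix here targets a common error-path bug: by this point err may still hold 0 from the last successful step, so jumping to err_out on leftover, unparsed input would report success. Setting the code immediately before the goto closes that hole. A self-contained illustration of the bug class:

    #include <errno.h>

    static int parse_keys(int bytes_left)
    {
            int err = 0;

            /* ... earlier parsing succeeded, so err == 0 ... */

            if (bytes_left) {       /* trailing, unparsed bytes          */
                    err = -EINVAL;  /* must be set before the jump,      */
                    goto err_out;   /* or the caller sees "success"      */
            }
            return 0;

    err_out:
            /* cleanup ... */
            return err;
    }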
index f42008b293112d1a6da2bcf6ef8af564b382ba39..b062bc80c7cb11b0ea473916c58957b54db21594 100644 (file)
@@ -132,21 +132,21 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
                }
        }
 
-       spin_lock_bh(&police->tcf_lock);
        if (est) {
                err = gen_replace_estimator(&police->tcf_bstats, NULL,
                                            &police->tcf_rate_est,
                                            &police->tcf_lock,
                                            NULL, est);
                if (err)
-                       goto failure_unlock;
+                       goto failure;
        } else if (tb[TCA_POLICE_AVRATE] &&
                   (ret == ACT_P_CREATED ||
                    !gen_estimator_active(&police->tcf_rate_est))) {
                err = -EINVAL;
-               goto failure_unlock;
+               goto failure;
        }
 
+       spin_lock_bh(&police->tcf_lock);
        /* No failure allowed after this point */
        police->tcfp_mtu = parm->mtu;
        if (police->tcfp_mtu == 0) {
@@ -192,8 +192,6 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 
        return ret;
 
-failure_unlock:
-       spin_unlock_bh(&police->tcf_lock);
 failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
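
This hunk moves the estimator setup in front of spin_lock_bh(&police->tcf_lock) and drops the failure_unlock label. One plausible reading, stated here as an assumption rather than taken from the excerpt, is that gen_replace_estimator() cannot safely run while the very lock it is handed as the stats lock is already held, and may block; the general rule it restores is that work which can sleep, fail, or re-take a lock belongs before the spinlocked "no failure allowed" region:

    err = setup_that_may_sleep_or_lock();   /* hypothetical stand-in for the estimator call */
    if (err)
            goto failure;                   /* nothing locked yet, plain cleanup suffices   */

    spin_lock_bh(&police->tcf_lock);
    /* ... only non-sleeping, non-failing updates from here on ... */
    spin_unlock_bh(&police->tcf_lock);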
index f16c8d97b7f313e9f671d8bb8620b0f9566e619f..30aa0a529215ae54e43bdcb54a6e1870761996c3 100644 (file)
@@ -4622,13 +4622,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
 
        for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
             hash++, head++) {
-               read_lock(&head->lock);
+               read_lock_bh(&head->lock);
                sctp_for_each_hentry(epb, &head->chain) {
                        err = cb(sctp_ep(epb), p);
                        if (err)
                                break;
                }
-               read_unlock(&head->lock);
+               read_unlock_bh(&head->lock);
        }
 
        return err;
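
Switching to the _bh variants matters when the write side of the same rwlock runs with bottom halves disabled (presumably the case for the sctp endpoint hash, though this excerpt does not show the writers): a plain read_lock() taken in process context can be preempted by a thread spinning on write_lock() with BH off, and the reader never gets to run again to release it. Sketch of the safe pattern:

    read_lock_bh(&head->lock);      /* take the lock and disable softirqs locally */
    /* ... walk the hash chain; no sleeping in here ... */
    read_unlock_bh(&head->lock);    /* re-enable softirqs */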
index 312ef7de57d7ba27c58533df9edb2f4dd61cc864..ab3087687a32446ffa3bbfaccf206028886e6945 100644 (file)
@@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
        }
 
        if (skb_cloned(_skb) &&
-           pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+           pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
                goto exit;
 
        /* Now reverse the concerned fields */
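
GFP_KERNEL allocations may sleep, which is not allowed in the atomic contexts this message-reversal path can apparently be reached from (receive-side softirq processing); GFP_ATOMIC never sleeps but fails more readily, so the existing goto-exit handling still matters. The rule in miniature, with a hypothetical allocation standing in for pskb_expand_head():

    nskb = alloc_skb(len, GFP_ATOMIC);      /* atomic context: must not sleep   */
    if (!nskb)
            goto exit;                      /* GFP_ATOMIC can fail; handle it   */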
index 6a7fe7660551f45c065a7f472b805c0b8073f6bb..1a0c961f4ffeef40abc5b980e004b01120e3c536 100644 (file)
@@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        struct path path = { };
 
        err = -EINVAL;
-       if (sunaddr->sun_family != AF_UNIX)
+       if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+           sunaddr->sun_family != AF_UNIX)
                goto out;
 
        if (addr_len == sizeof(short)) {
@@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
        unsigned int hash;
        int err;
 
+       err = -EINVAL;
+       if (alen < offsetofend(struct sockaddr, sa_family))
+               goto out;
+
        if (addr->sa_family != AF_UNSPEC) {
                err = unix_mkname(sunaddr, alen, &hash);
                if (err < 0)
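
Both hunks guard against a caller-supplied address length that is too short to even contain the address-family field, which the code reads before anything else. offsetofend() is the kernel helper for "offset of the first byte past this member"; a self-contained user-space version of the check (the macro is redefined here only for the sketch):

    #include <stddef.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    static int family_is_unix(const struct sockaddr_un *sun, int addr_len)
    {
            if (addr_len < (int)offsetofend(struct sockaddr_un, sun_family))
                    return 0;       /* too short to hold sun_family at all */
            return sun->sun_family == AF_UNIX;
    }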
index e67a526d1f301e4b9f27cc1685875aef85e6e9c3..819fd6858b499dca6ad37e04d11230c4daad6ff5 100644 (file)
@@ -1106,10 +1106,8 @@ static int selinux_parse_opts_str(char *options,
 
        opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int),
                                       GFP_KERNEL);
-       if (!opts->mnt_opts_flags) {
-               kfree(opts->mnt_opts);
+       if (!opts->mnt_opts_flags)
                goto out_err;
-       }
 
        if (fscontext) {
                opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1132,6 +1130,7 @@ static int selinux_parse_opts_str(char *options,
        return 0;
 
 out_err:
+       security_free_mnt_opts(opts);
        kfree(context);
        kfree(defcontext);
        kfree(fscontext);
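
The shape of this fix is to stop freeing individual pieces on one failure branch and instead funnel every failure through a label that can clean up whatever was partially built (here via security_free_mnt_opts()). A generic, self-contained illustration of the single-cleanup-label pattern, not the SELinux code itself:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static int build_pair(char **out_a, char **out_b)
    {
            char *a = NULL, *b = NULL;

            a = strdup("alpha");
            if (!a)
                    goto out_err;
            b = strdup("beta");
            if (!b)
                    goto out_err;

            *out_a = a;
            *out_b = b;
            return 0;

    out_err:
            free(a);        /* free(NULL) is a no-op, so one label   */
            free(b);        /* covers every partial state            */
            return -ENOMEM;
    }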
index 282a60368b14df9e88b304d7e60aac77fd8e6bc0..5f66697fe1e09a81b9fa8c5cb37c54331cf5888e 100644 (file)
@@ -192,7 +192,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "complete_and_exit",
                "kvm_spurious_fault",
                "__reiserfs_panic",
-               "lbug_with_loc"
+               "lbug_with_loc",
+               "fortify_panic",
        };
 
        if (func->bind == STB_WEAK)
index 8354d04b392fd9e94c0812731b55e7267b41504e..1f4fbc9a3292e06b6ae48c47297e795ccbc45809 100644 (file)
@@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
 
 include $(srctree)/tools/scripts/Makefile.arch
 
-$(call detected_var,ARCH)
+$(call detected_var,SRCARCH)
 
 NO_PERF_REGS := 1
 
 # Additional ARCH settings for ppc
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
 endif
 
 # Additional ARCH settings for x86
-ifeq ($(ARCH),x86)
+ifeq ($(SRCARCH),x86)
   $(call detected,CONFIG_X86)
   ifeq (${IS_64_BIT}, 1)
     CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
@@ -43,12 +43,12 @@ ifeq ($(ARCH),x86)
   NO_PERF_REGS := 0
 endif
 
-ifeq ($(ARCH),arm)
+ifeq ($(SRCARCH),arm)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-arm
 endif
 
-ifeq ($(ARCH),arm64)
+ifeq ($(SRCARCH),arm64)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
 endif
@@ -61,7 +61,7 @@ endif
 # Disable it on all other architectures in case libdw unwind
 # support is detected in system. Add supported architectures
 # to the check.
-ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
   NO_LIBDW_DWARF_UNWIND := 1
 endif
 
@@ -115,9 +115,9 @@ endif
 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
 
-FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
+FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
 # include ARCH specific config
--include $(src-perf)/arch/$(ARCH)/Makefile
+-include $(src-perf)/arch/$(SRCARCH)/Makefile
 
 ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
   CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
@@ -228,12 +228,12 @@ ifeq ($(DEBUG),0)
 endif
 
 INC_FLAGS += -I$(src-perf)/util/include
-INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include
+INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
 INC_FLAGS += -I$(srctree)/tools/include/uapi
 INC_FLAGS += -I$(srctree)/tools/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/
 
 # $(obj-perf)      for generated common-cmds.h
 # $(obj-perf)/util for generated bison/flex headers
@@ -355,7 +355,7 @@ ifndef NO_LIBELF
 
   ifndef NO_DWARF
     ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
-      msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
+      msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
       NO_DWARF := 1
     else
       CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
@@ -380,7 +380,7 @@ ifndef NO_LIBELF
         CFLAGS += -DHAVE_BPF_PROLOGUE
         $(call detected,CONFIG_BPF_PROLOGUE)
       else
-        msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
+        msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
       endif
     else
       msg := $(warning DWARF support is off, BPF prologue is disabled);
@@ -406,7 +406,7 @@ ifdef PERF_HAVE_JITDUMP
   endif
 endif
 
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
   ifndef NO_DWARF
     CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
   endif
@@ -487,7 +487,7 @@ else
 endif
 
 ifndef NO_LOCAL_LIBUNWIND
-  ifeq ($(ARCH),$(filter $(ARCH),arm arm64))
+  ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
     $(call feature_check,libunwind-debug-frame)
     ifneq ($(feature-libunwind-debug-frame), 1)
       msg := $(warning No debug_frame support found in libunwind);
@@ -740,7 +740,7 @@ ifeq (${IS_64_BIT}, 1)
       NO_PERF_READ_VDSO32 := 1
     endif
   endif
-  ifneq ($(ARCH), x86)
+  ifneq ($(SRCARCH), x86)
     NO_PERF_READ_VDSOX32 := 1
   endif
   ifndef NO_PERF_READ_VDSOX32
@@ -769,7 +769,7 @@ ifdef LIBBABELTRACE
 endif
 
 ifndef NO_AUXTRACE
-  ifeq ($(ARCH),x86)
+  ifeq ($(SRCARCH),x86)
     ifeq ($(feature-get_cpuid), 0)
       msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
       NO_AUXTRACE := 1
@@ -872,7 +872,7 @@ sysconfdir = $(prefix)/etc
 ETC_PERFCONFIG = etc/perfconfig
 endif
 ifndef lib
-ifeq ($(ARCH)$(IS_64_BIT), x861)
+ifeq ($(SRCARCH)$(IS_64_BIT), x861)
 lib = lib64
 else
 lib = lib
index 79fe31f20a17644e416642bb9d3a213c97286479..5008f51a08a2118ca34723a87373acd231afb7c3 100644 (file)
@@ -226,7 +226,7 @@ endif
 
 ifeq ($(config),0)
 include $(srctree)/tools/scripts/Makefile.arch
--include arch/$(ARCH)/Makefile
+-include arch/$(SRCARCH)/Makefile
 endif
 
 # The FEATURE_DUMP_EXPORT holds location of the actual
index 109eb75cf7de4e97c855fe52c3d6437f454b5c8e..d9b6af837c7d392fce6168d9ce5ebe2b9a6b57d5 100644 (file)
@@ -1,2 +1,2 @@
 libperf-y += common.o
-libperf-y += $(ARCH)/
+libperf-y += $(SRCARCH)/
index 9213a1273697a801d6b75f4a8804424e68eb6ab7..999a4e8781621677821440e80ec8491689bbc6e3 100644 (file)
@@ -2,7 +2,7 @@ hostprogs := jevents
 
 jevents-y      += json.o jsmn.o jevents.o
 pmu-events-y   += pmu-events.o
-JDIR           =  pmu-events/arch/$(ARCH)
+JDIR           =  pmu-events/arch/$(SRCARCH)
 JSON           =  $(shell [ -d $(JDIR) ] &&                            \
                        find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
 #
@@ -10,4 +10,4 @@ JSON          =  $(shell [ -d $(JDIR) ] &&                            \
 # directory and create tables in pmu-events.c.
 #
 $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
-       $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+       $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
index af58ebc243ef635ff2ddc6efcd26b377cc24a30b..84222bdb8689203924fc8f6a75f0baac4531d82c 100644 (file)
@@ -75,7 +75,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B
        $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
        $(Q)echo ';' >> $@
 
-ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc))
+ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
 endif
 
index 32873ec91a4e1307b30884c1141fe0e8f1da154e..cf00ebad2ef5ccefabee6324e004fc9b4d2e601b 100644 (file)
@@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused)
 
        evsel = perf_evlist__first(evlist);
        evsel->attr.task = 1;
-       evsel->attr.sample_freq = 0;
+       evsel->attr.sample_freq = 1;
        evsel->attr.inherit = 0;
        evsel->attr.watermark = 0;
        evsel->attr.wakeup_events = 1;
index e4f7902d5afa62f0aecab1103bc05ca4bcb226e9..cda44b0e821c63baea1f9da52123184768fdd2f4 100644 (file)
@@ -273,8 +273,20 @@ struct perf_evsel *perf_evsel__new_cycles(void)
        struct perf_evsel *evsel;
 
        event_attr_init(&attr);
+       /*
+        * Unnamed union member, not supported as struct member named
+        * initializer in older compilers such as gcc 4.4.7
+        *
+        * Just for probing the precise_ip:
+        */
+       attr.sample_period = 1;
 
        perf_event_attr__set_max_precise_ip(&attr);
+       /*
+        * Now let the usual logic to set up the perf_event_attr defaults
+        * to kick in when we return and before perf_evsel__open() is called.
+        */
+       attr.sample_period = 0;
 
        evsel = perf_evsel__new(&attr);
        if (evsel == NULL)
index 5cac8d5e009a88ff096d9e2f8026e39e8567c595..b5baff3007bbd477551cc2e62770e534d0919f33 100644 (file)
@@ -841,7 +841,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
 
 /*
  * default get_cpuid(): nothing gets recorded
- * actual implementation must be in arch/$(ARCH)/util/header.c
+ * actual implementation must be in arch/$(SRCARCH)/util/header.c
  */
 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
 {
index da45c4be5fb3e77ee59131602667b4d675bc3a40..7755a5e0fe5eb290c41502bf22725cce8a0b6935 100644 (file)
@@ -178,6 +178,14 @@ frame_callback(Dwfl_Frame *state, void *arg)
        Dwarf_Addr pc;
        bool isactivation;
 
+       if (!dwfl_frame_pc(state, &pc, NULL)) {
+               pr_err("%s", dwfl_errmsg(-1));
+               return DWARF_CB_ABORT;
+       }
+
+       // report the module before we query for isactivation
+       report_module(pc, ui);
+
        if (!dwfl_frame_pc(state, &pc, &isactivation)) {
                pr_err("%s", dwfl_errmsg(-1));
                return DWARF_CB_ABORT;
index 19d0604f86946824b04138e8bcaf29f3628ab808..487cbfb89beb5012816c7e4989b3f9ff4261995d 100644 (file)
@@ -1,23 +1,42 @@
 #ifndef __BPF_ENDIAN__
 #define __BPF_ENDIAN__
 
-#include <asm/byteorder.h>
+#include <linux/swab.h>
 
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-# define __bpf_ntohs(x)                __builtin_bswap16(x)
-# define __bpf_htons(x)                __builtin_bswap16(x)
-#elif __BYTE_ORDER == __BIG_ENDIAN
-# define __bpf_ntohs(x)                (x)
-# define __bpf_htons(x)                (x)
+/* LLVM's BPF target selects the endianness of the CPU
+ * it compiles on, or the user specifies (bpfel/bpfeb),
+ * respectively. The used __BYTE_ORDER__ is defined by
+ * the compiler, we cannot rely on __BYTE_ORDER from
+ * libc headers, since it doesn't reflect the actual
+ * requested byte order.
+ *
+ * Note, LLVM's BPF target has different __builtin_bswapX()
+ * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
+ * in bpfel and bpfeb case, which means below, that we map
+ * to cpu_to_be16(). We could use it unconditionally in BPF
+ * case, but better not rely on it, so that this header here
+ * can be used from application and BPF program side, which
+ * use different targets.
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define __bpf_ntohs(x)                        __builtin_bswap16(x)
+# define __bpf_htons(x)                        __builtin_bswap16(x)
+# define __bpf_constant_ntohs(x)       ___constant_swab16(x)
+# define __bpf_constant_htons(x)       ___constant_swab16(x)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define __bpf_ntohs(x)                        (x)
+# define __bpf_htons(x)                        (x)
+# define __bpf_constant_ntohs(x)       (x)
+# define __bpf_constant_htons(x)       (x)
 #else
-# error "Fix your __BYTE_ORDER?!"
+# error "Fix your compiler's __BYTE_ORDER__?!"
 #endif
 
 #define bpf_htons(x)                           \
        (__builtin_constant_p(x) ?              \
-        __constant_htons(x) : __bpf_htons(x))
+        __bpf_constant_htons(x) : __bpf_htons(x))
 #define bpf_ntohs(x)                           \
        (__builtin_constant_p(x) ?              \
-        __constant_ntohs(x) : __bpf_ntohs(x))
+        __bpf_constant_ntohs(x) : __bpf_ntohs(x))
 
-#endif
+#endif /* __BPF_ENDIAN__ */
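
With the __bpf_constant_*() variants in place, bpf_htons()/bpf_ntohs() keep producing an integer constant expression when the argument is one (useful, for example, in switch case labels), while runtime values still go through the builtin byte swap on little-endian targets. Usage sketch, assuming the header above is included; "port" stands for some runtime value:

    __u16 ip_proto_be = bpf_htons(0x0800);  /* ETH_P_IP: constant, folded at compile time  */
    __u16 dst_port_be = bpf_htons(port);    /* runtime value: __builtin_bswap16 on bpfel   */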