Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 13 Aug 2017 22:34:28 +0000 (15:34 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 13 Aug 2017 22:34:28 +0000 (15:34 -0700)
Pull MIPS fixes from Ralf Baechle:
 "Another round of MIPS fixes:

   - compressed boot: Ignore a generated .c file

   - VDSO: Fix a register clobber list

   - DECstation: Fix an int-handler.S CPU_DADDI_WORKAROUNDS regression

   - Octeon: Fix recent cleanups that cleaned away a bit too much, thus
     breaking the arch side of the EDAC and USB drivers.

   - uasm: Fix duplicate const in "const struct foo const bar[]", which
     GCC 7.1 no longer accepts (sketched after the quoted message).

   - Fix race on setting and getting cpu_online_mask

   - Fix a preemption issue. To do so cleanly, introduce a macro to get
     the L3 cache line size.

   - Revert an include cleanup that sometimes results in build errors

   - MicroMIPS uses bit 0 of the PC to indicate microMIPS mode. Make
     sure this bit is set for kernel entry as well.

   - Prevent configuring the kernel for both microMIPS and MT. There are
     no such CPUs currently and thus the combination is unsupported and
     results in build errors.

  This has been sitting in linux-next for a few days and has survived
  automated testing by Imagination's test farm. No known regressions
  pending except a number of issues that crept up due to lots of people
  switching to GCC 7.1"
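
The uasm fix called out above is essentially a one-qualifier deletion;
as a rough sketch (using the insn_table name from the shortlog below):

    /* gcc 7.1: error: duplicate 'const' declaration specifier */
    static const struct insn const insn_table[] = { /* ... */ };

    /* accepted */
    static const struct insn insn_table[] = { /* ... */ };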

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: Set ISA bit in entry-y for microMIPS kernels
  MIPS: Prevent building MT support for microMIPS kernels
  MIPS: PCI: Fix smp_processor_id() in preemptible
  MIPS: Introduce cpu_tcache_line_size
  MIPS: DEC: Fix an int-handler.S CPU_DADDI_WORKAROUNDS regression
  MIPS: VDSO: Fix clobber lists in fallback code paths
  Revert "MIPS: Don't unnecessarily include kmalloc.h into <asm/cache.h>."
  MIPS: OCTEON: Fix USB platform code breakage.
  MIPS: Octeon: Fix broken EDAC driver.
  MIPS: gitignore: ignore generated .c files
  MIPS: Fix race on setting and getting cpu_online_mask
  MIPS: mm: remove duplicate "const" qualifier on insn_table

276 files changed:
Documentation/fb/efifb.txt
Documentation/gpio/gpio-legacy.txt
MAINTAINERS
arch/arm/include/asm/tlb.h
arch/ia64/include/asm/tlb.h
arch/mips/net/ebpf_jit.c [new file with mode: 0644]
arch/powerpc/configs/powernv_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/watchdog.c
arch/powerpc/platforms/powernv/idle.c
arch/s390/include/asm/tlb.h
arch/s390/net/bpf_jit_comp.c
arch/sh/include/asm/tlb.h
arch/sparc/include/asm/spitfire.h
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/head_64.S
arch/sparc/kernel/setup_64.c
arch/sparc/mm/init_64.c
arch/um/include/asm/tlb.h
arch/x86/include/asm/hypervisor.h
arch/x86/mm/init.c
arch/x86/xen/enlighten_hvm.c
arch/xtensa/include/asm/Kbuild
arch/xtensa/include/asm/device.h [deleted file]
arch/xtensa/include/asm/param.h [deleted file]
arch/xtensa/kernel/xtensa_ksyms.c
arch/xtensa/mm/cache.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/bio-integrity.c
block/blk-mq.c
drivers/acpi/spcr.c
drivers/base/firmware_class.c
drivers/block/sunvdc.c
drivers/block/zram/zram_drv.c
drivers/char/random.c
drivers/cpuidle/cpuidle-powernv.c
drivers/crypto/inside-secure/safexcel_hash.c
drivers/dma-buf/sync_file.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/stm/Kconfig
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/i2c-core-acpi.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core.h
drivers/i2c/muxes/Kconfig
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/aspeed_adc.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/sun4i-gpadc-iio.c
drivers/iio/adc/vf610_adc.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/light/tsl2563.c
drivers/iio/pressure/st_pressure_core.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/iommu/arm-smmu.c
drivers/isdn/hysdn/hysdn_proclog.c
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/nand/atmel/nand-controller.c
drivers/mtd/nand/atmel/pmecc.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_timings.c
drivers/mtd/nand/sunxi_nand.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mt7530.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/cpts.h
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/asix.h
drivers/net/usb/asix_common.c
drivers/net/usb/asix_devices.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/nvme/target/fc.c
drivers/pci/pci.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
drivers/pinctrl/zte/pinctrl-zx.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_private.h
drivers/s390/net/qeth_l3_main.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/sg.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/iio/resolver/ad2s1210.c
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/thunderbolt/eeprom.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/amba-pl011.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/xhci-pci.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/rcar3.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/storage/unusual_uas.h
drivers/usb/storage/usb.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/omap2/omapfb/dss/core.c
drivers/xen/events/events_base.c
drivers/xen/xenbus/xenbus_xs.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/nfs/Kconfig
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/nfs4proc.c
fs/proc/meminfo.c
fs/proc/task_mmu.c
fs/userfaultfd.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_log_cil.c
include/asm-generic/tlb.h
include/linux/acpi.h
include/linux/cpuhotplug.h
include/linux/device.h
include/linux/i2c.h
include/linux/iio/common/st_sensors.h
include/linux/mlx4/device.h
include/linux/mlx5/qp.h
include/linux/mm_types.h
include/linux/mtd/nand.h
include/linux/nvme-fc-driver.h
include/linux/pci.h
include/linux/pinctrl/pinconf-generic.h
include/linux/platform_data/st_sensors_pdata.h
include/linux/ptp_clock_kernel.h
include/linux/sync_file.h
include/net/tcp.h
include/target/iscsi/iscsi_target_core.h
include/uapi/drm/msm_drm.h
kernel/fork.c
kernel/futex.c
kernel/power/snapshot.c
lib/fault-inject.c
lib/test_kmod.c
mm/balloon_compaction.c
mm/debug.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memory.c
mm/migrate.c
mm/mprotect.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/util.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/core/dev.c
net/ipv4/af_inet.c
net/ipv4/cipso_ipv4.c
net/ipv4/fou.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/udp_offload.c
net/packet/af_packet.c
net/rds/ib_recv.c
net/sched/act_ipt.c
net/tipc/node.c
scripts/get_maintainer.pl
scripts/parse-maintainers.pl
tools/build/feature/test-bpf.c
tools/lib/bpf/bpf.c
tools/testing/selftests/bpf/test_pkt_md_access.c
tools/testing/selftests/bpf/test_verifier.c

diff --git a/Documentation/fb/efifb.txt b/Documentation/fb/efifb.txt
index a59916c29b3312cd4946a1d9a8da2331819e7845..1a85c1bdaf38a9ae7fb8b6555afc30abae661a20 100644
--- a/Documentation/fb/efifb.txt
+++ b/Documentation/fb/efifb.txt
@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
        Macbook Pro 17", iMac 20" :
                video=efifb:i20
 
+Accepted options:
+
+nowc   Don't map the framebuffer write combined. This can be used
+       to workaround side-effects and slowdowns on other CPU cores
+       when large amounts of console data are written.
+
 --
 Edgar Hucek <gimli@dark-green.com>
diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt
index b34fd94f70898a7f65c2a0313349588411eb8e81..5eacc147ea870c80bb06c38d43bd5b662c171194 100644
--- a/Documentation/gpio/gpio-legacy.txt
+++ b/Documentation/gpio/gpio-legacy.txt
@@ -459,7 +459,7 @@ pin controller?
 
 This is done by registering "ranges" of pins, which are essentially
 cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst
 
 While the pin allocation is totally managed by the pinctrl subsystem,
 gpio (under gpiolib) is still maintained by gpio drivers. It may happen
diff --git a/MAINTAINERS b/MAINTAINERS
index 44cb004c765d5bc3e9b71844b08fca7f204cae61..6f7721d1634c2eb7247538f2cb4d85fa1be1a458 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1161,7 +1161,7 @@ M:        Brendan Higgins <brendanhiggins@google.com>
 R:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
 R:     Joel Stanley <joel@jms.id.au>
 L:     linux-i2c@vger.kernel.org
-L:     openbmc@lists.ozlabs.org
+L:     openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/irqchip/irq-aspeed-i2c-ic.c
 F:     drivers/i2c/busses/i2c-aspeed.c
@@ -5834,7 +5834,7 @@ F:        drivers/staging/greybus/spi.c
 F:     drivers/staging/greybus/spilib.c
 F:     drivers/staging/greybus/spilib.h
 
-GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
+GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
 M:     Bryan O'Donoghue <pure.logic@nexus-software.ie>
 S:     Maintained
 F:     drivers/staging/greybus/loopback.c
@@ -10383,7 +10383,7 @@ L:      linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/
-F:     Documentation/pinctrl.txt
+F:     Documentation/driver-api/pinctl.rst
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
 
@@ -14004,6 +14004,7 @@ F:      drivers/block/virtio_blk.c
 F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
+F:     mm/balloon_compaction.c
 
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3f2eb76243e3c5f9d387959acae740ce871e5afa..d5562f9ce60079139d360e5d6afac59469051454 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->range_start = start;
+               tlb->range_end = end;
+       }
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index fced197b96264e01b20743e90706ed20cf30b242..cbe5ac3699bf0f9dbdfd726c112f6fc6bd1271f0 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force)
+               tlb->need_flush = 1;
        /*
         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
         * tlb->end_addr.
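
(For context: the arch hooks above gain a "force" flag because the
generic wrappers in mm/memory.c -- also touched by this merge -- now
decide whether a concurrent, deferred flush on an overlapping range
requires forcing one.  A sketch of what those wrappers plausibly look
like, assuming the inc/dec_tlb_flush_pending() and
mm_tlb_flush_nested() helpers from include/linux/mm_types.h:)

        void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                            unsigned long start, unsigned long end)
        {
                arch_tlb_gather_mmu(tlb, mm, start, end);
                inc_tlb_flush_pending(tlb->mm);
        }

        void tlb_finish_mmu(struct mmu_gather *tlb,
                            unsigned long start, unsigned long end)
        {
                /* Another thread may have a deferred flush pending on
                 * an overlapping range; force a flush in that case. */
                bool force = mm_tlb_flush_nested(tlb->mm);

                arch_tlb_finish_mmu(tlb, start, end, force);
                dec_tlb_flush_pending(tlb->mm);
        }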
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 0000000..3f87b96
--- /dev/null
+++ b/arch/mips/net/ebpf_jit.c
@@ -0,0 +1,1950 @@
+/*
+ * Just-In-Time compiler for eBPF filters on MIPS
+ *
+ * Copyright (c) 2017 Cavium, Inc.
+ *
+ * Based on code from:
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO    0
+#define MIPS_R_AT      1
+#define MIPS_R_V0      2       /* BPF_R0 */
+#define MIPS_R_V1      3
+#define MIPS_R_A0      4       /* BPF_R1 */
+#define MIPS_R_A1      5       /* BPF_R2 */
+#define MIPS_R_A2      6       /* BPF_R3 */
+#define MIPS_R_A3      7       /* BPF_R4 */
+#define MIPS_R_A4      8       /* BPF_R5 */
+#define MIPS_R_T4      12      /* BPF_AX */
+#define MIPS_R_T5      13
+#define MIPS_R_T6      14
+#define MIPS_R_T7      15
+#define MIPS_R_S0      16      /* BPF_R6 */
+#define MIPS_R_S1      17      /* BPF_R7 */
+#define MIPS_R_S2      18      /* BPF_R8 */
+#define MIPS_R_S3      19      /* BPF_R9 */
+#define MIPS_R_S4      20      /* BPF_TCC */
+#define MIPS_R_S5      21
+#define MIPS_R_S6      22
+#define MIPS_R_S7      23
+#define MIPS_R_T8      24
+#define MIPS_R_T9      25
+#define MIPS_R_SP      29
+#define MIPS_R_RA      31
+
+/* eBPF flags */
+#define EBPF_SAVE_S0   BIT(0)
+#define EBPF_SAVE_S1   BIT(1)
+#define EBPF_SAVE_S2   BIT(2)
+#define EBPF_SAVE_S3   BIT(3)
+#define EBPF_SAVE_S4   BIT(4)
+#define EBPF_SAVE_RA   BIT(5)
+#define EBPF_SEEN_FP   BIT(6)
+#define EBPF_SEEN_TC   BIT(7)
+#define EBPF_TCC_IN_V1 BIT(8)
+
+/*
+ * For the mips64 ISA, we need to track the value range or type for
+ * each JIT register.  The BPF machine requires zero extended 32-bit
+ * values, but the mips64 ISA requires sign extended 32-bit values.
+ * At each point in the BPF program we track the state of every
+ * register so that we can zero extend or sign extend as the BPF
+ * semantics require.
+ */
+enum reg_val_type {
+       /* uninitialized */
+       REG_UNKNOWN,
+       /* not known to be 32-bit compatible. */
+       REG_64BIT,
+       /* 32-bit compatible, no truncation needed for 64-bit ops. */
+       REG_64BIT_32BIT,
+       /* 32-bit compatible, need truncation for 64-bit ops. */
+       REG_32BIT,
+       /* 32-bit zero extended. */
+       REG_32BIT_ZERO_EX,
+       /* 32-bit no sign/zero extension needed. */
+       REG_32BIT_POS
+};
+
+/*
+ * high bit of offsets indicates if long branch conversion done at
+ * this insn.
+ */
+#define OFFSETS_B_CONV BIT(31)
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf:               The sk_filter
+ * @stack_size:                eBPF stack size
+ * @tmp_offset:                eBPF $sp offset to 8-byte temporary memory
+ * @idx:               Instruction index
+ * @flags:             JIT flags
+ * @offsets:           Instruction offsets
+ * @target:            Memory location for the compiled filter
+ * @reg_val_types:     Packed enum reg_val_type for each register.
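+ * @long_b_conversion: Set when a branch is converted to a long branch
+ * @gen_b_offsets:     Compute real branch offsets (b_imm() returns 0 until set)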
+ */
+struct jit_ctx {
+       const struct bpf_prog *skf;
+       int stack_size;
+       int tmp_offset;
+       u32 idx;
+       u32 flags;
+       u32 *offsets;
+       u32 *target;
+       u64 *reg_val_types;
+       unsigned int long_b_conversion:1;
+       unsigned int gen_b_offsets:1;
+};
+
+static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
+{
+       *rvt &= ~(7ull << (reg * 3));
+       *rvt |= ((u64)type << (reg * 3));
+}
+
+static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
+                                         int index, int reg)
+{
+       return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
+}
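+
+/*
+ * Worked example: each register's state takes 3 bits of the packed
+ * u64, so marking BPF_REG_2 as REG_32BIT (enum value 3) clears bits
+ * 6..8 and stores 0b011 there; get_reg_val_type() pulls the same
+ * 3 bits back out for a given instruction index.
+ */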
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...)                     \
+do {                                                   \
+       if ((ctx)->target != NULL) {                    \
+               u32 *p = &(ctx)->target[ctx->idx];      \
+               uasm_i_##func(&p, ##__VA_ARGS__);       \
+       }                                               \
+       (ctx)->idx++;                                   \
+} while (0)
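+
+/*
+ * Note the unconditional ctx->idx++ above: with ctx->target == NULL
+ * the JIT runs a sizing pass that only counts instructions and fixes
+ * up offsets; once the image is allocated, a second pass emits the
+ * real code (the usual two-pass JIT scheme).
+ */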
+
+static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
+{
+       unsigned long target_va, base_va;
+       unsigned int r;
+
+       if (!ctx->target)
+               return 0;
+
+       base_va = (unsigned long)ctx->target;
+       target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
+
+       if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
+               return (unsigned int)-1;
+       r = target_va & 0x0ffffffful;
+       return r;
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+       if (!ctx->gen_b_offsets)
+               return 0;
+
+       /*
+        * We want a pc-relative branch.  tgt is the instruction offset
+        * we want to jump to.
+        *
+        * Branch on MIPS:
+        * I: target_offset <- sign_extend(offset)
+        * I+1: PC += target_offset (delay slot)
+        *
+        * ctx->idx currently points to the branch instruction
+        * but the offset is added to the delay slot so we need
+        * to subtract 4.
+        */
+       return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
+               (ctx->idx * 4) - 4;
+}
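+
+/*
+ * Worked example: for a branch at ctx->idx == 10 whose target insn
+ * starts at byte offset 64 in the image, b_imm() yields
+ * 64 - 10*4 - 4 == 20, i.e. five instructions past the delay slot.
+ */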
+
+int bpf_jit_enable __read_mostly;
+
+enum which_ebpf_reg {
+       src_reg,
+       src_reg_no_fp,
+       dst_reg,
+       dst_reg_fp_ok
+};
+
+/*
+ * For eBPF, the register mapping naturally falls out of the
+ * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
+ * separate frame pointer, so BPF_REG_10 relative accesses are
+ * adjusted to be $sp relative.
+ */
+int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
+                    enum which_ebpf_reg w)
+{
+       int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
+               insn->src_reg : insn->dst_reg;
+
+       switch (ebpf_reg) {
+       case BPF_REG_0:
+               return MIPS_R_V0;
+       case BPF_REG_1:
+               return MIPS_R_A0;
+       case BPF_REG_2:
+               return MIPS_R_A1;
+       case BPF_REG_3:
+               return MIPS_R_A2;
+       case BPF_REG_4:
+               return MIPS_R_A3;
+       case BPF_REG_5:
+               return MIPS_R_A4;
+       case BPF_REG_6:
+               ctx->flags |= EBPF_SAVE_S0;
+               return MIPS_R_S0;
+       case BPF_REG_7:
+               ctx->flags |= EBPF_SAVE_S1;
+               return MIPS_R_S1;
+       case BPF_REG_8:
+               ctx->flags |= EBPF_SAVE_S2;
+               return MIPS_R_S2;
+       case BPF_REG_9:
+               ctx->flags |= EBPF_SAVE_S3;
+               return MIPS_R_S3;
+       case BPF_REG_10:
+               if (w == dst_reg || w == src_reg_no_fp)
+                       goto bad_reg;
+               ctx->flags |= EBPF_SEEN_FP;
+               /*
+                * Needs special handling, return something that
+                * cannot be clobbered just in case.
+                */
+               return MIPS_R_ZERO;
+       case BPF_REG_AX:
+               return MIPS_R_T4;
+       default:
+bad_reg:
+               WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
+               return -EINVAL;
+       }
+}
+
+/*
+ * eBPF stack frame will be something like:
+ *
+ *  Entry $sp ------>   +--------------------------------+
+ *                      |   $ra  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s0  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s1  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s2  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s3  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s4  (optional)              |
+ *                      +--------------------------------+
+ *                      |   tmp-storage  (if $ra saved)  |
+ * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
+ *                      |   BPF_REG_10 relative storage  |
+ *                      |    MAX_BPF_STACK (optional)    |
+ *                      |      .                         |
+ *                      |      .                         |
+ *                      |      .                         |
+ *     $sp -------->    +--------------------------------+
+ *
+ * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
+ * area is not allocated.
+ */
+static int gen_int_prologue(struct jit_ctx *ctx)
+{
+       int stack_adjust = 0;
+       int store_offset;
+       int locals_size;
+
+       if (ctx->flags & EBPF_SAVE_RA)
+               /*
+                * If $ra is being saved, we are making function calls
+                * and may need an extra 8-byte tmp area in addition to
+                * the 8 bytes for $ra itself.
+                */
+               stack_adjust += 16;
+       if (ctx->flags & EBPF_SAVE_S0)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S1)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S2)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S3)
+               stack_adjust += 8;
+       if (ctx->flags & EBPF_SAVE_S4)
+               stack_adjust += 8;
+
+       BUILD_BUG_ON(MAX_BPF_STACK & 7);
+       locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
+
+       stack_adjust += locals_size;
+       ctx->tmp_offset = locals_size;
+
+       ctx->stack_size = stack_adjust;
+
+       /*
+        * First instruction initializes the tail call count (TCC).
+        * On tail call we skip this instruction, and the TCC is
+        * passed in $v1 from the caller.
+        */
+       emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+       if (stack_adjust)
+               emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+       else
+               return 0;
+
+       store_offset = stack_adjust - 8;
+
+       if (ctx->flags & EBPF_SAVE_RA) {
+               emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S0) {
+               emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S1) {
+               emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S2) {
+               emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S3) {
+               emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S4) {
+               emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+
+       if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
+               emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+
+       return 0;
+}
+
+static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       int stack_adjust = ctx->stack_size;
+       int store_offset = stack_adjust - 8;
+       int r0 = MIPS_R_V0;
+
+       if (dest_reg == MIPS_R_RA &&
+           get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+               /* Don't let zero extended value escape. */
+               emit_instr(ctx, sll, r0, r0, 0);
+
+       if (ctx->flags & EBPF_SAVE_RA) {
+               emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S0) {
+               emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S1) {
+               emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S2) {
+               emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S3) {
+               emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       if (ctx->flags & EBPF_SAVE_S4) {
+               emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
+               store_offset -= 8;
+       }
+       emit_instr(ctx, jr, dest_reg);
+
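+       /* jr's delay slot carries the stack restore, or a nop. */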
+       if (stack_adjust)
+               emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
+       else
+               emit_instr(ctx, nop);
+
+       return 0;
+}
+
+static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
+                          struct jit_ctx *ctx)
+{
+       if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+               emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
+       } else {
+               int lower = (s16)(insn->imm & 0xffff);
+               int upper = insn->imm - lower;
+
+               emit_instr(ctx, lui, reg, upper >> 16);
+               emit_instr(ctx, addiu, reg, reg, lower);
+       }
+
+}
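+
+/*
+ * Worked example for the two-instruction case: for imm == 0x12348765,
+ * lower == (s16)0x8765 == -0x789b and upper == 0x12350000, so the
+ * emitted "lui reg, 0x1235; addiu reg, reg, -0x789b" produces
+ * 0x12350000 - 0x789b == 0x12348765.  Computing upper as imm - lower
+ * pre-compensates for addiu's sign extension of the low half.
+ */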
+
+static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+                       int idx)
+{
+       int upper_bound, lower_bound;
+       int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+
+       if (dst < 0)
+               return dst;
+
+       switch (BPF_OP(insn->code)) {
+       case BPF_MOV:
+       case BPF_ADD:
+               upper_bound = S16_MAX;
+               lower_bound = S16_MIN;
+               break;
+       case BPF_SUB:
+               upper_bound = -(int)S16_MIN;
+               lower_bound = -(int)S16_MAX;
+               break;
+       case BPF_AND:
+       case BPF_OR:
+       case BPF_XOR:
+               upper_bound = 0xffff;
+               lower_bound = 0;
+               break;
+       case BPF_RSH:
+       case BPF_LSH:
+       case BPF_ARSH:
+               /* Shift amounts are truncated, no need for bounds */
+               upper_bound = S32_MAX;
+               lower_bound = S32_MIN;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /*
+        * Immediate move clobbers the register, so no sign/zero
+        * extension needed.
+        */
+       if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+           BPF_OP(insn->code) != BPF_MOV &&
+           get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
+               emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+       /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
+       if (BPF_CLASS(insn->code) == BPF_ALU &&
+           BPF_OP(insn->code) != BPF_LSH &&
+           BPF_OP(insn->code) != BPF_MOV &&
+           get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
+               emit_instr(ctx, sll, dst, dst, 0);
+
+       if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
+               /* single insn immediate case */
+               switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+               case BPF_ALU64 | BPF_MOV:
+                       emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_AND:
+               case BPF_ALU | BPF_AND:
+                       emit_instr(ctx, andi, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_OR:
+               case BPF_ALU | BPF_OR:
+                       emit_instr(ctx, ori, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_XOR:
+               case BPF_ALU | BPF_XOR:
+                       emit_instr(ctx, xori, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_ADD:
+                       emit_instr(ctx, daddiu, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_SUB:
+                       emit_instr(ctx, daddiu, dst, dst, -insn->imm);
+                       break;
+               case BPF_ALU64 | BPF_RSH:
+                       emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
+                       break;
+               case BPF_ALU | BPF_RSH:
+                       emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
+                       break;
+               case BPF_ALU64 | BPF_LSH:
+                       emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
+                       break;
+               case BPF_ALU | BPF_LSH:
+                       emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
+                       break;
+               case BPF_ALU64 | BPF_ARSH:
+                       emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
+                       break;
+               case BPF_ALU | BPF_ARSH:
+                       emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
+                       break;
+               case BPF_ALU | BPF_MOV:
+                       emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
+                       break;
+               case BPF_ALU | BPF_ADD:
+                       emit_instr(ctx, addiu, dst, dst, insn->imm);
+                       break;
+               case BPF_ALU | BPF_SUB:
+                       emit_instr(ctx, addiu, dst, dst, -insn->imm);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else {
+               /* multi insn immediate case */
+               if (BPF_OP(insn->code) == BPF_MOV) {
+                       gen_imm_to_reg(insn, dst, ctx);
+               } else {
+                       gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+                       switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+                       case BPF_ALU64 | BPF_AND:
+                       case BPF_ALU | BPF_AND:
+                               emit_instr(ctx, and, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_OR:
+                       case BPF_ALU | BPF_OR:
+                               emit_instr(ctx, or, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_XOR:
+                       case BPF_ALU | BPF_XOR:
+                               emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_ADD:
+                               emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU64 | BPF_SUB:
+                               emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU | BPF_ADD:
+                               emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
+                               break;
+                       case BPF_ALU | BPF_SUB:
+                               emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static void * __must_check
+ool_skb_header_pointer(const struct sk_buff *skb, int offset,
+                      int len, void *buffer)
+{
+       return skb_header_pointer(skb, offset, len, buffer);
+}
+
+static int size_to_len(const struct bpf_insn *insn)
+{
+       switch (BPF_SIZE(insn->code)) {
+       case BPF_B:
+               return 1;
+       case BPF_H:
+               return 2;
+       case BPF_W:
+               return 4;
+       case BPF_DW:
+               return 8;
+       }
+       return 0;
+}
+
+static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
+{
+       if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
+               emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
+       } else if (value >= 0xffffffff80000000ull ||
+                  (value < 0x80000000 && value > 0xffff)) {
+               emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
+               emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
+       } else {
+               int i;
+               bool seen_part = false;
+               int needed_shift = 0;
+
+               for (i = 0; i < 4; i++) {
+                       u64 part = (value >> (16 * (3 - i))) & 0xffff;
+
+                       if (seen_part && needed_shift > 0 && (part || i == 3)) {
+                               emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
+                               needed_shift = 0;
+                       }
+                       if (part) {
+                               if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
+                                       emit_instr(ctx, lui, dst, (s32)(s16)part);
+                                       needed_shift = -16;
+                               } else {
+                                       emit_instr(ctx, ori, dst,
+                                                  seen_part ? dst : MIPS_R_ZERO,
+                                                  (unsigned int)part);
+                               }
+                               seen_part = true;
+                       }
+                       if (seen_part)
+                               needed_shift += 16;
+               }
+       }
+}
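+
+/*
+ * Worked example: value == 0x123456789abc falls through to the loop
+ * and is built up as:
+ *   lui  dst, 0x1234         # dst = 0x12340000
+ *   ori  dst, dst, 0x5678    # dst = 0x12345678
+ *   dsll dst, dst, 16        # dst = 0x123456780000
+ *   ori  dst, dst, 0x9abc    # dst = 0x123456789abc
+ * The initial needed_shift of -16 accounts for the 16-bit shift that
+ * lui already performs.
+ */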
+
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
+{
+       int off, b_off;
+
+       ctx->flags |= EBPF_SEEN_TC;
+       /*
+        * if (index >= array->map.max_entries)
+        *     goto out;
+        */
+       off = offsetof(struct bpf_array, map.max_entries);
+       emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
+       emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
+       b_off = b_imm(this_idx + 1, ctx);
+       emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
+       /*
+        * if (--TCC < 0)
+        *     goto out;
+        */
+       /* Delay slot */
+       emit_instr(ctx, daddiu, MIPS_R_T5,
+                  (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+       b_off = b_imm(this_idx + 1, ctx);
+       emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+       /*
+        * prog = array->ptrs[index];
+        * if (prog == NULL)
+        *     goto out;
+        */
+       /* Delay slot */
+       emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
+       emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
+       off = offsetof(struct bpf_array, ptrs);
+       emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
+       b_off = b_imm(this_idx + 1, ctx);
+       emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
+       /* Delay slot */
+       emit_instr(ctx, nop);
+
+       /* goto *(prog->bpf_func + 4); */
+       off = offsetof(struct bpf_prog, bpf_func);
+       emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
+       /* All systems are go... propagate TCC */
+       emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
+       /* Skip first instruction (TCC initialization) */
+       emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
+       return build_int_epilogue(ctx, MIPS_R_T9);
+}
+
+static bool use_bbit_insns(void)
+{
+       switch (current_cpu_type()) {
+       case CPU_CAVIUM_OCTEON:
+       case CPU_CAVIUM_OCTEON_PLUS:
+       case CPU_CAVIUM_OCTEON2:
+       case CPU_CAVIUM_OCTEON3:
+               return true;
+       default:
+               return false;
+       }
+}
+
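+/*
+ * MIPS conditional branches encode a signed 16-bit word offset, about
+ * +/-128KB relative to the delay slot; offsets outside that range are
+ * converted to an absolute jump via j_target() instead.
+ */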
+static bool is_bad_offset(int b_off)
+{
+       return b_off > 0x1ffff || b_off < -0x20000;
+}
+
+/* Returns the number of insn slots consumed. */
+static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+                         int this_idx, int exit_idx)
+{
+       int src, dst, r, td, ts, mem_off, b_off;
+       bool need_swap, did_move, cmp_eq;
+       unsigned int target;
+       u64 t64;
+       s64 t64s;
+
+       switch (insn->code) {
+       case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
+       case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
+       case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */
+       case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */
+       case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */
+       case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */
+       case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */
+               r = gen_imm_insn(insn, ctx, this_idx);
+               if (r < 0)
+                       return r;
+               break;
+       case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+               if (insn->imm == 1) /* Mult by 1 is a nop */
+                       break;
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, dmultu, MIPS_R_AT, dst);
+               emit_instr(ctx, mflo, dst);
+               break;
+       case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+               emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
+               break;
+       case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+               if (insn->imm == 1) /* Mult by 1 is a nop */
+                       break;
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, multu, dst, MIPS_R_AT);
+               emit_instr(ctx, mflo, dst);
+               break;
+       case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+               emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
+               break;
+       case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
+       case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (insn->imm == 0) { /* Div by zero */
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+               }
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               if (insn->imm == 1) {
+                       /* div by 1 is a nop, mod by 1 is zero */
+                       if (BPF_OP(insn->code) == BPF_MOD)
+                               emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+                       break;
+               }
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, divu, dst, MIPS_R_AT);
+               if (BPF_OP(insn->code) == BPF_DIV)
+                       emit_instr(ctx, mflo, dst);
+               else
+                       emit_instr(ctx, mfhi, dst);
+               break;
+       case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
+       case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               if (insn->imm == 0) { /* Div by zero */
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+               }
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+               if (insn->imm == 1) {
+                       /* div by 1 is a nop, mod by 1 is zero */
+                       if (BPF_OP(insn->code) == BPF_MOD)
+                               emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+                       break;
+               }
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               emit_instr(ctx, ddivu, dst, MIPS_R_AT);
+               if (BPF_OP(insn->code) == BPF_DIV)
+                       emit_instr(ctx, mflo, dst);
+               else
+                       emit_instr(ctx, mfhi, dst);
+               break;
+       case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
+       case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
+               src = ebpf_to_mips_reg(ctx, insn, src_reg);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (src < 0 || dst < 0)
+                       return -EINVAL;
+               if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+               did_move = false;
+               if (insn->src_reg == BPF_REG_10) {
+                       if (BPF_OP(insn->code) == BPF_MOV) {
+                               emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
+                               did_move = true;
+                       } else {
+                               emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
+                               src = MIPS_R_AT;
+                       }
+               } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                       int tmp_reg = MIPS_R_AT;
+
+                       if (BPF_OP(insn->code) == BPF_MOV) {
+                               tmp_reg = dst;
+                               did_move = true;
+                       }
+                       emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
+                       emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
+                       src = MIPS_R_AT;
+               }
+               switch (BPF_OP(insn->code)) {
+               case BPF_MOV:
+                       if (!did_move)
+                               emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
+                       break;
+               case BPF_ADD:
+                       emit_instr(ctx, daddu, dst, dst, src);
+                       break;
+               case BPF_SUB:
+                       emit_instr(ctx, dsubu, dst, dst, src);
+                       break;
+               case BPF_XOR:
+                       emit_instr(ctx, xor, dst, dst, src);
+                       break;
+               case BPF_OR:
+                       emit_instr(ctx, or, dst, dst, src);
+                       break;
+               case BPF_AND:
+                       emit_instr(ctx, and, dst, dst, src);
+                       break;
+               case BPF_MUL:
+                       emit_instr(ctx, dmultu, dst, src);
+                       emit_instr(ctx, mflo, dst);
+                       break;
+               case BPF_DIV:
+               case BPF_MOD:
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+                       emit_instr(ctx, ddivu, dst, src);
+                       if (BPF_OP(insn->code) == BPF_DIV)
+                               emit_instr(ctx, mflo, dst);
+                       else
+                               emit_instr(ctx, mfhi, dst);
+                       break;
+               case BPF_LSH:
+                       emit_instr(ctx, dsllv, dst, dst, src);
+                       break;
+               case BPF_RSH:
+                       emit_instr(ctx, dsrlv, dst, dst, src);
+                       break;
+               case BPF_ARSH:
+                       emit_instr(ctx, dsrav, dst, dst, src);
+                       break;
+               default:
+                       pr_err("ALU64_REG NOT HANDLED\n");
+                       return -EINVAL;
+               }
+               break;
+       case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
+       case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (src < 0 || dst < 0)
+                       return -EINVAL;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+               did_move = false;
+               ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+               if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+                       int tmp_reg = MIPS_R_AT;
+
+                       if (BPF_OP(insn->code) == BPF_MOV) {
+                               tmp_reg = dst;
+                               did_move = true;
+                       }
+                       /* sign extend */
+                       emit_instr(ctx, sll, tmp_reg, src, 0);
+                       src = MIPS_R_AT;
+               }
+               switch (BPF_OP(insn->code)) {
+               case BPF_MOV:
+                       if (!did_move)
+                               emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
+                       break;
+               case BPF_ADD:
+                       emit_instr(ctx, addu, dst, dst, src);
+                       break;
+               case BPF_SUB:
+                       emit_instr(ctx, subu, dst, dst, src);
+                       break;
+               case BPF_XOR:
+                       emit_instr(ctx, xor, dst, dst, src);
+                       break;
+               case BPF_OR:
+                       emit_instr(ctx, or, dst, dst, src);
+                       break;
+               case BPF_AND:
+                       emit_instr(ctx, and, dst, dst, src);
+                       break;
+               case BPF_MUL:
+                       emit_instr(ctx, mul, dst, dst, src);
+                       break;
+               case BPF_DIV:
+               case BPF_MOD:
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+                       emit_instr(ctx, divu, dst, src);
+                       if (BPF_OP(insn->code) == BPF_DIV)
+                               emit_instr(ctx, mflo, dst);
+                       else
+                               emit_instr(ctx, mfhi, dst);
+                       break;
+               case BPF_LSH:
+                       emit_instr(ctx, sllv, dst, dst, src);
+                       break;
+               case BPF_RSH:
+                       emit_instr(ctx, srlv, dst, dst, src);
+                       break;
+               default:
+                       pr_err("ALU_REG NOT HANDLED\n");
+                       return -EINVAL;
+               }
+               break;
+       case BPF_JMP | BPF_EXIT:
+               if (this_idx + 1 < exit_idx) {
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, nop);
+               }
+               break;
+       case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
+       case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
+               cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+               if (insn->imm == 0) {
+                       src = MIPS_R_ZERO;
+               } else {
+                       gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+                       src = MIPS_R_AT;
+               }
+               goto jeq_common;
+       case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
+       case BPF_JMP | BPF_JNE | BPF_X:
+       case BPF_JMP | BPF_JSGT | BPF_X:
+       case BPF_JMP | BPF_JSGE | BPF_X:
+       case BPF_JMP | BPF_JGT | BPF_X:
+       case BPF_JMP | BPF_JGE | BPF_X:
+       case BPF_JMP | BPF_JSET | BPF_X:
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (src < 0 || dst < 0)
+                       return -EINVAL;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+               if (td == REG_32BIT && ts != REG_32BIT) {
+                       emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+                       src = MIPS_R_AT;
+               } else if (ts == REG_32BIT && td != REG_32BIT) {
+                       emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
+                       dst = MIPS_R_AT;
+               }
+               if (BPF_OP(insn->code) == BPF_JSET) {
+                       emit_instr(ctx, and, MIPS_R_AT, dst, src);
+                       cmp_eq = false;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else if (BPF_OP(insn->code) == BPF_JSGT) {
+                       emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
+                       if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                               b_off = b_imm(exit_idx, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_instr(ctx, blez, MIPS_R_AT, b_off);
+                               emit_instr(ctx, nop);
+                               return 2; /* We consumed the exit. */
+                       }
+                       b_off = b_imm(this_idx + insn->off + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+                       emit_instr(ctx, nop);
+                       break;
+               } else if (BPF_OP(insn->code) == BPF_JSGE) {
+                       emit_instr(ctx, slt, MIPS_R_AT, dst, src);
+                       cmp_eq = true;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else if (BPF_OP(insn->code) == BPF_JGT) {
+                       /* dst or src could be AT */
+                       emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
+                       emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+                       /* $sp is known to be non-zero, so movz/movn form a boolean not of T8 */
+                       emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
+                       emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
+                       emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
+                       cmp_eq = true;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else if (BPF_OP(insn->code) == BPF_JGE) {
+                       emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+                       cmp_eq = true;
+                       dst = MIPS_R_AT;
+                       src = MIPS_R_ZERO;
+               } else { /* JNE/JEQ case */
+                       cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+               }
+jeq_common:
+               /*
+                * If the next insn is EXIT and we are jumping around
+                * only it, invert the sense of the compare and
+                * conditionally jump to the exit.  Poor man's branch
+                * chaining.
+                */
+               if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                       b_off = b_imm(exit_idx, ctx);
+                       if (is_bad_offset(b_off)) {
+                               target = j_target(ctx, exit_idx);
+                               if (target == (unsigned int)-1)
+                                       return -E2BIG;
+                               cmp_eq = !cmp_eq;
+                               b_off = 4 * 3;
+                               if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+                                       ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+                                       ctx->long_b_conversion = 1;
+                               }
+                       }
+
+                       if (cmp_eq)
+                               emit_instr(ctx, bne, dst, src, b_off);
+                       else
+                               emit_instr(ctx, beq, dst, src, b_off);
+                       emit_instr(ctx, nop);
+                       if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+                               emit_instr(ctx, j, target);
+                               emit_instr(ctx, nop);
+                       }
+                       return 2; /* We consumed the exit. */
+               }
+               b_off = b_imm(this_idx + insn->off + 1, ctx);
+               if (is_bad_offset(b_off)) {
+                       target = j_target(ctx, this_idx + insn->off + 1);
+                       if (target == (unsigned int)-1)
+                               return -E2BIG;
+                       cmp_eq = !cmp_eq;
+                       b_off = 4 * 3;
+                       if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+                               ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+                               ctx->long_b_conversion = 1;
+                       }
+               }
+
+               if (cmp_eq)
+                       emit_instr(ctx, beq, dst, src, b_off);
+               else
+                       emit_instr(ctx, bne, dst, src, b_off);
+               emit_instr(ctx, nop);
+               if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+                       emit_instr(ctx, j, target);
+                       emit_instr(ctx, nop);
+               }
+               break;
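
(A minimal host-side C model of the unsigned JGT synthesis emitted above; function and variable names are illustrative, not part of this patch:)

#include <assert.h>
#include <stdint.h>

/* Branch is taken when AT ends up zero (beq AT, $zero at jeq_common):
 * T8 = dst - src; AT = (dst < src); T9 = (T8 == 0);
 * AT |= T9  =>  AT != 0  <=>  dst <= src, so AT == 0  <=>  dst > src.
 */
static int jgt_taken(uint64_t dst, uint64_t src)
{
	uint64_t t8 = dst - src;	/* dsubu T8, dst, src */
	uint64_t at = dst < src;	/* sltu  AT, dst, src */
	uint64_t t9 = (t8 == 0);	/* movz/movn pair; $sp stands in for "any non-zero value" */

	at |= t9;			/* or    AT, T9, AT */
	return at == 0;
}

int main(void)
{
	assert(jgt_taken(5, 4) && !jgt_taken(4, 4) && !jgt_taken(3, 4));
	return 0;
}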
+       case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
+       case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
+               cmp_eq = (BPF_OP(insn->code) == BPF_JSGE);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+
+               if (insn->imm == 0) {
+                       if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                               b_off = b_imm(exit_idx, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               if (cmp_eq)
+                                       emit_instr(ctx, bltz, dst, b_off);
+                               else
+                                       emit_instr(ctx, blez, dst, b_off);
+                               emit_instr(ctx, nop);
+                               return 2; /* We consumed the exit. */
+                       }
+                       b_off = b_imm(this_idx + insn->off + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       if (cmp_eq)
+                               emit_instr(ctx, bgez, dst, b_off);
+                       else
+                               emit_instr(ctx, bgtz, dst, b_off);
+                       emit_instr(ctx, nop);
+                       break;
+               }
+               /*
+                * Only a "set on less than" compare is available, so
+                * "GT" must be synthesized as "GE imm + 1".
+                */
+               t64s = insn->imm + (cmp_eq ? 0 : 1);
+               if (t64s >= S16_MIN && t64s <= S16_MAX) {
+                       emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
+                       src = MIPS_R_AT;
+                       dst = MIPS_R_ZERO;
+                       cmp_eq = true;
+                       goto jeq_common;
+               }
+               emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+               emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
+               src = MIPS_R_AT;
+               dst = MIPS_R_ZERO;
+               cmp_eq = true;
+               goto jeq_common;
+
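
(A hedged illustration of the "imm + 1" trick used above: MIPS only sets a flag on "less than", so dst > imm is evaluated as dst >= imm + 1. Names are illustrative:)

#include <assert.h>
#include <stdint.h>

/* Branch taken when AT == 0, i.e. when dst >= imm + 1. */
static int jsgt_taken(int64_t dst, int32_t imm)
{
	int64_t t = (int64_t)imm + 1;	/* the cmp_eq == false adjustment */
	int at = dst < t;		/* slti AT, dst, t (slt for large imm) */

	return at == 0;
}

int main(void)
{
	assert(jsgt_taken(5, 4) && !jsgt_taken(4, 4) && !jsgt_taken(-1, 4));
	return 0;
}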
+       case BPF_JMP | BPF_JGT | BPF_K:
+       case BPF_JMP | BPF_JGE | BPF_K:
+               cmp_eq = (BPF_OP(insn->code) == BPF_JGE);
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+               /*
+                * Only a "set on less than" compare is available, so
+                * "GT" must be synthesized as "GE imm + 1".
+                */
+               t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1);
+               if (t64s >= 0 && t64s <= S16_MAX) {
+                       emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s);
+                       src = MIPS_R_AT;
+                       dst = MIPS_R_ZERO;
+                       cmp_eq = true;
+                       goto jeq_common;
+               }
+               emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+               emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
+               src = MIPS_R_AT;
+               dst = MIPS_R_ZERO;
+               cmp_eq = true;
+               goto jeq_common;
+
+       case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+               if (dst < 0)
+                       return dst;
+
+               if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) {
+                       if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+                               b_off = b_imm(exit_idx, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
+                               emit_instr(ctx, nop);
+                               return 2; /* We consumed the exit. */
+                       }
+                       b_off = b_imm(this_idx + insn->off + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
+                       emit_instr(ctx, nop);
+                       break;
+               }
+               t64 = (u32)insn->imm;
+               emit_const_to_reg(ctx, MIPS_R_AT, t64);
+               emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
+               src = MIPS_R_AT;
+               dst = MIPS_R_ZERO;
+               cmp_eq = false;
+               goto jeq_common;
+
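
(The use_bbit_insns() fast path above applies only when the tested mask has exactly one bit set; a small sketch, with illustrative names, of that check and of the bit index handed to bbit0/bbit1:)

#include <assert.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

static int single_bit_index(uint32_t mask)
{
	if (__builtin_popcount(mask) != 1)	/* hweight32() in the kernel */
		return -1;			/* not a power of two */
	return ffs(mask) - 1;			/* ffs() counts from 1 */
}

int main(void)
{
	assert(single_bit_index(0x80) == 7);
	assert(single_bit_index(0x81) == -1);
	return 0;
}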
+       case BPF_JMP | BPF_JA:
+               /*
+                * Prefer a relative branch for easier debugging, but
+                * fall back to an absolute jump if the branch offset
+                * is out of range.
+                */
+               b_off = b_imm(this_idx + insn->off + 1, ctx);
+               if (is_bad_offset(b_off)) {
+                       target = j_target(ctx, this_idx + insn->off + 1);
+                       if (target == (unsigned int)-1)
+                               return -E2BIG;
+                       emit_instr(ctx, j, target);
+               } else {
+                       emit_instr(ctx, b, b_off);
+               }
+               emit_instr(ctx, nop);
+               break;
+       case BPF_LD | BPF_DW | BPF_IMM:
+               if (insn->src_reg != 0)
+                       return -EINVAL;
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
+               emit_const_to_reg(ctx, dst, t64);
+               return 2; /* Double slot insn */
+
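
(A hedged model of how the 64-bit immediate is assembled above from the two instruction slots; the second slot's imm field carries the high 32 bits:)

#include <assert.h>
#include <stdint.h>

static uint64_t ld_imm64(int32_t imm_lo, int32_t imm_hi)
{
	/* low half zero-extended, high half shifted into place */
	return (uint64_t)(uint32_t)imm_lo | ((uint64_t)imm_hi << 32);
}

int main(void)
{
	assert(ld_imm64(-1, 0) == 0xffffffffull);
	assert(ld_imm64(0, 1) == 0x100000000ull);
	return 0;
}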
+       case BPF_JMP | BPF_CALL:
+               ctx->flags |= EBPF_SAVE_RA;
+               t64s = (s64)insn->imm + (s64)__bpf_call_base;
+               emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
+               emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+               /* delay slot */
+               emit_instr(ctx, nop);
+               break;
+
+       case BPF_JMP | BPF_TAIL_CALL:
+               if (emit_bpf_tail_call(ctx, this_idx))
+                       return -EINVAL;
+               break;
+
+       case BPF_LD | BPF_B | BPF_ABS:
+       case BPF_LD | BPF_H | BPF_ABS:
+       case BPF_LD | BPF_W | BPF_ABS:
+       case BPF_LD | BPF_DW | BPF_ABS:
+               ctx->flags |= EBPF_SAVE_RA;
+
+               gen_imm_to_reg(insn, MIPS_R_A1, ctx);
+               emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+
+               if (insn->imm < 0) {
+                       emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
+               } else {
+                       emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+                       emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+               }
+               goto ld_skb_common;
+
+       case BPF_LD | BPF_B | BPF_IND:
+       case BPF_LD | BPF_H | BPF_IND:
+       case BPF_LD | BPF_W | BPF_IND:
+       case BPF_LD | BPF_DW | BPF_IND:
+               ctx->flags |= EBPF_SAVE_RA;
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               if (src < 0)
+                       return src;
+               ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+               if (ts == REG_32BIT_ZERO_EX) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, MIPS_R_A1, src, 0);
+                       src = MIPS_R_A1;
+               }
+               if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+                       emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
+               } else {
+                       gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+                       emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
+               }
+               /* truncate to 32-bit int */
+               emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
+               emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+               emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
+
+               emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
+               emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+               emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+               emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);
+
+ld_skb_common:
+               emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+               /* delay slot move */
+               emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);
+
+               /* Check the error value */
+               b_off = b_imm(exit_idx, ctx);
+               if (is_bad_offset(b_off)) {
+                       target = j_target(ctx, exit_idx);
+                       if (target == (unsigned int)-1)
+                               return -E2BIG;
+
+                       if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+                               ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+                               ctx->long_b_conversion = 1;
+                       }
+                       emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
+                       emit_instr(ctx, nop);
+                       emit_instr(ctx, j, target);
+                       emit_instr(ctx, nop);
+               } else {
+                       emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
+                       emit_instr(ctx, nop);
+               }
+
+#ifdef __BIG_ENDIAN
+               need_swap = false;
+#else
+               need_swap = true;
+#endif
+               dst = MIPS_R_V0;
+               switch (BPF_SIZE(insn->code)) {
+               case BPF_B:
+                       emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
+                       break;
+               case BPF_H:
+                       emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
+                       if (need_swap)
+                               emit_instr(ctx, wsbh, dst, dst);
+                       break;
+               case BPF_W:
+                       emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
+                       if (need_swap) {
+                               emit_instr(ctx, wsbh, dst, dst);
+                               emit_instr(ctx, rotr, dst, dst, 16);
+                       }
+                       break;
+               case BPF_DW:
+                       emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
+                       if (need_swap) {
+                               emit_instr(ctx, dsbh, dst, dst);
+                               emit_instr(ctx, dshd, dst, dst);
+                       }
+                       break;
+               }
+
+               break;
+       case BPF_ALU | BPF_END | BPF_FROM_BE:
+       case BPF_ALU | BPF_END | BPF_FROM_LE:
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+               if (insn->imm == 64 && td == REG_32BIT)
+                       emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+               if (insn->imm != 64 &&
+                   (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+                       /* sign extend */
+                       emit_instr(ctx, sll, dst, dst, 0);
+               }
+
+#ifdef __BIG_ENDIAN
+               need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
+#else
+               need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
+#endif
+               if (insn->imm == 16) {
+                       if (need_swap)
+                               emit_instr(ctx, wsbh, dst, dst);
+                       emit_instr(ctx, andi, dst, dst, 0xffff);
+               } else if (insn->imm == 32) {
+                       if (need_swap) {
+                               emit_instr(ctx, wsbh, dst, dst);
+                               emit_instr(ctx, rotr, dst, dst, 16);
+                       }
+               } else { /* 64-bit */
+                       if (need_swap) {
+                               emit_instr(ctx, dsbh, dst, dst);
+                               emit_instr(ctx, dshd, dst, dst);
+                       }
+               }
+               break;
+
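
(A hedged model of the wsbh/rotr pair used above for 32-bit swaps: wsbh reverses the bytes within each halfword, then a 16-bit rotate swaps the halfwords, giving a full byte reversal:)

#include <assert.h>
#include <stdint.h>

static uint32_t wsbh_model(uint32_t x)	/* swap bytes within halfwords */
{
	return ((x & 0x00ff00ffu) << 8) | ((x >> 8) & 0x00ff00ffu);
}

static uint32_t bswap32_model(uint32_t x)
{
	x = wsbh_model(x);
	return (x >> 16) | (x << 16);	/* rotr by 16 */
}

int main(void)
{
	assert(bswap32_model(0x11223344u) == 0x44332211u);
	return 0;
}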
+       case BPF_ST | BPF_B | BPF_MEM:
+       case BPF_ST | BPF_H | BPF_MEM:
+       case BPF_ST | BPF_W | BPF_MEM:
+       case BPF_ST | BPF_DW | BPF_MEM:
+               if (insn->dst_reg == BPF_REG_10) {
+                       ctx->flags |= EBPF_SEEN_FP;
+                       dst = MIPS_R_SP;
+                       mem_off = insn->off + MAX_BPF_STACK;
+               } else {
+                       dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+                       if (dst < 0)
+                               return dst;
+                       mem_off = insn->off;
+               }
+               gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+               switch (BPF_SIZE(insn->code)) {
+               case BPF_B:
+                       emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
+                       break;
+               case BPF_H:
+                       emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
+                       break;
+               case BPF_W:
+                       emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
+                       break;
+               case BPF_DW:
+                       emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
+                       break;
+               }
+               break;
+
+       case BPF_LDX | BPF_B | BPF_MEM:
+       case BPF_LDX | BPF_H | BPF_MEM:
+       case BPF_LDX | BPF_W | BPF_MEM:
+       case BPF_LDX | BPF_DW | BPF_MEM:
+               if (insn->src_reg == BPF_REG_10) {
+                       ctx->flags |= EBPF_SEEN_FP;
+                       src = MIPS_R_SP;
+                       mem_off = insn->off + MAX_BPF_STACK;
+               } else {
+                       src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+                       if (src < 0)
+                               return src;
+                       mem_off = insn->off;
+               }
+               dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+               if (dst < 0)
+                       return dst;
+               switch (BPF_SIZE(insn->code)) {
+               case BPF_B:
+                       emit_instr(ctx, lbu, dst, mem_off, src);
+                       break;
+               case BPF_H:
+                       emit_instr(ctx, lhu, dst, mem_off, src);
+                       break;
+               case BPF_W:
+                       emit_instr(ctx, lw, dst, mem_off, src);
+                       break;
+               case BPF_DW:
+                       emit_instr(ctx, ld, dst, mem_off, src);
+                       break;
+               }
+               break;
+
+       case BPF_STX | BPF_B | BPF_MEM:
+       case BPF_STX | BPF_H | BPF_MEM:
+       case BPF_STX | BPF_W | BPF_MEM:
+       case BPF_STX | BPF_DW | BPF_MEM:
+       case BPF_STX | BPF_W | BPF_XADD:
+       case BPF_STX | BPF_DW | BPF_XADD:
+               if (insn->dst_reg == BPF_REG_10) {
+                       ctx->flags |= EBPF_SEEN_FP;
+                       dst = MIPS_R_SP;
+                       mem_off = insn->off + MAX_BPF_STACK;
+               } else {
+                       dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+                       if (dst < 0)
+                               return dst;
+                       mem_off = insn->off;
+               }
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               if (src < 0)
+                       return src;
+               if (BPF_MODE(insn->code) == BPF_XADD) {
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_W:
+                               if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                                       emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+                                       src = MIPS_R_AT;
+                               }
+                               emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
+                               emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
+                               emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
+                               /*
+                                * On failure, back up to the LL
+                                * (-4 instructions of 4 bytes each).
+                                */
+                               emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+                               emit_instr(ctx, nop);
+                               break;
+                       case BPF_DW:
+                               if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                                       emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+                                       emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+                                       src = MIPS_R_AT;
+                               }
+                               emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
+                               emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
+                               emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
+                               emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+                               emit_instr(ctx, nop);
+                               break;
+                       }
+               } else { /* BPF_MEM */
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_B:
+                               emit_instr(ctx, sb, src, mem_off, dst);
+                               break;
+                       case BPF_H:
+                               emit_instr(ctx, sh, src, mem_off, dst);
+                               break;
+                       case BPF_W:
+                               emit_instr(ctx, sw, src, mem_off, dst);
+                               break;
+                       case BPF_DW:
+                               if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+                                       emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+                                       emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+                                       src = MIPS_R_AT;
+                               }
+                               emit_instr(ctx, sd, src, mem_off, dst);
+                               break;
+                       }
+               }
+               break;
+
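
(What the ll/addu/sc retry loop above implements, modeled with a compiler builtin; the JIT emits the raw LL/SC pair, and the beq above branches back to the ll whenever sc reports contention by writing 0 into T8:)

#include <stdint.h>

static void xadd_w_model(uint32_t *mem, uint32_t val)
{
	/* BPF_W | BPF_XADD: *(u32 *)(dst + off) += src, atomically */
	__atomic_fetch_add(mem, val, __ATOMIC_RELAXED);
}

int main(void)
{
	uint32_t v = 1;

	xadd_w_model(&v, 2);
	return v == 3 ? 0 : 1;
}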
+       default:
+               pr_err("NOT HANDLED %d - (%02x)\n",
+                      this_idx, (unsigned int)insn->code);
+               return -EINVAL;
+       }
+       return 1;
+}
+
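
(b_imm() and is_bad_offset() are defined earlier in this file; presumably the displacement is the target's byte offset relative to the instruction after the branch, limited by the 16-bit word offset a MIPS branch can encode. A hedged model with illustrative names:)

#include <stdio.h>
#include <stdint.h>

static int b_imm_model(uint32_t target_off, uint32_t branch_off)
{
	/* offsets are byte positions in the image, relative to the delay slot */
	return (int)target_off - (int)(branch_off + 4);
}

static int is_bad_offset_model(int off)
{
	return off < -0x20000 || off > 0x1fffc;	/* +/-128 KiB reach */
}

int main(void)
{
	printf("%d\n", is_bad_offset_model(b_imm_model(0x40000, 0)));	/* 1 */
	return 0;
}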
+#define RVT_VISITED_MASK 0xc000000000000000ull
+#define RVT_FALL_THROUGH 0x4000000000000000ull
+#define RVT_BRANCH_TAKEN 0x8000000000000000ull
+#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
+
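
(The RVT_* masks above sit in the top bits of a per-insn u64 whose low bits pack a 3-bit value-range type for each of the 11 eBPF registers; a hedged reimplementation of the accessors, illustrative rather than the patch's own code:)

#include <assert.h>
#include <stdint.h>

static void set_rvt(uint64_t *rvt, int reg, uint64_t type)
{
	*rvt = (*rvt & ~(7ull << (reg * 3))) | (type << (reg * 3));
}

static uint64_t get_rvt(uint64_t rvt, int reg)
{
	return (rvt >> (reg * 3)) & 7;	/* 11 regs * 3 bits = 33 bits used */
}

int main(void)
{
	uint64_t rvt = 0;

	set_rvt(&rvt, 10, 5);
	assert(get_rvt(rvt, 10) == 5);
	return 0;
}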
+static int build_int_body(struct jit_ctx *ctx)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       const struct bpf_insn *insn;
+       int i, r;
+
+       for (i = 0; i < prog->len; ) {
+               insn = prog->insnsi + i;
+               if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
+                       /* dead instruction, don't emit it. */
+                       i++;
+                       continue;
+               }
+
+               if (ctx->target == NULL)
+                       ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
+
+               r = build_one_insn(insn, ctx, i, prog->len);
+               if (r < 0)
+                       return r;
+               i += r;
+       }
+       /* epilogue offset */
+       if (ctx->target == NULL)
+               ctx->offsets[i] = ctx->idx * 4;
+
+       /*
+        * All exits use the offset of the epilogue; some offsets may
+        * not have been set due to branch-around threading, so set
+        * them now.
+        */
+       if (ctx->target == NULL)
+               for (i = 0; i < prog->len; i++) {
+                       insn = prog->insnsi + i;
+                       if (insn->code == (BPF_JMP | BPF_EXIT))
+                               ctx->offsets[i] = ctx->idx * 4;
+               }
+       return 0;
+}
+
+/* return the last idx processed, or negative for error */
+static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
+                                  int start_idx, bool follow_taken)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       const struct bpf_insn *insn;
+       u64 exit_rvt = initial_rvt;
+       u64 *rvt = ctx->reg_val_types;
+       int idx;
+       int reg;
+
+       for (idx = start_idx; idx < prog->len; idx++) {
+               rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
+               insn = prog->insnsi + idx;
+               switch (BPF_CLASS(insn->code)) {
+               case BPF_ALU:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_ADD:
+                       case BPF_SUB:
+                       case BPF_MUL:
+                       case BPF_DIV:
+                       case BPF_OR:
+                       case BPF_AND:
+                       case BPF_LSH:
+                       case BPF_RSH:
+                       case BPF_NEG:
+                       case BPF_MOD:
+                       case BPF_XOR:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               break;
+                       case BPF_MOV:
+                               if (BPF_SRC(insn->code)) {
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               } else {
+                                       /* IMM to REG move */
+                                       if (insn->imm >= 0)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                                       else
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               }
+                               break;
+                       case BPF_END:
+                               if (insn->imm == 64)
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               else if (insn->imm == 32)
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               else /* insn->imm == 16 */
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                               break;
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_ALU64:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_MOV:
+                               if (BPF_SRC(insn->code)) {
+                                       /* REG to REG move */
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               } else {
+                                       /* IMM to REG move */
+                                       if (insn->imm >= 0)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                                       else
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+                               }
+                               break;
+                       default:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_LD:
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_DW:
+                               if (BPF_MODE(insn->code) == BPF_IMM) {
+                                       s64 val;
+
+                                       val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
+                                       if (val > 0 && val <= S32_MAX)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                                       else if (val >= S32_MIN && val <= S32_MAX)
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+                                       else
+                                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                                       rvt[idx] |= RVT_DONE;
+                                       idx++;
+                               } else {
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               }
+                               break;
+                       case BPF_B:
+                       case BPF_H:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                               break;
+                       case BPF_W:
+                               if (BPF_MODE(insn->code) == BPF_IMM)
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg,
+                                                        insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
+                               else
+                                       set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               break;
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_LDX:
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_DW:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+                               break;
+                       case BPF_B:
+                       case BPF_H:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+                               break;
+                       case BPF_W:
+                               set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+                               break;
+                       }
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               case BPF_JMP:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_EXIT:
+                               rvt[idx] = RVT_DONE | exit_rvt;
+                               rvt[prog->len] = exit_rvt;
+                               return idx;
+                       case BPF_JA:
+                               rvt[idx] |= RVT_DONE;
+                               idx += insn->off;
+                               break;
+                       case BPF_JEQ:
+                       case BPF_JGT:
+                       case BPF_JGE:
+                       case BPF_JSET:
+                       case BPF_JNE:
+                       case BPF_JSGT:
+                       case BPF_JSGE:
+                               if (follow_taken) {
+                                       rvt[idx] |= RVT_BRANCH_TAKEN;
+                                       idx += insn->off;
+                                       follow_taken = false;
+                               } else {
+                                       rvt[idx] |= RVT_FALL_THROUGH;
+                               }
+                               break;
+                       case BPF_CALL:
+                               set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
+                               /* Upon call return, argument registers are clobbered. */
+                               for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
+                                       set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+                               rvt[idx] |= RVT_DONE;
+                               break;
+                       default:
+                               WARN(1, "Unhandled BPF_JMP case.\n");
+                               rvt[idx] |= RVT_DONE;
+                               break;
+                       }
+                       break;
+               default:
+                       rvt[idx] |= RVT_DONE;
+                       break;
+               }
+       }
+       return idx;
+}
+
+/*
+ * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
+ * each eBPF insn.  This allows unneeded sign and zero extension
+ * operations to be omitted.
+ *
+ * Doesn't yet handle confluence of control paths with conflicting
+ * ranges, but it is good enough for most sane code.
+ */
+static int reg_val_propagate(struct jit_ctx *ctx)
+{
+       const struct bpf_prog *prog = ctx->skf;
+       u64 exit_rvt;
+       int reg;
+       int i;
+
+       /*
+        * 11 registers * 3 bits/reg leaves the top bits free for
+        * other uses.  Bits 62..63 are used to mark whether we have
+        * visited an insn.
+        */
+       exit_rvt = 0;
+
+       /* Upon entry, argument registers are 64-bit. */
+       for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
+               set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+       /*
+        * First follow all conditional branches on the fall-through
+        * edge of control flow.
+        */
+       reg_val_propagate_range(ctx, exit_rvt, 0, false);
+restart_search:
+       /*
+        * Then repeatedly find the first conditional branch where
+        * both edges of control flow have not been taken, and follow
+        * the branch taken edge.  We will end up restarting the
+        * search once per conditional branch insn.
+        */
+       for (i = 0; i < prog->len; i++) {
+               u64 rvt = ctx->reg_val_types[i];
+
+               if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
+                   (rvt & RVT_VISITED_MASK) == 0)
+                       continue;
+               if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
+                       reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
+               } else { /* RVT_BRANCH_TAKEN */
+                       WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
+                       reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
+               }
+               goto restart_search;
+       }
+       /*
+        * Eventually all conditional branches have been followed on
+        * both branches and we are done.  Any insn that has not been
+        * visited at this point is dead.
+        */
+
+       return 0;
+}
+
+static void jit_fill_hole(void *area, unsigned int size)
+{
+       u32 *p;
+
+       /* We are guaranteed to have aligned memory. */
+       for (p = area; size >= sizeof(u32); size -= sizeof(u32))
+               uasm_i_break(&p, BRK_BUG); /* Increments p */
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+       struct bpf_prog *orig_prog = prog;
+       bool tmp_blinded = false;
+       struct bpf_prog *tmp;
+       struct bpf_binary_header *header = NULL;
+       struct jit_ctx ctx;
+       unsigned int image_size;
+       u8 *image_ptr;
+
+       if (!bpf_jit_enable || !cpu_has_mips64r2)
+               return prog;
+
+       tmp = bpf_jit_blind_constants(prog);
+       /* If blinding was requested and we failed during blinding,
+        * we must fall back to the interpreter.
+        */
+       if (IS_ERR(tmp))
+               return orig_prog;
+       if (tmp != prog) {
+               tmp_blinded = true;
+               prog = tmp;
+       }
+
+       memset(&ctx, 0, sizeof(ctx));
+
+       ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+       if (ctx.offsets == NULL)
+               goto out_err;
+
+       ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
+       if (ctx.reg_val_types == NULL)
+               goto out_err;
+
+       ctx.skf = prog;
+
+       if (reg_val_propagate(&ctx))
+               goto out_err;
+
+       /*
+        * First pass discovers used resources and instruction offsets
+        * assuming short branches are used.
+        */
+       if (build_int_body(&ctx))
+               goto out_err;
+
+       /*
+        * If no calls are made (EBPF_SAVE_RA not set), the tail call
+        * count stays in $v1, else we must save it in $s4.
+        */
+       if (ctx.flags & EBPF_SEEN_TC) {
+               if (ctx.flags & EBPF_SAVE_RA)
+                       ctx.flags |= EBPF_SAVE_S4;
+               else
+                       ctx.flags |= EBPF_TCC_IN_V1;
+       }
+
+       /*
+        * Second pass generates offsets.  If any branch is out of
+        * range, a long jump-around sequence is generated and we must
+        * try again from the beginning to compute the new offsets.
+        * This repeats until no additional conversions are necessary.
+        */
+       do {
+               ctx.idx = 0;
+               ctx.gen_b_offsets = 1;
+               ctx.long_b_conversion = 0;
+               if (gen_int_prologue(&ctx))
+                       goto out_err;
+               if (build_int_body(&ctx))
+                       goto out_err;
+               if (build_int_epilogue(&ctx, MIPS_R_RA))
+                       goto out_err;
+       } while (ctx.long_b_conversion);
+
+       image_size = 4 * ctx.idx;
+
+       header = bpf_jit_binary_alloc(image_size, &image_ptr,
+                                     sizeof(u32), jit_fill_hole);
+       if (header == NULL)
+               goto out_err;
+
+       ctx.target = (u32 *)image_ptr;
+
+       /* Third pass generates the code */
+       ctx.idx = 0;
+       if (gen_int_prologue(&ctx))
+               goto out_err;
+       if (build_int_body(&ctx))
+               goto out_err;
+       if (build_int_epilogue(&ctx, MIPS_R_RA))
+               goto out_err;
+
+       /* Update the icache */
+       flush_icache_range((unsigned long)ctx.target,
+                          (unsigned long)&ctx.target[ctx.idx]);
+
+       if (bpf_jit_enable > 1)
+               /* Dump JIT code */
+               bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+
+       bpf_jit_binary_lock_ro(header);
+       prog->bpf_func = (void *)ctx.target;
+       prog->jited = 1;
+       prog->jited_len = image_size;
+out_normal:
+       if (tmp_blinded)
+               bpf_jit_prog_release_other(prog, prog == orig_prog ?
+                                          tmp : orig_prog);
+       kfree(ctx.offsets);
+       kfree(ctx.reg_val_types);
+
+       return prog;
+
+out_err:
+       prog = orig_prog;
+       if (header)
+               bpf_jit_binary_free(header);
+       goto out_normal;
+}
index 0695ce047d565199e4501333fa41ece48cdf9e45..34fc9bbfca9e68d6372e1d34b79ebf95d978e685 100644 (file)
@@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
index 5175028c56ce74e3e50a2b30eabccf7b87ed8f0e..c5246d29f3859965316bd4d48e4e816283439bf0 100644 (file)
@@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
index 1a61aa20dfbac9d5072ae83ef90640b8be380bd3..fd5d98a0b95c7b1ae5fda56892c2ecd43ea29f3a 100644 (file)
@@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
index 49d8422767b4de686ec0ee64fbf69ac415f05003..e925c1c99c71cab982967e7f7df6325e3135506f 100644 (file)
@@ -223,17 +223,27 @@ system_call_exit:
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    .Lsyscall_exit_work
 
-       /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
-       li      r7,MSR_FP
+       andi.   r0,r8,MSR_FP
+       beq 2f
 #ifdef CONFIG_ALTIVEC
-       oris    r7,r7,MSR_VEC@h
+       andis.  r0,r8,MSR_VEC@h
+       bne     3f
 #endif
-       and     r0,r8,r7
-       cmpd    r0,r7
-       bne     .Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2:     addi    r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+       li      r10,MSR_RI
+       mtmsrd  r10,1           /* Restore RI */
+#endif
+       bl      restore_math
+#ifdef CONFIG_PPC_BOOK3S
+       li      r11,0
+       mtmsrd  r11,1
+#endif
+       ld      r8,_MSR(r1)
+       ld      r3,RESULT(r1)
+       li      r11,-MAX_ERRNO
 
-       cmpld   r3,r11
+3:     cmpld   r3,r11
        ld      r5,_CCR(r1)
        bge-    .Lsyscall_error
 .Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        std     r5,_CCR(r1)
        b       .Lsyscall_error_cont
 
-.Lsyscall_restore_math:
-       /*
-        * Some initial tests from restore_math to avoid the heavyweight
-        * C code entry and MSR manipulations.
-        */
-       LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
-       and.    r0,r0,r8
-       bne     1f
-
-       ld      r7,PACACURRENT(r13)
-       lbz     r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
-       lbz     r6,THREAD+THREAD_LOAD_VEC(r7)
-       add     r0,r0,r6
-#endif
-       cmpdi   r0,0
-       beq     .Lsyscall_restore_math_cont
-
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-       li      r10,MSR_RI
-       mtmsrd  r10,1           /* Restore RI */
-#endif
-       bl      restore_math
-#ifdef CONFIG_PPC_BOOK3S
-       li      r11,0
-       mtmsrd  r11,1
-#endif
-       /* Restore volatiles, reload MSR from updated one */
-       ld      r8,_MSR(r1)
-       ld      r3,RESULT(r1)
-       li      r11,-MAX_ERRNO
-       b       .Lsyscall_restore_math_cont
-
 /* Traced system call support */
 .Lsyscall_dotrace:
        bl      save_nvgprs
index 9f3e2c932dccc1c3a1158fc174a8cf57e63dd75d..ec480966f9bf55f17184537f64e7d10c40c723c0 100644 (file)
@@ -511,10 +511,6 @@ void restore_math(struct pt_regs *regs)
 {
        unsigned long msr;
 
-       /*
-        * Syscall exit makes a similar initial check before branching
-        * to restore_math. Keep them in synch.
-        */
        if (!msr_tm_active(regs->msr) &&
                !current->thread.load_fp && !loadvec(current->thread))
                return;
index cf0e1245b8cc1c78948a4004be2d20c5b5ac0b78..8d3320562c70f3ef7308645fb7b805fc14794e42 100644 (file)
@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
        hard_irq_disable();
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
                raw_local_irq_restore(*flags);
-               cpu_relax();
+               spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 static void nmi_ipi_lock(void)
 {
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-               cpu_relax();
+               spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
        nmi_ipi_lock_start(&flags);
        while (nmi_ipi_busy_count) {
                nmi_ipi_unlock_end(&flags);
-               cpu_relax();
+               spin_until_cond(nmi_ipi_busy_count == 0);
                nmi_ipi_lock_start(&flags);
        }
 
index b67f8b03a32d0f12ce29eeb4ac3be0a97384fe72..34721a257a770c450baac0288f8006c94ff1975b 100644 (file)
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
         * This may be called from low level interrupt handlers at some
         * point in the future.
         */
-       local_irq_save(*flags);
-       while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
-               cpu_relax();
+       raw_local_irq_save(*flags);
+       hard_irq_disable(); /* Make it soft-NMI safe */
+       while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+               raw_local_irq_restore(*flags);
+               spin_until_cond(!test_bit(0, &__wd_smp_lock));
+               raw_local_irq_save(*flags);
+               hard_irq_disable();
+       }
 }
 
 static inline void wd_smp_unlock(unsigned long *flags)
 {
        clear_bit_unlock(0, &__wd_smp_lock);
-       local_irq_restore(*flags);
+       raw_local_irq_restore(*flags);
 }
 
 static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
                nmi_panic(regs, "Hard LOCKUP");
 }
 
-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
 {
-       cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
-       cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+       cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+       cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
        if (cpumask_empty(&wd_smp_cpus_pending)) {
                wd_smp_last_reset_tb = tb;
                cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
                                &wd_smp_cpus_stuck);
        }
 }
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+       set_cpumask_stuck(cpumask_of(cpu), tb);
+}
 
 static void watchdog_smp_panic(int cpu, u64 tb)
 {
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
        }
        smp_flush_nmi_ipi(1000000);
 
-       /* Take the stuck CPU out of the watch group */
-       for_each_cpu(c, &wd_smp_cpus_pending)
-               set_cpu_stuck(c, tb);
+       /* Take the stuck CPUs out of the watch group */
+       set_cpumask_stuck(&wd_smp_cpus_pending, tb);
 
-out:
        wd_smp_unlock(&flags);
 
        printk_safe_flush();
@@ -152,6 +159,11 @@ out:
 
        if (hardlockup_panic)
                nmi_panic(NULL, "Hard LOCKUP");
+
+       return;
+
+out:
+       wd_smp_unlock(&flags);
 }
 
 static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
 
 void arch_touch_nmi_watchdog(void)
 {
+       unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
        int cpu = smp_processor_id();
 
-       watchdog_timer_interrupt(cpu);
+       if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+               watchdog_timer_interrupt(cpu);
 }
 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
 
@@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
 
 static int start_wd_on_cpu(unsigned int cpu)
 {
+       unsigned long flags;
+
        if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
                WARN_ON(1);
                return 0;
@@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
        if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
                return 0;
 
+       wd_smp_lock(&flags);
        cpumask_set_cpu(cpu, &wd_cpus_enabled);
        if (cpumask_weight(&wd_cpus_enabled) == 1) {
                cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
                wd_smp_last_reset_tb = get_tb();
        }
-       smp_wmb();
+       wd_smp_unlock(&flags);
+
        start_watchdog_timer_on(cpu);
 
        return 0;
@@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
 
 static int stop_wd_on_cpu(unsigned int cpu)
 {
+       unsigned long flags;
+
        if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                return 0; /* Can happen in CPU unplug case */
 
        stop_watchdog_timer_on(cpu);
 
+       wd_smp_lock(&flags);
        cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+       wd_smp_unlock(&flags);
+
        wd_smp_clear_cpu_pending(cpu, get_tb());
 
        return 0;
index 2abee070373fb3a8b757b8d3cb269e5d0b89dff6..a553aeea7af683812ba2f5a80d65e97cda163919 100644 (file)
@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
  */
 static u64 pnv_deepest_stop_psscr_val;
 static u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_flag;
 static bool deepest_stop_found;
 
 static int pnv_save_sprs_for_deep_states(void)
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
 
        update_subcore_sibling_mask();
 
-       if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
-               pnv_save_sprs_for_deep_states();
+       if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+               int rc = pnv_save_sprs_for_deep_states();
+
+               if (likely(!rc))
+                       return;
+
+               /*
+                * The stop-api is unable to restore hypervisor
+                * resources on wakeup from platform idle states which
+                * lose full context. So disable such states.
+                */
+               supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+               pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+               pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+               if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+                   (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+                       /*
+                        * Use the default stop state for CPU-Hotplug
+                        * if available.
+                        */
+                       if (default_stop_found) {
+                               pnv_deepest_stop_psscr_val =
+                                       pnv_default_stop_val;
+                               pnv_deepest_stop_psscr_mask =
+                                       pnv_default_stop_mask;
+                               pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+                                       pnv_deepest_stop_psscr_val);
+                       } else { /* Fallback to snooze loop for CPU-Hotplug */
+                               deepest_stop_found = false;
+                               pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+                       }
+               }
+       }
 }
 
 u32 pnv_get_supported_cpuidle_states(void)
@@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
                                                pnv_deepest_stop_psscr_val;
                srr1 = power9_idle_stop(psscr);
 
-       } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+       } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
+                  (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
                srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
        } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
                   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
                        max_residency_ns = residency_ns[i];
                        pnv_deepest_stop_psscr_val = psscr_val[i];
                        pnv_deepest_stop_psscr_mask = psscr_mask[i];
+                       pnv_deepest_stop_flag = flags[i];
                        deepest_stop_found = true;
                }
 
index 7317b3108a88859a91523c45f1e52c08cb22fdc4..2eb8ff0d6fca443543c32ac80ff690b4b67be1ef 100644 (file)
@@ -47,10 +47,9 @@ struct mmu_table_batch {
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
-                                 struct mm_struct *mm,
-                                 unsigned long start,
-                                 unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
-                                 unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+       }
+
        tlb_flush_mmu(tlb);
 }
 
index 01c6fbc3e85b62fdec83bacea0f76a88126bfe84..1803797fc885cf799337b5d61f30ae726628b8d6 100644 (file)
@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
                insn_count = bpf_jit_insn(jit, fp, i);
                if (insn_count < 0)
                        return -1;
-               jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+               /* Next instruction address */
+               jit->addrs[i + insn_count] = jit->prg;
        }
        bpf_jit_epilogue(jit);
 
index 46e0d635e36f711aff9a88c45955905d7fbf3cc2..51a8bc967e75f1e3c96a70783e9da439310edbcb 100644 (file)
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
-       if (tlb->fullmm)
+       if (tlb->fullmm || force)
                flush_tlb_mm(tlb->mm);
 
        /* keep the page table cache within bounds */
index 1d8321c827a8821bb4e9f4989eb883cd761370db..1b1286d0506910c0f9a92ab6af14e272dd008d61 100644 (file)
 #define SUN4V_CHIP_NIAGARA5    0x05
 #define SUN4V_CHIP_SPARC_M6    0x06
 #define SUN4V_CHIP_SPARC_M7    0x07
+#define SUN4V_CHIP_SPARC_M8    0x08
 #define SUN4V_CHIP_SPARC64X    0x8a
 #define SUN4V_CHIP_SPARC_SN    0x8b
 #define SUN4V_CHIP_UNKNOWN     0xff
 
+/*
+ * The following CPU_ID_xxx constants are used
+ * to identify the CPU type in the setup phase
+ * (see head_64.S)
+ */
+#define CPU_ID_NIAGARA1                ('1')
+#define CPU_ID_NIAGARA2                ('2')
+#define CPU_ID_NIAGARA3                ('3')
+#define CPU_ID_NIAGARA4                ('4')
+#define CPU_ID_NIAGARA5                ('5')
+#define CPU_ID_M6              ('6')
+#define CPU_ID_M7              ('7')
+#define CPU_ID_M8              ('8')
+#define CPU_ID_SONOMA1         ('N')
+
 #ifndef __ASSEMBLY__
 
 enum ultra_tlb_layout {
index 493e023a468a919c61d77451e43e0a4a2e414bbe..ef4f18f7a67402ed8baceb2ea05ee7f6368cc404 100644 (file)
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "sparc-m7";
                break;
 
+       case SUN4V_CHIP_SPARC_M8:
+               sparc_cpu_type = "SPARC-M8";
+               sparc_fpu_type = "SPARC-M8 integrated FPU";
+               sparc_pmu_type = "sparc-m8";
+               break;
+
        case SUN4V_CHIP_SPARC_SN:
                sparc_cpu_type = "SPARC-SN";
                sparc_fpu_type = "SPARC-SN integrated FPU";
index 45c820e1cba5d949ff936f15392ca3c0c8578a34..90d550bbfeefe484f1560940f111235f26332d7a 100644 (file)
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA5:
        case SUN4V_CHIP_SPARC_M6:
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
        case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
index 41a4073286671eff51f275bfca4ae6d9d01db74d..78e0211753d28f14f955af865704248b1e5daf24 100644 (file)
@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
         nop
 
 70:    ldub    [%g1 + 7], %g2
-       cmp     %g2, '3'
+       cmp     %g2, CPU_ID_NIAGARA3
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA3, %g4
-       cmp     %g2, '4'
+       cmp     %g2, CPU_ID_NIAGARA4
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA4, %g4
-       cmp     %g2, '5'
+       cmp     %g2, CPU_ID_NIAGARA5
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA5, %g4
-       cmp     %g2, '6'
+       cmp     %g2, CPU_ID_M6
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M6, %g4
-       cmp     %g2, '7'
+       cmp     %g2, CPU_ID_M7
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M7, %g4
-       cmp     %g2, 'N'
+       cmp     %g2, CPU_ID_M8
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_SPARC_M8, %g4
+       cmp     %g2, CPU_ID_SONOMA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_SN, %g4
        ba,pt   %xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
 91:    sethi   %hi(prom_cpu_compatible), %g1
        or      %g1, %lo(prom_cpu_compatible), %g1
        ldub    [%g1 + 17], %g2
-       cmp     %g2, '1'
+       cmp     %g2, CPU_ID_NIAGARA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA1, %g4
-       cmp     %g2, '2'
+       cmp     %g2, CPU_ID_NIAGARA2
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA2, %g4
        
@@ -600,6 +603,9 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_M7
+       be,pt   %xcc, niagara4_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_SPARC_M8
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_SN
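Editor's note: the assembly above compares one byte of the PROM "compatible" string against the new CPU_ID_* character constants from spitfire.h. A rough C rendering of the dispatch (the index 7 is the same byte the asm loads in the 70: path):

	/* sketch: C equivalent of the byte-compare chain above */
	switch (prom_cpu_compatible[7]) {
	case CPU_ID_NIAGARA3: chip = SUN4V_CHIP_NIAGARA3; break;
	case CPU_ID_NIAGARA4: chip = SUN4V_CHIP_NIAGARA4; break;
	case CPU_ID_NIAGARA5: chip = SUN4V_CHIP_NIAGARA5; break;
	case CPU_ID_M6:       chip = SUN4V_CHIP_SPARC_M6; break;
	case CPU_ID_M7:       chip = SUN4V_CHIP_SPARC_M7; break;
	case CPU_ID_M8:       chip = SUN4V_CHIP_SPARC_M8; break;
	case CPU_ID_SONOMA1:  chip = SUN4V_CHIP_SPARC_SN; break;
	default:              chip = SUN4V_CHIP_UNKNOWN;  break;
	}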
index 4d9c3e13c15056b5d60e7ccd266b36cfe29d2c00..150ee7d4b059a69e174dff7c7d16ff906f73e1ed 100644 (file)
@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
-       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
-           sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
+
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
+       case SUN4V_CHIP_SPARC_SN:
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
+               break;
+       default:
+               break;
+       }
 
        sun4v_hvapi_init();
 }
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
index fed73f14aa49befee59b93b0fcab02f65f7e10d2..afa0099f374852e0cf093088d942512008a45a68 100644 (file)
@@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
                        break;
                case SUN4V_CHIP_SPARC_M7:
                case SUN4V_CHIP_SPARC_SN:
-               default:
                        /* M7 and later support 52-bit virtual addresses.  */
                        sparc64_va_hole_top =    0xfff8000000000000UL;
                        sparc64_va_hole_bottom = 0x0008000000000000UL;
                        max_phys_bits = 49;
                        break;
+               case SUN4V_CHIP_SPARC_M8:
+               default:
+                       /* M8 and later support 54-bit virtual addresses.
+                        * However, we restrict M8 and above to 53 VA
+                        * bits, since a 4-level page table cannot
+                        * support more than 53 VA bits.
+                        */
+                       sparc64_va_hole_top =    0xfff0000000000000UL;
+                       sparc64_va_hole_bottom = 0x0010000000000000UL;
+                       max_phys_bits = 51;
+                       break;
                }
        }
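Editor's note: the 53-bit limit follows from the page-table geometry. With 8 KiB pages (13 bits) and 8-byte PTEs, each table holds 1024 entries, so each of the four levels resolves 10 bits; a 4-level walk therefore covers 13 + 4 * 10 = 53 VA bits, split into two 2^52-byte halves around the hole. A quick check against the constants above:

	/* sketch: 4-level sparc64 page tables, 8 KiB pages, 8-byte PTEs */
	#define VA_BITS (13 + 4 * 10)                      /* = 53 */
	/* hole bottom =  (1UL << 52) = 0x0010000000000000  */
	/* hole top    = -(1UL << 52) = 0xfff0000000000000  */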
 
@@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                pagecv_flag = 0x00;
                break;
@@ -2313,6 +2324,7 @@ void __init paging_init(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                page_cache4v_flag = _PAGE_CP_4V;
                break;
index 600a2e9bfee2feea2a6dbc8b91d2a5a872d9d8d3..344d95619d0334659e6f4a9f3a5bff70ae95f67c 100644 (file)
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+               tlb->need_flush = 1;
+       }
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
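Editor's note: the new force parameter lets generic mm code demand a TLB flush even when this gather saw no pages, which matters when another thread batched PTE changes on the same mm in parallel. The companion generic-mm change in this series presumably drives the arch hooks roughly like this (a sketch, not the verbatim patch):

	/* sketch: generic wrapper driving the arch hooks above */
	void tlb_finish_mmu(struct mmu_gather *tlb,
			    unsigned long start, unsigned long end)
	{
		/* force a flush if another thread batches PTE changes
		 * on this mm concurrently (e.g. racing MADV_DONTNEED) */
		bool force = mm_tlb_flush_nested(tlb->mm);

		arch_tlb_finish_mmu(tlb, start, end, force);
		dec_tlb_flush_pending(tlb->mm);
	}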
index 21126155a739f4a3495499f052abcd3eab5a0bb7..0ead9dbb91301d0f7f8923dcf33f25515bd182b8 100644 (file)
@@ -43,6 +43,9 @@ struct hypervisor_x86 {
 
        /* pin current vcpu to specified physical cpu (run rarely) */
        void            (*pin_vcpu)(int);
+
+       /* called during init_mem_mapping() to setup early mappings. */
+       void            (*init_mem_mapping)(void);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
@@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
 extern void init_hypervisor_platform(void);
 extern bool hypervisor_x2apic_available(void);
 extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+       if (x86_hyper && x86_hyper->init_mem_mapping)
+               x86_hyper->init_mem_mapping();
+}
 #else
 static inline void init_hypervisor_platform(void) { }
 static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
index 673541eb3b3f16c8c029349d597d67f4bb83a77a..bf3f1065d6addb88b898ba3a86089cccff6ed15e 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/dma.h>           /* for MAX_DMA_PFN */
 #include <asm/microcode.h>
 #include <asm/kaslr.h>
+#include <asm/hypervisor.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 
+       hypervisor_init_mem_mapping();
+
        early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
 
index 87d791356ea9052f79f23e00f5d0024ff8cf0b7f..de503c225ae1f194b10c71b44528ad2a2a7a4c0d 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
 #include <asm/e820/api.h>
+#include <asm/early_ioremap.h>
 
 #include <asm/xen/cpuid.h>
 #include <asm/xen/hypervisor.h>
 #include "mmu.h"
 #include "smp.h"
 
-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
 {
        struct xen_add_to_physmap xatp;
-       u64 pa;
-
-       if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
-               /*
-                * Search for a free page starting at 4kB physical address.
-                * Low memory is preferred to avoid an EPT large page split up
-                * by the mapping.
-                * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
-                * the BIOS used for HVM guests is well behaved and won't
-                * clobber memory other than the first 4kB.
-                */
-               for (pa = PAGE_SIZE;
-                    !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
-                    memblock_is_reserved(pa);
-                    pa += PAGE_SIZE)
-                       ;
-
-               memblock_reserve(pa, PAGE_SIZE);
-               HYPERVISOR_shared_info = __va(pa);
-       }
 
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
-       xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+       xatp.gpfn = shared_info_pfn;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();
 }
 
+static void __init reserve_shared_info(void)
+{
+       u64 pa;
+
+       /*
+        * Search for a free page starting at 4kB physical address.
+        * Low memory is preferred to avoid an EPT large page split up
+        * by the mapping.
+        * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+        * the BIOS used for HVM guests is well behaved and won't
+        * clobber memory other than the first 4kB.
+        */
+       for (pa = PAGE_SIZE;
+            !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+            memblock_is_reserved(pa);
+            pa += PAGE_SIZE)
+               ;
+
+       shared_info_pfn = PHYS_PFN(pa);
+
+       memblock_reserve(pa, PAGE_SIZE);
+       HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+       early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+       HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
+}
+
 static void __init init_hvm_pv_info(void)
 {
        int major, minor;
@@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
 
        init_hvm_pv_info();
 
+       reserve_shared_info();
        xen_hvm_init_shared_info();
 
        /*
@@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
        .init_platform          = xen_hvm_guest_init,
        .pin_vcpu               = xen_pin_vcpu,
        .x2apic_available       = xen_x2apic_para_available,
+       .init_mem_mapping       = xen_hvm_init_mem_mapping,
 };
 EXPORT_SYMBOL(x86_hyper_xen_hvm);
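Editor's note: the reshuffle works because __va() is only valid once the kernel direct mapping exists; before init_mem_mapping() the shared-info page must be reached through an early fixmap. The lifecycle, condensed (pa stands for the page found by reserve_shared_info()):

	/* sketch: early vs. permanent mapping of one reserved page */
	void *p = early_memremap(pa, PAGE_SIZE); /* boot-time fixmap  */
	/* ... used until init_mem_mapping() builds the direct map ... */
	early_memunmap(p, PAGE_SIZE);
	p = __va(pa);                            /* permanent mapping */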
index 2d716ebc5a5e90d62c3759e01a512e0ff6ab9de4..dff7cc39437caba214fac506f57009837f2223c8 100644 (file)
@@ -1,5 +1,6 @@
 generic-y += bug.h
 generic-y += clkdev.h
+generic-y += device.h
 generic-y += div64.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
@@ -17,6 +18,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += rwsem.h
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h
deleted file mode 100644 (file)
index 1deeb8e..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_XTENSA_DEVICE_H
-#define _ASM_XTENSA_DEVICE_H
-
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_XTENSA_DEVICE_H */
diff --git a/arch/xtensa/include/asm/param.h b/arch/xtensa/include/asm/param.h
deleted file mode 100644 (file)
index 0a70e78..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * include/asm-xtensa/param.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _XTENSA_PARAM_H
-#define _XTENSA_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ            CONFIG_HZ       /* internal timer frequency */
-# define USER_HZ       100             /* for user interfaces in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ)      /* frequnzy at which times() counts */
-#endif /* _XTENSA_PARAM_H */
index d159e9b9c01837ba5aa9e77d50c3c14e475ce367..672391003e40fac4f814fe5b6aa1dc04e791c81b 100644 (file)
@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
 }
 EXPORT_SYMBOL(__sync_fetch_and_or_4);
 
-#ifdef CONFIG_NET
 /*
  * Networking support
  */
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif /* CONFIG_NET */
 
 /*
  * Architecture-specific symbols
index 1a804a2f9a5be6212c57febc01f6d28f47b8c91a..3c75c4e597da8f086f65de51201e0d37d6672733 100644 (file)
@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
        clear_page_alias(kvaddr, paddr);
        preempt_enable();
 }
+EXPORT_SYMBOL(clear_user_highpage);
 
 void copy_user_highpage(struct page *dst, struct page *src,
                        unsigned long vaddr, struct vm_area_struct *vma)
@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
        copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
        preempt_enable();
 }
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(copy_user_highpage);
 
 /*
  * Any time the kernel writes to a user page cache page, or it is about to
@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
 
        /* There shouldn't be an entry in the cache for this page anymore. */
 }
-
+EXPORT_SYMBOL(flush_dcache_page);
 
 /*
  * For now, flush the whole cache. FIXME??
@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
        __flush_invalidate_dcache_all();
        __invalidate_icache_all();
 }
+EXPORT_SYMBOL(local_flush_cache_range);
 
 /* 
  * Remove any entry in the cache for this page. 
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
        __flush_invalidate_dcache_page_alias(virt, phys);
        __invalidate_icache_page_alias(virt, phys);
 }
+EXPORT_SYMBOL(local_flush_cache_page);
 
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 
        flush_tlb_page(vma, addr);
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
        if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
                unsigned long phys = page_to_phys(page);
@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
  * flush_dcache_page() on the page.
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long vaddr, void *dst, const void *src,
index 63e771ab56d80ade8cd5b5e8ccd86f2fac5f0573..859f0a8c97c8a1bf58b6ed4dc016c0ce0fb355ad 100644 (file)
@@ -71,17 +71,29 @@ struct bfq_service_tree {
  *
  * bfq_sched_data is the basic scheduler queue.  It supports three
  * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup.  @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
  *
  * The supported ioprio_classes are the same as in CFQ, in descending
  * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
  * Requests from higher priority queues are served before all the
  * requests from lower priority queues; among requests of the same
  * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occurs
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed by
+ * in_service_entity is not a queue, then the in-service child entity
+ * of the entity pointed by in_service_entity becomes idle on
+ * expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entitities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
  */
 struct bfq_sched_data {
        /* entity in service */
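Editor's note: in code, the class ordering described in the comment comes down to scanning the per-class service trees in priority order and serving the first non-empty one; a sketch using the field names from this header (the picker helper is hypothetical):

	/* sketch: RT is served before BE, BE before IDLE */
	for (class = 0; class < BFQ_IOPRIO_CLASSES; class++) {
		struct bfq_service_tree *st = sd->service_tree + class;

		if (!RB_EMPTY_ROOT(&st->active))
			return pick_first_active(st); /* hypothetical */
	}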
index 979f8f21b7e2b17268b2db3c9510c333fccbe10f..911aa7431dbeb08d6db591c1a0f577bd3af56e22 100644 (file)
@@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
 
 /*
  * This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
+ * service, according to the restrictive definition of the field
+ * next_in_service. In particular, this function is invoked for an
+ * entity that is about to be set in service.
  *
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
+ * If entity is a queue, then the entity is no longer a candidate for
+ * next service according to that definition, because entity is
+ * about to become the in-service queue. This function then returns
+ * true if entity is a queue.
  *
- * In contrast, the entity could stil be a candidate for next service
- * if it is not a queue, and has more than one child. In fact, even if
- * one of its children is about to be set in service, other children
- * may still be the next to serve. As a consequence, a non-queue
- * entity is not a candidate for next-service only if it has only one
- * child. And only if this condition holds, then the function returns
- * true for a non-queue entity.
+ * In contrast, entity could still be a candidate for next service if
+ * it is not a queue, and has more than one active child. In fact,
+ * even if one of its children is about to be set in service, other
+ * active children may still be the next to serve, for the parent
+ * entity, even according to the above definition. As a consequence, a
+ * non-queue entity is not a candidate for next-service only if it has
+ * only one active child. And only if this condition holds, then this
+ * function returns true for a non-queue entity.
  */
 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 {
@@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 
        bfqg = container_of(entity, struct bfq_group, entity);
 
+       /*
+        * The field active_entities does not always contain the
+        * actual number of active child entities: it may fail to
+        * account for the in-service entity when the latter is
+        * removed from its active tree (which may happen after
+        * invoking the function bfq_no_longer_next_in_service in
+        * bfq_get_next_queue). Fortunately, here, i.e., while
+        * bfq_no_longer_next_in_service is not yet completed in
+        * bfq_get_next_queue, bfq_active_extract has not yet been
+        * invoked, and thus active_entities still coincides with the
+        * actual number of active entities.
+        */
        if (bfqg->active_entities == 1)
                return true;
 
@@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
  * one of its children receives a new request.
  *
  * Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possible extracting it
+ * inserts entity into its active tree, after possibly extracting it
  * from its idle tree.
  */
 static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
                entity->start = entity->finish;
                /*
                 * In addition, if the entity had more than one child
-                * when set in service, then was not extracted from
+                * when set in service, then it was not extracted from
                 * the active tree. This implies that the position of
                 * the entity in the active tree may need to be
                 * changed now, because we have just updated the start
@@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
                 * time in a moment (the requeueing is then, more
                 * precisely, a repositioning in this case). To
                 * implement this repositioning, we: 1) dequeue the
-                * entity here, 2) update the finish time and
-                * requeue the entity according to the new
-                * timestamps below.
+                * entity here, 2) update the finish time and requeue
+                * the entity according to the new timestamps below.
                 */
                if (entity->tree)
                        bfq_active_extract(st, entity);
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
 
 
 /**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *                      and activate, requeue or reposition all ancestors
- *                      for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ *                              bfq_queue, and activate, requeue or reposition
+ *                              all ancestors for which such an update becomes
+ *                              necessary.
  * @entity: the entity to activate.
  * @non_blocking_wait_rq: true if this entity was waiting for a request
  * @requeue: true if this is a requeue, which implies that bfqq is
@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
  * @ins_into_idle_tree: if false, the entity will not be put into the
  *                     idle tree.
  *
- * Deactivates an entity, independently from its previous state.  Must
+ * Deactivates an entity, independently of its previous state.  Must
  * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * from that tree, and if necessary and allowed, puts it into the idle
  * tree.
  */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
        st = bfq_entity_service_tree(entity);
        is_in_service = entity == sd->in_service_entity;
 
-       if (is_in_service)
+       if (is_in_service) {
                bfq_calc_finish(entity, entity->service);
+               sd->in_service_entity = NULL;
+       }
 
        if (entity->tree == &st->active)
                bfq_active_extract(st, entity);
@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 /**
  * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
  * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
  */
 static void bfq_deactivate_entity(struct bfq_entity *entity,
                                  bool ins_into_idle_tree,
@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
                         */
                        bfq_update_next_in_service(sd, NULL);
 
-               if (sd->next_in_service)
+               if (sd->next_in_service || sd->in_service_entity) {
                        /*
-                        * The parent entity is still backlogged,
-                        * because next_in_service is not NULL. So, no
-                        * further upwards deactivation must be
-                        * performed.  Yet, next_in_service has
-                        * changed.  Then the schedule does need to be
-                        * updated upwards.
+                        * The parent entity is still active, because
+                        * either next_in_service or in_service_entity
+                        * is not NULL. So, no further upwards
+                        * deactivation must be performed.  Yet,
+                        * next_in_service has changed. Then the
+                        * schedule does need to be updated upwards.
+                        *
+                        * NOTE If in_service_entity is not NULL, then
+                        * next_in_service may happen to be NULL,
+                        * although the parent entity is evidently
+                        * active. This happens if 1) the entity
+                        * pointed by in_service_entity is the only
+                        * pointed to by in_service_entity is the only
+                        * according to the definition of
+                        * next_in_service, the in_service_entity
+                        * cannot be considered as
+                        * next_in_service. See the comments on the
+                        * definition of next_in_service for details.
                         */
                        break;
+               }
 
                /*
                 * If we get here, then the parent is no more
@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 
                /*
                 * If entity is no longer a candidate for next
-                * service, then we extract it from its active tree,
-                * for the following reason. To further boost the
-                * throughput in some special case, BFQ needs to know
-                * which is the next candidate entity to serve, while
-                * there is already an entity in service. In this
-                * respect, to make it easy to compute/update the next
-                * candidate entity to serve after the current
-                * candidate has been set in service, there is a case
-                * where it is necessary to extract the current
-                * candidate from its service tree. Such a case is
-                * when the entity just set in service cannot be also
-                * a candidate for next service. Details about when
-                * this conditions holds are reported in the comments
-                * on the function bfq_no_longer_next_in_service()
-                * invoked below.
+                * service, then it must be extracted from its active
+                * tree, so as to make sure that it won't be
+                * considered when computing next_in_service. See the
+                * comments on the function
+                * bfq_no_longer_next_in_service() for details.
                 */
                if (bfq_no_longer_next_in_service(entity))
                        bfq_active_extract(bfq_entity_service_tree(entity),
                                           entity);
 
                /*
-                * For the same reason why we may have just extracted
-                * entity from its active tree, we may need to update
-                * next_in_service for the sched_data of entity too,
-                * regardless of whether entity has been extracted.
-                * In fact, even if entity has not been extracted, a
-                * descendant entity may get extracted. Such an event
-                * would cause a change in next_in_service for the
-                * level of the descendant entity, and thus possibly
-                * back to upper levels.
+                * Even if entity is not to be extracted according to
+                * the above check, a descendant entity may get
+                * extracted in one of the next iterations of this
+                * loop. Such an event could cause a change in
+                * next_in_service for the level of the descendant
+                * entity, and thus possibly back to this level.
                 *
-                * We cannot perform the resulting needed update
-                * before the end of this loop, because, to know which
-                * is the correct next-to-serve candidate entity for
-                * each level, we need first to find the leaf entity
-                * to set in service. In fact, only after we know
-                * which is the next-to-serve leaf entity, we can
-                * discover whether the parent entity of the leaf
-                * entity becomes the next-to-serve, and so on.
+                * However, we cannot perform the resulting needed
+                * update of next_in_service for this level before the
+                * end of the whole loop, because, to know which is
+                * the correct next-to-serve candidate entity for each
+                * level, we need first to find the leaf entity to set
+                * in service. In fact, only after we know which is
+                * the next-to-serve leaf entity, we can discover
+                * whether the parent entity of the leaf entity
+                * becomes the next-to-serve, and so on.
                 */
-
        }
 
        bfqq = bfq_entity_to_bfqq(entity);
index 83e92beb3c9feb25f3be917e2b675f90b1544b5f..9b1ea478577b033195f34b457c85ad16eb759c6a 100644 (file)
@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
  */
 bool __bio_integrity_endio(struct bio *bio)
 {
-       if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
-               struct bio_integrity_payload *bip = bio_integrity(bio);
+       struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+       struct bio_integrity_payload *bip = bio_integrity(bio);
 
+       if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
+           (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
                INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
                queue_work(kintegrityd_wq, &bip->bip_work);
                return false;
index 041f7b7fa0d6def444e9349b6cf748afc8e89b2d..535cbdf32aabb28de64e7613e748bfa7a73ade6a 100644 (file)
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
        struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
+       struct blk_mq_ctx *local_ctx = NULL;
 
        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx))
-               data->ctx = blk_mq_get_ctx(q);
+               data->ctx = local_ctx = blk_mq_get_ctx(q);
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
        if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_TAG_FAIL) {
+               if (local_ctx) {
+                       blk_mq_put_ctx(local_ctx);
+                       data->ctx = NULL;
+               }
                blk_queue_exit(q);
                return NULL;
        }
@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-       blk_mq_put_ctx(alloc_data.ctx);
-       blk_queue_exit(q);
-
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
+       blk_mq_put_ctx(alloc_data.ctx);
+       blk_queue_exit(q);
+
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-       blk_queue_exit(q);
-
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
+       blk_queue_exit(q);
+
        return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
@@ -679,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
 {
-       kblockd_schedule_delayed_work(&q->requeue_work,
-                                     msecs_to_jiffies(msecs));
+       kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+                                   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
index 4ac3e06b41d846440d35079dfa54885111eec0df..98aa8c808a3346dc559e2f419ae4378dec075ffc 100644 (file)
 #include <linux/kernel.h>
 #include <linux/serial_core.h>
 
+/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
 /*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
 * Detect them by examining the OEM fields in the SPCR header, similar to PCI
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
                goto done;
        }
 
-       if (qdf2400_erratum_44_present(&table->header))
-               uart = "qdf2400_e44";
+       /*
+        * If the E44 erratum is required, then we need to tell the pl011
+        * driver to implement the work-around.
+        *
+        * The global variable is used by the probe function when it
+        * creates the UARTs, whether or not they're used as a console.
+        *
+        * If the user specifies "traditional" earlycon, the qdf2400_e44
+        * console name matches the EARLYCON_DECLARE() statement, and
+        * SPCR is not used.  Parameter "earlycon" is false.
+        *
+        * If the user specifies "SPCR" earlycon, then we need to update
+        * the console name so that it also says "qdf2400_e44".  Parameter
+        * "earlycon" is true.
+        *
+        * For consistency, if we change the console name, then we do it
+        * for everyone, not just earlycon.
+        */
+       if (qdf2400_erratum_44_present(&table->header)) {
+               qdf2400_e44_present = true;
+               if (earlycon)
+                       uart = "qdf2400_e44";
+       }
+
        if (xgene_8250_erratum_present(table))
                iotype = "mmio32";
 
index b9f907eedbf770ee32359468d2b8d07e57bde667..bfbe1e15412889dfb40e699af7c3623a06d83253 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
-#include <linux/swait.h>
 
 #include <generated/utsrelease.h>
 
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
  * state of the firmware loading.
  */
 struct fw_state {
-       struct swait_queue_head wq;
+       struct completion completion;
        enum fw_status status;
 };
 
 static void fw_state_init(struct fw_state *fw_st)
 {
-       init_swait_queue_head(&fw_st->wq);
+       init_completion(&fw_st->completion);
        fw_st->status = FW_STATUS_UNKNOWN;
 }
 
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
 {
        long ret;
 
-       ret = swait_event_interruptible_timeout(fw_st->wq,
-                               __fw_state_is_done(READ_ONCE(fw_st->status)),
-                               timeout);
+       ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
        if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
                return -ENOENT;
        if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
        WRITE_ONCE(fw_st->status, status);
 
        if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-               swake_up(&fw_st->wq);
+               complete_all(&fw_st->completion);
 }
 
 #define fw_state_start(fw_st)                                  \
        __fw_state_set(fw_st, FW_STATUS_LOADING)
 #define fw_state_done(fw_st)                                   \
        __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st)                                        \
+       __fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait(fw_st)                                   \
        __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
 
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st)     false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
 {
        return fw_st->status == status;
 }
 
+#define fw_state_is_aborted(fw_st)                             \
+       __fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
 #define fw_state_aborted(fw_st)                                        \
        __fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_is_done(fw_st)                                        \
        __fw_state_check(fw_st, FW_STATUS_DONE)
 #define fw_state_is_loading(fw_st)                             \
        __fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st)                             \
-       __fw_state_check(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait_timeout(fw_st, timeout)                  \
        __fw_state_wait_common(fw_st, timeout)
 
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
        return 1; /* need to load */
 }
 
+/*
+ * Batched requests need only one wake; we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+       struct firmware_buf *buf;
+
+       /* Loaded directly? */
+       if (!fw || !fw->priv)
+               return;
+
+       buf = fw->priv;
+       if (!fw_state_is_aborted(&buf->fw_st))
+               fw_state_aborted(&buf->fw_st);
+}
+
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 
  out:
        if (ret < 0) {
+               fw_abort_batch_reqs(fw);
                release_firmware(fw);
                fw = NULL;
        }
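Editor's note: the switch from a swait queue to a completion matters for batched requests: swake_up() wakes a single waiter, while complete_all() releases every sleeper on the shared fw_state and lets any later arrival return immediately. The semantics, reduced to the core API calls:

	/* sketch: one producer releasing N batched waiters */
	struct completion done;

	init_completion(&done);
	/* each of the N requesters: */
	wait_for_completion_killable_timeout(&done, timeout);
	/* the loader, once, on DONE or ABORTED: */
	complete_all(&done); /* current and future waiters all proceed */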
index 6b16ead1da5871abcef5b2233733f281158596a8..ad9749463d4fa9a382afa7f24587bbbe3a2efcc9 100644 (file)
@@ -875,6 +875,56 @@ static void print_version(void)
                printk(KERN_INFO "%s", version);
 }
 
+struct vdc_check_port_data {
+       int     dev_no;
+       char    *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+       struct vio_dev *vdev = to_vio_dev(dev);
+       struct vdc_check_port_data *port_data;
+
+       port_data = (struct vdc_check_port_data *)arg;
+
+       if ((vdev->dev_no == port_data->dev_no) &&
+           (!(strcmp((char *)&vdev->type, port_data->type))) &&
+               dev_get_drvdata(dev)) {
+               /* This device has already been configured
+                * by vdc_port_probe()
+                */
+               return 1;
+       } else {
+               return 0;
+       }
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+       struct vdc_check_port_data port_data;
+       struct device *dev;
+
+       port_data.dev_no = vdev->dev_no;
+       port_data.type = (char *)&vdev->type;
+
+       dev = device_find_child(vdev->dev.parent, &port_data,
+                               vdc_device_probed);
+
+       if (dev)
+               return true;
+
+       return false;
+}
+
 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
        struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                goto err_out_release_mdesc;
        }
 
+       /* Check if this device is part of an mpgroup */
+       if (vdc_port_mpgroup_check(vdev)) {
+               printk(KERN_WARNING
+                       "VIO: Ignoring extra vdisk port %s\n",
+                       dev_name(&vdev->dev));
+               goto err_out_release_mdesc;
+       }
+
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        err = -ENOMEM;
        if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        if (err)
                goto err_out_free_tx_ring;
 
+       /* Note that the device driver_data is used to determine
+        * whether the port has been probed.
+        */
        dev_set_drvdata(&vdev->dev, port);
 
        mdesc_release(hp);
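Editor's note: vdc_port_mpgroup_check() leans on device_find_child(), which returns the first child for which the match callback returns nonzero and takes a reference on it; the canonical pattern drops that reference once the answer is known (a sketch — whether this caller may legitimately keep the reference is a separate question):

	/* sketch: canonical device_find_child() usage */
	dev = device_find_child(parent, &port_data, vdc_device_probed);
	if (dev) {
		put_device(dev); /* device_find_child() took a reference */
		return true;
	}
	return false;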
index 856d5dc02451d44b59695127994017877cd02b38..3b1b6340ba13a2977ffd0a13424ce95322f67f0e 100644 (file)
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
        struct zram *zram = dev_to_zram(dev);
-       char compressor[CRYPTO_MAX_ALG_NAME];
+       char compressor[ARRAY_SIZE(zram->compressor)];
        size_t sz;
 
        strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                return -EBUSY;
        }
 
-       strlcpy(zram->compressor, compressor, sizeof(compressor));
+       strcpy(zram->compressor, compressor);
        up_write(&zram->init_lock);
        return len;
 }
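Editor's note: sizing the scratch buffer from the destination field makes the truncation check guard the final copy: strlcpy() returns the length it would have needed, so anything that survives the check fits, and the later plain strcpy() cannot overflow even if CRYPTO_MAX_ALG_NAME and the field size ever drift apart. The pattern in general form:

	/* sketch: validate against the real destination size, then copy */
	char buf[sizeof(dst->name)];

	if (strlcpy(buf, src, sizeof(buf)) >= sizeof(buf))
		return -EINVAL;     /* would have been truncated */
	strcpy(dst->name, buf);     /* safe: length already checked */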
index afa3ce7d3e729a1ad1485d129aa1d26646292f74..8ad92707e45f23b890203d5c5468d47473acf636 100644 (file)
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        print_once = true;
 #endif
-       pr_notice("random: %s called from %pF with crng_init=%d\n",
+       pr_notice("random: %s called from %pS with crng_init=%d\n",
                  func_name, caller, crng_init);
 }
 
index 37b0698b7193e60be4107a8be107285e55c65877..42896a67aeae38325cbda2acb7ca655c1b43915c 100644 (file)
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
        return -1;
 }
 
+extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
        struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
        const char *names[CPUIDLE_STATE_MAX];
        u32 has_stop_states = 0;
        int i, rc;
+       u32 supported_flags = pnv_get_supported_cpuidle_states();
+
 
        /* Currently we have snooze statically defined */
 
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
        for (i = 0; i < dt_idle_states; i++) {
                unsigned int exit_latency, target_residency;
                bool stops_timebase = false;
+
+               /*
+                * Skip the platform idle state whose flag isn't in
+                * the supported_cpuidle_states flag mask.
+                */
+               if ((flags[i] & supported_flags) != flags[i])
+                       continue;
                /*
                 * If an idle state has exit latency beyond
                 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
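Editor's note: the new guard is a subset test — a state is usable only if every flag it carries is also present in the firmware-reported mask. Worked with illustrative values:

	/* sketch: (flags & supported) != flags  <=>
	 * flags carries a bit outside supported */
	u32 supported = 0x07;
	/* flags = 0x05 -> 0x05 & 0x07 == 0x05, state kept    */
	/* flags = 0x09 -> 0x09 & 0x07 == 0x01, state skipped */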
index 8527a5899a2f7b6a3245a4a52ca4c0283b2f4666..3f819399cd95519a9956ed1d3ecba76fa2aa62b4 100644 (file)
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
        if (ret)
                return ret;
 
-       memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
-       memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
-       for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+       for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                        ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                }
        }
 
+       memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+       memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
        return 0;
 }
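Editor's note: the reorder matters because the loop decides whether the cached engine context must be invalidated by comparing the *old* ipad/opad with the freshly derived values; copying first made the comparison always see equal data and never set needs_inv. The pattern, reduced to its core (a sketch):

	/* sketch: compare against the cached state, then update it */
	for (i = 0; i < n; i++)
		if (cached[i] != fresh[i]) {
			needs_inv = true;
			break;
		}
	memcpy(cached, fresh, n * sizeof(*cached));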
 
index d7e219d2669daf01c935bd18c0836cb88a34e075..66fb40d0ebdbbec521499cd58cf2e1d55c195878 100644 (file)
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
        struct sync_file *sync_file = file->private_data;
 
-       if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+       if (test_bit(POLL_ENABLED, &sync_file->flags))
                dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
        dma_fence_put(sync_file->fence);
        kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &sync_file->wq, wait);
 
-       if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+       if (list_empty(&sync_file->cb.node) &&
+           !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
                if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
                                           fence_check_cb_func) < 0)
                        wake_up_all(&sync_file->wq);
index 5c26488e7a2d7a0320ddf321375b8ff4c200185f..0529e500c5341ed5e0d93bb658cd42d9288b13ad 100644 (file)
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        /* port@2 is the output port */
        ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-       if (ret)
+       if (ret && ret != -ENODEV)
                return ret;
 
        /* Shut down GPIO is optional */
index 5bd93169dac2059a0981cc2f24b8c9032447ba9e..6463fc2c736fd4db5881a259b21848328b7f6cea 100644 (file)
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
                if (ret)
                        return ret;
 
-               if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-                       DRM_ERROR("relocation %u outside object", i);
+               if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+                       DRM_ERROR("relocation %u outside object\n", i);
                        return -EINVAL;
                }
 
index d48fd7c918f880df0b3a27da5e8fa4f09c011b04..73217c281c9a87e51ac2a3d8ddf235b227ba2a1d 100644 (file)
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
        struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
        struct drm_gem_object *obj;
        struct drm_framebuffer *fb;
        int i;
        int ret;
 
-       for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+       for (i = 0; i < info->num_planes; i++) {
+               unsigned int height = (i == 0) ? mode_cmd->height :
+                                    DIV_ROUND_UP(mode_cmd->height, info->vsub);
+               unsigned long size = height * mode_cmd->pitches[i] +
+                                    mode_cmd->offsets[i];
+
                obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
                if (!obj) {
                        DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                }
 
                exynos_gem[i] = to_exynos_gem(obj);
+
+               if (size > exynos_gem[i]->size) {
+                       i++;
+                       ret = -EINVAL;
+                       goto err;
+               }
        }
 
        fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
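Editor's note: for chroma-subsampled formats only the first plane uses the full height; later planes are divided by the format's vertical subsampling factor before the pitch/offset arithmetic. Worked for NV12 (vsub = 2) at 64x65 with a 64-byte pitch and zero offsets (values illustrative):

	/* sketch: per-plane size check, NV12, 64x65, pitch 64 */
	/* plane 0: 65 * 64                 = 4160 bytes      */
	/* plane 1: DIV_ROUND_UP(65, 2) * 64 = 33 * 64 = 2112 */
	/* reject the fb if either exceeds its GEM object size */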
index 700050556242480e6fbf8eb4a8d97c6307e9390d..1648887d3f55248cf055524a2f0e341062f0cd8d 100644 (file)
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
        [RCS] = RCS_AS_CONTEXT_SWITCH,
        [BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_execlist *execlist =
-               &vgpu->execlist[workload->ring_id];
+       int ring_id = workload->ring_id;
+       struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
        struct intel_vgpu_workload *next_workload;
-       struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+       struct list_head *next = workload_q_head(vgpu, ring_id)->next;
        bool lite_restore = false;
        int ret;
 
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
        release_shadow_batch_buffer(workload);
        release_shadow_wa_ctx(&workload->wa_ctx);
 
-       if (workload->status || vgpu->resetting)
+       if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+               /* If workload->status is not successful, the hardware
+                * GPU has hung or something went wrong in i915/GVT,
+                * and GVT won't inject a context switch interrupt into
+                * the guest. To the guest this error is effectively a
+                * vGPU hang, so we should emulate one. If there are
+                * pending workloads already submitted by the guest, we
+                * should clean them up the way the hardware GPU does.
+                *
+                * If we are in the middle of an engine reset, the
+                * pending workloads won't be submitted to the hardware
+                * GPU and will be cleaned up later during the reset,
+                * so doing the workload cleanup here has no impact.
+                */
+               clean_workloads(vgpu, ENGINE_MASK(ring_id));
                goto out;
+       }
 
-       if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+       if (!list_empty(workload_q_head(vgpu, ring_id))) {
                struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
                next_workload = container_of(next,
index 5dad9298b2d5dbbe7b626895806e6008047bbd6a..a26c1705430eb2134d002b68ddcb26d272684bd9 100644 (file)
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        struct intel_gvt_device_info *info = &gvt->device_info;
        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
        struct intel_gvt_mmio_info *e;
+       struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+       int num = gvt->mmio.num_mmio_block;
        struct gvt_firmware_header *h;
        void *firmware;
        void *p;
        unsigned long size, crc32_start;
-       int i;
+       int i, j;
        int ret;
 
        size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
                *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+       for (i = 0; i < num; i++, block++) {
+               for (j = 0; j < block->size; j += 4)
+                       *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+                               I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+                                                       block->offset) + j));
+       }
+
        memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
        crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
index 3a74e79eac2f6c13fef32e1611b539db7b8f46c3..2964a4d01a66da5d2fb06d256ed35fa83fa96a38 100644 (file)
@@ -149,7 +149,7 @@ struct intel_vgpu {
        bool active;
        bool pv_notified;
        bool failsafe;
-       bool resetting;
+       unsigned int resetting_eng;
        void *sched_data;
        struct vgpu_sched_ctl sched_ctl;
 
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
        unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+       unsigned int device;
+       i915_reg_t   offset;
+       unsigned int size;
+       gvt_mmio_func read;
+       gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN      (1 << 6)
 
+       struct gvt_mmio_block *mmio_block;
+       unsigned int num_mmio_block;
+
        DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
        unsigned int num_tracked_mmio;
 };
index 17febe830ff6984e06bb81cb91601a76b67d5f2a..feed9921b3b3eb05e6e8dce5e1b510f4d6fc9479 100644 (file)
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-       unsigned int device;
-       i915_reg_t   offset;
-       unsigned int size;
-       gvt_mmio_func read;
-       gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-       {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-       {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-       {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-               pvinfo_mmio_read, pvinfo_mmio_write},
-       {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-       {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-       {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
                                              unsigned int offset)
 {
        unsigned long device = intel_gvt_get_device_type(gvt);
-       struct gvt_mmio_block *block = gvt_mmio_blocks;
+       struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+       int num = gvt->mmio.num_mmio_block;
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+       for (i = 0; i < num; i++, block++) {
                if (!(device & block->device))
                        continue;
                if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
        gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+       {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+       {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+       {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+               pvinfo_mmio_read, pvinfo_mmio_write},
+       {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+       {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+       {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                        goto err;
        }
 
+       gvt->mmio.mmio_block = mmio_blocks;
+       gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
        gvt_dbg_mmio("traced %u virtual mmio registers\n",
                     gvt->mmio.num_tracked_mmio);
        return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
        gvt_mmio_func func;
        int ret;
 
-       if (WARN_ON(bytes > 4))
+       if (WARN_ON(bytes > 8))
                return -EINVAL;
 
        /*
index 4f7057d62d88b393ce77670f9100bc2d3b246014..22e08eb2d0b7c66faf01741656fb33d1535925f3 100644 (file)
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
                i915_gem_request_put(fetch_and_zero(&workload->req));
 
-               if (!workload->status && !vgpu->resetting) {
+               if (!workload->status && !(vgpu->resetting_eng &
+                                          ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);
 
                        for_each_set_bit(event, workload->pending_events,
index 90c14e6e3ea06b8de36d90284132659eb80f72c6..3deadcbd5a245c039169f1a10c6c91cc791d3a66 100644 (file)
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+       unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
        gvt_dbg_core("------------------------------------------\n");
        gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
                     vgpu->id, dmlr, engine_mask);
-       vgpu->resetting = true;
+
+       vgpu->resetting_eng = resetting_eng;
 
        intel_vgpu_stop_schedule(vgpu);
        /*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                mutex_lock(&gvt->lock);
        }
 
-       intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+       intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
        /* full GPU reset or device model level reset */
        if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                }
        }
 
-       vgpu->resetting = false;
+       vgpu->resetting_eng = 0;
        gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
        gvt_dbg_core("------------------------------------------\n");
 }
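Taken together, the hunks above replace the all-or-nothing resetting boolean with a per-engine mask, so a reset of one engine no longer discards guest context write-back on the others. The invariant, compressed into a sketch (identifiers follow the diff; ENGINE_MASK(id) is the usual i915 BIT(id) helper):

    /* entering reset: remember exactly which engines are affected */
    vgpu->resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

    /* completion path: skip write-back only for engines under reset */
    if (!workload->status && !(vgpu->resetting_eng & ENGINE_MASK(ring_id)))
            update_guest_context(workload);

    /* leaving reset */
    vgpu->resetting_eng = 0;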
index 1032f98add112a66a19fb186a2b28de773caadf8..77fb3980813143d2d9e3432c0ebb994a4bcad032 100644 (file)
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
                return true;
 
        case MUTEX_TRYLOCK_FAILED:
+               *unlock = false;
+               preempt_disable();
                do {
                        cpu_relax();
                        if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-       case MUTEX_TRYLOCK_SUCCESS:
                                *unlock = true;
-                               return true;
+                               break;
                        }
                } while (!need_resched());
+               preempt_enable();
+               return *unlock;
 
-               return false;
+       case MUTEX_TRYLOCK_SUCCESS:
+               *unlock = true;
+               return true;
        }
 
        BUG();
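The rewritten helper above untangles a case label that previously sat inside the retry loop, and it bounds the spin: preemption is disabled so the loop exits via need_resched() instead of being preempted mid-spin. As a standalone sketch of the pattern, assuming only the kernel's mutex_trylock()/cpu_relax()/need_resched() semantics:

    static bool spin_for_mutex(struct mutex *m)
    {
            bool locked = false;

            preempt_disable();
            do {
                    cpu_relax();
                    if (mutex_trylock(m)) {
                            locked = true;
                            break;
                    }
            } while (!need_resched());
            preempt_enable();

            return locked;
    }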
index 9cd22f83b0cfaee680ed06c5bde67db6fc89d0fa..f33d90226704108e71ee5662e01977e32b627fcb 100644 (file)
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
        u32 *cs;
        int i;
 
-       cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+       cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+       *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
        *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
        *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
index 306c6b06b330bfc57f75a992c60468cb9d88e81c..17c4ae7e4e7c51e85de97cb8c803b280115de8cf 100644 (file)
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
                }
 
                /* Program the max register to clamp values > 1.0. */
+               i = lut_size - 1;
                I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
                           drm_color_lut_extract(lut[i].red, 16));
                I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
index 96c2cbd81869e7e55dedc8ce09f53938466bee70..593349be8b9dfce328d20ea3ca5d1b22e41da320 100644 (file)
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
        if (i915.invert_brightness > 0 ||
            dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-               return panel->backlight.max - val;
+               return panel->backlight.max - val + panel->backlight.min;
        }
 
        return val;
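The corrected expression reflects val about the midpoint of [min, max], so the endpoints swap and the result never drops below the panel minimum; the old max - val form did exactly that whenever min > 0. A quick standalone check (plain C, illustrative values):

    #include <assert.h>

    static unsigned int invert(unsigned int val, unsigned int min,
                               unsigned int max)
    {
            return max - val + min;         /* == max - (val - min) */
    }

    int main(void)
    {
            unsigned int min = 10, max = 100;

            assert(invert(min, min, max) == max);   /* endpoints swap */
            assert(invert(max, min, max) == min);
            assert(invert(invert(42, min, max), min, max) == 42); /* involution */
            return 0;
    }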
index b638d192ce5e046cc29e87459cc36e988e498ea8..99d39b2aefa675941d42c86b3c9b5a4d2cda937b 100644 (file)
@@ -5,7 +5,7 @@ config DRM_MSM
        depends on ARCH_QCOM || (ARM && COMPILE_TEST)
        depends on OF && COMMON_CLK
        depends on MMU
-       select QCOM_MDT_LOADER
+       select QCOM_MDT_LOADER if ARCH_QCOM
        select REGULATOR
        select DRM_KMS_HELPER
        select DRM_PANEL
index b4b54f1c24bc1995a032493838d99e8e31dff9e9..f9eae03aa1dcaef072974d60216fb6b09ef81e66 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
 static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 {
        const struct firmware *fw;
+       struct device_node *np;
+       struct resource r;
        phys_addr_t mem_phys;
        ssize_t mem_size;
        void *mem_region = NULL;
        int ret;
 
+       if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+               return -EINVAL;
+
+       np = of_get_child_by_name(dev->of_node, "zap-shader");
+       if (!np)
+               return -ENODEV;
+
+       np = of_parse_phandle(np, "memory-region", 0);
+       if (!np)
+               return -EINVAL;
+
+       ret = of_address_to_resource(np, 0, &r);
+       if (ret)
+               return ret;
+
+       mem_phys = r.start;
+       mem_size = resource_size(&r);
+
        /* Request the MDT file for the firmware */
        ret = request_firmware(&fw, fwname, dev);
        if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
        }
 
        /* Allocate memory for the firmware image */
-       mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+       mem_region = memremap(mem_phys, mem_size,  MEMREMAP_WC);
        if (!mem_region) {
                ret = -ENOMEM;
                goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
                DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 
 out:
+       if (mem_region)
+               memunmap(mem_region);
+
        release_firmware(fw);
 
        return ret;
 }
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-       return -ENODEV;
-}
-#endif
 
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
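The rewritten loader above drops the dedicated child device and DMA allocation in favor of resolving the zap-shader's memory-region phandle directly and mapping the range write-combined. A condensed sketch using the same OF and memremap APIs (error handling and of_node refcounting trimmed):

    static void *map_zap_region(struct device *dev, size_t *size)
    {
            struct device_node *np;
            struct resource r;

            np = of_get_child_by_name(dev->of_node, "zap-shader");
            if (!np)
                    return NULL;

            np = of_parse_phandle(np, "memory-region", 0);
            if (!np || of_address_to_resource(np, 0, &r))
                    return NULL;

            *size = resource_size(&r);
            return memremap(r.start, *size, MEMREMAP_WC); /* pair with memunmap() */
    }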
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        gpu->funcs->flush(gpu);
 }
 
-struct a5xx_hwcg {
+static const struct {
        u32 offset;
        u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
 };
 
-static const struct {
-       int (*test)(struct adreno_gpu *gpu);
-       const struct a5xx_hwcg *regs;
-       unsigned int count;
-} a5xx_hwcg_regs[] = {
-       { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-               const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
        unsigned int i;
 
-       for (i = 0; i < count; i++)
-               gpu_write(gpu, regs[i].offset, regs[i].value);
+       for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+               gpu_write(gpu, a5xx_hwcg[i].offset,
+                       state ? a5xx_hwcg[i].value : 0);
 
-       gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-       gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-               if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-                       _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-                               a5xx_hwcg_regs[i].count);
-                       return;
-               }
-       }
+       gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+       gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
 }
 
 static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
        return ret;
 }
 
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-       struct device_node *node;
-       int ret;
-
-       if (dev->parent)
-               return 0;
-
-       /* Find the sub-node for the zap shader */
-       node = of_get_child_by_name(parent->of_node, "zap-shader");
-       if (!node) {
-               DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-               return -ENODEV;
-       }
-
-       dev->parent = parent;
-       dev->of_node = node;
-       dev_set_name(dev, "adreno_zap_shader");
-
-       ret = device_register(dev);
-       if (ret) {
-               DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-               goto out;
-       }
-
-       ret = of_reserved_mem_device_init(dev);
-       if (ret) {
-               DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-               device_unregister(dev);
-       }
-
-out:
-       if (ret)
-               dev->parent = NULL;
-
-       return ret;
-}
-
 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 {
        static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
                return -ENODEV;
        }
 
-       ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
-       if (!ret)
-               ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-                       adreno_gpu->info->zapfw);
+       ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
 
        loaded = !ret;
 
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
        /* Enable HWCG */
-       a5xx_enable_hwcg(gpu);
+       a5xx_set_hwcg(gpu, true);
 
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
        DBG("%s", gpu->name);
 
-       if (a5xx_gpu->zap_dev.parent)
-               device_unregister(&a5xx_gpu->zap_dev);
-
        if (a5xx_gpu->pm4_bo) {
                if (a5xx_gpu->pm4_iova)
                        msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
        0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-       0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-       0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-       0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-       0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-       0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-       0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-       0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-       0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-       0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-       0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-       0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-       0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-       0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-       0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-       0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-       0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-       0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-       0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-       0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-       0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-       0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-       0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-       0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-       0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-       ~0
+       0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+       0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+       0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+       0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+       0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+       0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+       0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+       0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+       0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+       0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+       0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+       0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+       0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+       0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+       0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+       0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+       0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+       0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+       0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+       0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+       0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+       0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+       0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+       0xB9A0, 0xB9BF, ~0
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
        seq_printf(m, "status:   %08x\n",
                        gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+       /*
+        * Temporarily disable hardware clock gating before going into
+        * adreno_show to avoid issues while reading the registers
+        */
+       a5xx_set_hwcg(gpu, false);
        adreno_show(gpu, m);
+       a5xx_set_hwcg(gpu, true);
 }
 #endif
 
index 6638bc85645dbad4adf3689bd7d9bae9441173c2..1137092241d593c34e4607e3c723acfb74861972 100644 (file)
@@ -36,8 +36,6 @@ struct a5xx_gpu {
        uint32_t gpmu_dwords;
 
        uint32_t lm_leakage;
-
-       struct device zap_dev;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
 }
 
 bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
 #endif /* __A5XX_GPU_H__ */
index f1ab2703674a2f5d4f533828bb6c8b49df24f571..7414c6bbd582e9597e502305885f0dec909859be 100644 (file)
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
                *value = adreno_gpu->base.fast_rate;
                return 0;
        case MSM_PARAM_TIMESTAMP:
-               if (adreno_gpu->funcs->get_timestamp)
-                       return adreno_gpu->funcs->get_timestamp(gpu, value);
+               if (adreno_gpu->funcs->get_timestamp) {
+                       int ret;
+
+                       pm_runtime_get_sync(&gpu->pdev->dev);
+                       ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+                       pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+                       return ret;
+               }
                return -EINVAL;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
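The hunk above brackets the hardware read with a runtime-PM reference so the GPU is guaranteed to be powered while its timestamp registers are touched. The general shape, as a sketch (read_hw_timestamp() is hypothetical; the pm_runtime calls are the standard kernel API):

    static int read_timestamp_powered(struct device *dev, uint64_t *value)
    {
            int ret;

            pm_runtime_get_sync(dev);          /* power up, or keep awake */
            ret = read_hw_timestamp(value);
            pm_runtime_put_autosuspend(dev);   /* drop ref, arm autosuspend */

            return ret;
    }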
index 9e9c5696bc03547b813ecf2ae56c535265e64bcd..c7b612c3d7717a02d8d64be21dea68f183163917 100644 (file)
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
        struct msm_dsi_phy_clk_request *clk_req)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       int ret;
+
+       ret = dsi_calc_clk_rate(msm_host);
+       if (ret) {
+               pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+               return;
+       }
 
        clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
        clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
                                        struct drm_display_mode *mode)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-       int ret;
 
        if (msm_host->mode) {
                drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
                return -ENOMEM;
        }
 
-       ret = dsi_calc_clk_rate(msm_host);
-       if (ret) {
-               pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-               return ret;
-       }
-
        return 0;
 }
 
index cb5415d6c04b7ab6e1e80503d26b32891a934dee..735a87a699fafafb99b179752e0e7f3c19491389 100644 (file)
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
        unsigned long flags;
-       enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-       enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+       enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+       enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
        int i, plane_cnt = 0;
        bool bg_alpha_enabled = false;
        u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        if (!handle) {
                DBG("Cursor off");
                cursor_enable = false;
+               mdp5_enable(mdp5_kms);
                goto set_cursor;
        }
 
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
        get_roi(crtc, &roi_w, &roi_h);
 
+       mdp5_enable(mdp5_kms);
+
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
        crtc_flush(crtc, flush_mask);
 
 end:
+       mdp5_disable(mdp5_kms);
        if (old_bo) {
                drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
                /* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
        get_roi(crtc, &roi_w, &roi_h);
 
+       mdp5_enable(mdp5_kms);
+
        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
        crtc_flush(crtc, flush_mask);
 
+       mdp5_disable(mdp5_kms);
+
        return 0;
 }
 
index 97f3294fbfc6f9d26f453dac36d5dbb3cb00e93a..70bef51245af89d5bbb292f4495f2fd79dcc9e33 100644 (file)
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
        struct mdp5_interface *intf = mdp5_encoder->intf;
 
        if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-               mdp5_cmd_encoder_disable(encoder);
+               mdp5_cmd_encoder_enable(encoder);
        else
                mdp5_vid_encoder_enable(encoder);
 }
index 5d13fa5381ee37705a0c282bf023b4782fc19268..1c603aef3c59cdff286ce38e84c3e6a4745dd0c2 100644 (file)
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
                const char *name, bool mandatory)
 {
        struct device *dev = &pdev->dev;
-       struct clk *clk = devm_clk_get(dev, name);
+       struct clk *clk = msm_clk_get(pdev, name);
        if (IS_ERR(clk) && mandatory) {
                dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
                return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
        }
 
        /* mandatory clocks: */
-       ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
        if (ret)
                goto fail;
 
        /* optional clocks: */
-       get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+       get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
        /* we need to set a default rate before enabling.  Set a safe
         * rate first, then figure out hw revision, and then set a
index fe3a4de1a4331ff86f0b4f0cc85a48b208bca3b5..61f39c86dd09e53a5860ce880b0f7955e5008c05 100644 (file)
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
        struct mdp5_hw_pipe *right_hwpipe;
        const struct mdp_format *format;
        uint32_t nplanes, config = 0;
-       struct phase_step step = { 0 };
-       struct pixel_ext pe = { 0 };
+       struct phase_step step = { { 0 } };
+       struct pixel_ext pe = { { 0 } };
        uint32_t hdecm = 0, vdecm = 0;
        uint32_t pix_format;
        unsigned int rotation;
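This and the earlier blend_setup() hunk are the same fix: when the first member of a struct is itself an array (or struct), GCC's -Wmissing-braces wants one brace level per aggregate in the zero-initializer. Illustrative plain C (struct layout simplified):

    struct phase_step {
            unsigned int x[2];      /* first member is an array */
            unsigned int y[2];
    };

    static struct phase_step a = { 0 };     /* zeroes everything, but warns */
    static struct phase_step b = { { 0 } }; /* braces match the nesting: quiet */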
index 65f35544c1ec8859018c2afb713fa5120fc43272..a0c60e738db8d7be5e841311832e82f0b45bd7fa 100644 (file)
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
                struct page **pages;
 
                vma = add_vma(obj, aspace);
-               if (IS_ERR(vma))
-                       return PTR_ERR(vma);
+               if (IS_ERR(vma)) {
+                       ret = PTR_ERR(vma);
+                       goto unlock;
+               }
 
                pages = get_pages(obj);
                if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
 fail:
        del_vma(vma);
-
+unlock:
        mutex_unlock(&msm_obj->lock);
        return ret;
 }
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
        if (use_vram) {
                struct msm_gem_vma *vma;
                struct page **pages;
+               struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+               mutex_lock(&msm_obj->lock);
 
                vma = add_vma(obj, NULL);
+               mutex_unlock(&msm_obj->lock);
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto fail;
index 6bfca74701410050b20d1a136ae5cdc4454b1306..8a75c0bd8a78b1481e30fdab63f2d14bfc64536d 100644 (file)
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
                struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 {
        struct msm_gem_submit *submit;
-       uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-               (nr_cmds * sizeof(submit->cmd[0]));
+       uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+               ((u64)nr_cmds * sizeof(submit->cmd[0]));
 
        if (sz > SIZE_MAX)
                return NULL;
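The (u64) casts above matter because nr_bos and nr_cmds arrive from userspace as 32-bit counts: without widening an operand first, the multiply happens in 32 bits and can wrap before the result reaches the 64-bit sz, defeating the sz > SIZE_MAX check. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t nr = 0x20000000;              /* attacker-chosen count */
            uint64_t wrapped = nr * 16;            /* 32-bit multiply: wraps to 0 */
            uint64_t widened = (uint64_t)nr * 16;  /* 0x200000000: checkable */

            printf("wrapped=%llx widened=%llx\n",
                   (unsigned long long)wrapped, (unsigned long long)widened);
            return 0;
    }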
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+       if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
                ret = submit_fence_sync(submit);
                if (ret)
                        goto out;
index c36321bc87148864db09bd0af4fc38a39cb182f9..d34e331554f3903eaded86cf12fdd4a4ef24507a 100644 (file)
@@ -42,7 +42,7 @@ void
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-       if (!vma->iova)
+       if (!aspace || !vma->iova)
                return;
 
        if (aspace->mmu) {
index c7c84d34d97e20308b926077e7ed4ce6e8d77281..88582af8bd89745b7c78332cf415dd0bc9f24e23 100644 (file)
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
        /* Create output path objects for each VBIOS display path. */
        i = -1;
        while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+               if (ver < 0x40) /* No support for chipsets prior to NV50. */
+                       break;
                if (dcbE.type == DCB_OUTPUT_UNUSED)
                        continue;
                if (dcbE.type == DCB_OUTPUT_EOL)
index 5d450332c2fd79fd8c1052aca84d7d15c5db3ef8..2900f1410d959bc9f4002a6f0c38a3aae5a9c59a 100644 (file)
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
-       int ret;
+       int ret, i;
 
        ret = pm_runtime_get_sync(vop->dev);
        if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
        }
 
        memcpy(vop->regs, vop->regsbak, vop->len);
+       /*
+        * We need to make sure that all windows are disabled before we
+        * enable the crtc. Otherwise we might try to scan from a destroyed
+        * buffer later.
+        */
+       for (i = 0; i < vop->data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+               const struct vop_win_data *win = vop_win->data;
+
+               spin_lock(&vop->reg_lock);
+               VOP_WIN_SET(vop, win, enable, 0);
+               spin_unlock(&vop->reg_lock);
+       }
+
        vop_cfg_done(vop);
 
        /*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
-       int i;
 
        WARN_ON(vop->event);
 
        rockchip_drm_psr_deactivate(&vop->crtc);
 
-       /*
-        * We need to make sure that all windows are disabled before we
-        * disable that crtc. Otherwise we might try to scan from a destroyed
-        * buffer later.
-        */
-       for (i = 0; i < vop->data->win_size; i++) {
-               struct vop_win *vop_win = &vop->win[i];
-               const struct vop_win_data *win = vop_win->data;
-
-               spin_lock(&vop->reg_lock);
-               VOP_WIN_SET(vop, win, enable, 0);
-               spin_unlock(&vop->reg_lock);
-       }
-
-       vop_cfg_done(vop);
-
        drm_crtc_vblank_off(crtc);
 
        /*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
         * Src.x1 can be odd when do clip, but yuv plane start point
         * need align with 2 pixel.
         */
-       if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+       if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+               DRM_ERROR("Invalid source: YUV format does not support odd xpos\n");
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        spin_lock(&vop->reg_lock);
 
        VOP_WIN_SET(vop, win, format, format);
-       VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+       VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
        if (is_yuv_support(fb->format->format)) {
                int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
                dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-               VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+               VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
                VOP_WIN_SET(vop, win, uv_mst, dma_addr);
        }
 
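The two DIV_ROUND_UP changes above only differ from the old >> 2 when a pitch is not a multiple of 4 bytes; shifting then rounds the virtual stride down and clips the tail of every line. A quick plain-C check:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            assert((1026 >> 2) == 256);             /* truncates */
            assert(DIV_ROUND_UP(1026, 4) == 257);   /* covers the full line */
            return 0;
    }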
index 9979fd0c22821d7efa3d7054468e0914619e0692..27eefbfcf3d05f3ad03e3b9f6fe2ae56f68e7684 100644 (file)
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
        act_height = (src_h + vskiplines - 1) / vskiplines;
 
+       if (act_height == dst_h)
+               return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
        return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
 
index 2c4817fb08902427df09223b7ab4046241fc6c77..8fe5b184b4e8a945d68c201a3a29be531594e2d2 100644 (file)
@@ -7,7 +7,6 @@ config DRM_STM
        select DRM_PANEL
        select VIDEOMODE_HELPERS
        select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-       default y
 
        help
          Enable support for the on-chip display controller on
index 1006b230b236f1c977d1c1a7d9bf32a268b7e263..65fa29591d21641fd1bd4e4484d8daeef56f9bdb 100644 (file)
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
 
 config I2C_VERSATILE
        tristate "ARM Versatile/Realview I2C bus support"
-       depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+       depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
        select I2C_ALGOBIT
        help
          Say yes if you want to support the I2C serial bus on ARM's Versatile
index 2ea6d0d25a01a33069bce293ab6858947a0cb01f..143a8fd582b4aeb905ea25b416261a5c1f44a6e9 100644 (file)
@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        }
 
        acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+       /* Some broken DSDTs use 1 MiHz instead of 1 MHz */
+       if (acpi_speed == 1048576)
+               acpi_speed = 1000000;
        /*
         * Find bus speed from the "clock-frequency" device property, ACPI
         * or by using fast mode if neither is set.
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        if (dev->clk_freq != 100000 && dev->clk_freq != 400000
            && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
                dev_err(&pdev->dev,
-                       "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+                       "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+                       dev->clk_freq);
                ret = -EINVAL;
                goto exit_reset;
        }
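The acpi_speed quirk in the first hunk is pure arithmetic: some DSDTs report the binary mega (2^20 = 1048576 Hz, i.e. 1 MiHz) where the decimal 10^6 is meant, and the clk_freq whitelist in the second hunk would otherwise reject the bus. The normalization is a single comparison:

    unsigned int acpi_speed = 1 << 20;   /* value from a broken DSDT */

    if (acpi_speed == 1048576)           /* 1 MiHz -> 1 MHz */
            acpi_speed = 1000000;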
index 4842ec3a5451ed479446fc13352405aca45697d2..a9126b3cda61bc95f6a9d1282821ab7552484534 100644 (file)
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
                dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }
 
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client)
+{
+       if (!(client && matches))
+               return NULL;
+
+       return acpi_match_device(matches, &client->dev);
+}
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
                                           void *data, void **return_value)
 {
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
 
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
 {
        struct i2c_adapter *adapter = i2c_verify_adapter(dev);
 
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
        return ACPI_HANDLE(dev) == (acpi_handle)data;
 }
 
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
 {
        return ACPI_COMPANION(dev) == data;
 }
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
        struct device *dev;
 
        dev = bus_find_device(&i2c_bus_type, NULL, handle,
-                             i2c_acpi_match_adapter);
+                             i2c_acpi_find_match_adapter);
        return dev ? i2c_verify_adapter(dev) : NULL;
 }
 
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
        struct device *dev;
 
-       dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+       dev = bus_find_device(&i2c_bus_type, NULL, adev,
+                             i2c_acpi_find_match_device);
        return dev ? i2c_verify_client(dev) : NULL;
 }
 
index c89dac7fd2e7b793217119f2ccee849cf75ebcfe..12822a4b8f8f09b5c080f7338a89e0ea00cbb4f2 100644 (file)
@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev)
         * Tree match table entry is supplied for the probing device.
         */
        if (!driver->id_table &&
+           !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
            !i2c_of_match_device(dev->driver->of_match_table, client))
                return -ENODEV;
 
index 3b63f5e5b89cbda662a580c387bfa2d23e2ebcef..3d3d9bf02101bddf06fc6597f107cc3ac8e3beb8 100644 (file)
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
 int i2c_check_7bit_addr_validity_strict(unsigned short addr);
 
 #ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client);
 void i2c_acpi_register_devices(struct i2c_adapter *adap);
 #else /* CONFIG_ACPI */
 static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client)
+{
+       return NULL;
+}
 #endif /* CONFIG_ACPI */
 extern struct notifier_block i2c_acpi_notifier;
 
index 2c64d0e0740f0db0c4427af9d6602bc8aaa0ea24..17121329bb793a615e8969a15327e3f07035cdbb 100644 (file)
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
          different sets of pins at run-time.
 
          This driver can also be built as a module. If so, the module will be
-         called pinctrl-i2cmux.
+         called i2c-mux-pinctrl.
 
 config I2C_MUX_REG
        tristate "Register-based I2C multiplexer"
index 6b5d3be283c4e7e00f72bb32e550b52ab06e1526..807299dd45ebf0663fbc97b3831bd8f92148ec20 100644 (file)
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
        struct regmap *regmap;
        int irq;
        struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
-       atomic_t active_intr;
        struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
        struct mutex mutex;
        u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
                goto out_fix_power_state;
        }
 
-       if (state)
-               atomic_inc(&data->active_intr);
-       else
-               atomic_dec(&data->active_intr);
-
        return 0;
 
 out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
        struct bmc150_accel_data *data = iio_priv(indio_dev);
 
        mutex_lock(&data->mutex);
-       if (atomic_read(&data->active_intr))
-               bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+       bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
        bmc150_accel_fifo_set_mode(data);
        mutex_unlock(&data->mutex);
 
index 07d1489cd457a6b5445b8b3ba35dad95b1792acc..e44f62bf9caa9f1a45c6699875b885d497bb0a7a 100644 (file)
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_ihl = 0x02,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_od = 0x40,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                                .en_mask = 0x08,
                        },
                },
+               .sim = {
+                       .addr = 0x24,
+                       .value = BIT(0),
+               },
                .multi_read_bit = false,
                .bootime = 2,
        },
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_int1 = 0x04,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x21,
+                       .value = BIT(1),
+               },
                .multi_read_bit = true,
                .bootime = 2, /* guess */
        },
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_od = 0x40,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x21,
+                       .value = BIT(7),
+               },
                .multi_read_bit = false,
                .bootime = 2, /* guess */
        },
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .addr_ihl = 0x22,
                        .mask_ihl = 0x80,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_int1 = 0x04,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x21,
+                       .value = BIT(1),
+               },
                .multi_read_bit = false,
                .bootime = 2,
        },
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_ihl = 0x02,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
index e0ea411a0b2df9563085c70552086946843ba2ca..c02b23d675cbc1540ec47769515da714c784cf67 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/iio/iio.h>
 #include <linux/iio/driver.h>
+#include <linux/iopoll.h>
 
 #define ASPEED_RESOLUTION_BITS         10
 #define ASPEED_CLOCKS_PER_SAMPLE       12
 
 #define ASPEED_ENGINE_ENABLE           BIT(0)
 
+#define ASPEED_ADC_CTRL_INIT_RDY       BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME   500
+#define ASPEED_ADC_INIT_TIMEOUT                500000
+
 struct aspeed_adc_model_data {
        const char *model_name;
        unsigned int min_sampling_rate; // Hz
        unsigned int max_sampling_rate; // Hz
        unsigned int vref_voltage;      // mV
+       bool wait_init_sequence;
 };
 
 struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
                goto scaler_error;
        }
 
+       model_data = of_device_get_match_data(&pdev->dev);
+
+       if (model_data->wait_init_sequence) {
+               /* Enable engine in normal mode. */
+               writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+                      data->base + ASPEED_REG_ENGINE_CONTROL);
+
+               /* Wait for the initial sequence to complete. */
+               ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+                                        adc_engine_control_reg_val,
+                                        adc_engine_control_reg_val &
+                                        ASPEED_ADC_CTRL_INIT_RDY,
+                                        ASPEED_ADC_INIT_POLLING_TIME,
+                                        ASPEED_ADC_INIT_TIMEOUT);
+               if (ret)
+                       goto scaler_error;
+       }
+
        /* Start all channels in normal mode. */
        ret = clk_prepare_enable(data->clk_scaler->clk);
        if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
        .vref_voltage = 1800, // mV
        .min_sampling_rate = 1,
        .max_sampling_rate = 1000000,
+       .wait_init_sequence = true,
 };
 
 static const struct of_device_id aspeed_adc_matches[] = {
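The init-sequence wait above uses readl_poll_timeout() from <linux/iopoll.h>: poll a register until a condition on its value holds, sleeping between reads, and return -ETIMEDOUT once the overall budget expires. Condensed to its essentials (macros as defined in this file):

    static int aspeed_adc_wait_init(void __iomem *base)
    {
            u32 val;

            /* check every 500 us, give up after 500 ms */
            return readl_poll_timeout(base + ASPEED_REG_ENGINE_CONTROL, val,
                                      val & ASPEED_ADC_CTRL_INIT_RDY,
                                      ASPEED_ADC_INIT_POLLING_TIME,
                                      ASPEED_ADC_INIT_TIMEOUT);
    }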
index 64799ad7ebad02a797607470678aa500842ed99f..462a99c13e7a210a74f1d9b2a9c87de7ebc3fad6 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/iio/driver.h>
 
 #define AXP288_ADC_EN_MASK             0xF1
+#define AXP288_ADC_TS_PIN_GPADC                0xF2
+#define AXP288_ADC_TS_PIN_ON           0xF3
 
 enum axp288_adc_id {
        AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
        return IIO_VAL_INT;
 }
 
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+                               unsigned long address)
+{
+       int ret;
+
+       /* channels other than GPADC do not need to switch TS pin */
+       if (address != AXP288_GP_ADC_H)
+               return 0;
+
+       ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+       if (ret)
+               return ret;
+
+       /* When switching to the GPADC pin give things some time to settle */
+       if (mode == AXP288_ADC_TS_PIN_GPADC)
+               usleep_range(6000, 10000);
+
+       return 0;
+}
+
 static int axp288_adc_read_raw(struct iio_dev *indio_dev,
                        struct iio_chan_spec const *chan,
                        int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        mutex_lock(&indio_dev->mlock);
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
+               if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+                                       chan->address)) {
+                       dev_err(&indio_dev->dev, "GPADC mode\n");
+                       ret = -EINVAL;
+                       break;
+               }
                ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+               if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+                                               chan->address))
+                       dev_err(&indio_dev->dev, "TS pin restore\n");
                break;
        default:
                ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        return ret;
 }
 
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+       /* The ADC should always be enabled for the internal fuel gauge to function */
+       if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+               return -EIO;
+
+       return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
 static const struct iio_info axp288_adc_iio_info = {
        .read_raw = &axp288_adc_read_raw,
        .driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
         * Set the ADC to the enabled state at all times, including system
         * suspend; otherwise internal fuel gauge functionality may be affected.
         */
-       ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+       ret = axp288_adc_set_state(axp20x->regmap);
        if (ret) {
                dev_err(&pdev->dev, "unable to enable ADC device\n");
                return ret;
index 81d4c39e414a4da6b0f8df0ebd8922d371a909d7..137f577d94326a299e0f944edfd108bdc2db31fe 100644 (file)
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
 
 err:
        pm_runtime_put_autosuspend(indio_dev->dev.parent);
+       disable_irq(irq);
        mutex_unlock(&info->mutex);
 
        return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
                complete(&info->completion);
 
 out:
-       disable_irq_nosync(info->temp_data_irq);
        return IRQ_HANDLED;
 }
 
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
                complete(&info->completion);
 
 out:
-       disable_irq_nosync(info->fifo_data_irq);
        return IRQ_HANDLED;
 }
 
index 01fc76f7d6602c7b14c5f0cd4ea0c01090278087..c168e0db329ab49b6b59d720cc62cb2b37352088 100644 (file)
@@ -77,7 +77,7 @@
 #define VF610_ADC_ADSTS_MASK           0x300
 #define VF610_ADC_ADLPC_EN             0x80
 #define VF610_ADC_ADHSC_EN             0x400
-#define VF610_ADC_REFSEL_VALT          0x100
+#define VF610_ADC_REFSEL_VALT          0x800
 #define VF610_ADC_REFSEL_VBG           0x1000
 #define VF610_ADC_ADTRG_HARD           0x2000
 #define VF610_ADC_AVGS_8               0x4000
index 79c8c7cd70d5c6d74fc2e32cad372f8644233c6b..6e6a1ecc99ddf4b69252b6b4232c9cda6f00d4be 100644 (file)
@@ -550,6 +550,31 @@ out:
 }
 EXPORT_SYMBOL(st_sensors_read_info_raw);
 
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+                       const struct st_sensor_settings *sensor_settings)
+{
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+       struct device_node *np = sdata->dev->of_node;
+       struct st_sensors_platform_data *pdata;
+
+       pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
+       if (((np && of_property_read_bool(np, "spi-3wire")) ||
+            (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+               int err;
+
+               err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+                                           sensor_settings->sim.addr,
+                                           sensor_settings->sim.value);
+               if (err < 0) {
+                       dev_err(&indio_dev->dev,
+                               "failed to init interface mode\n");
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
 int st_sensors_check_device_support(struct iio_dev *indio_dev,
                        int num_sensors_list,
                        const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
                return -ENODEV;
        }
 
+       err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+       if (err < 0)
+               return err;
+
        if (sensor_settings[i].wai_addr) {
                err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
                                           sensor_settings[i].wai_addr, &wai);
index e7d4ea75e007c0bd82ef087812524258ec69f490..7599693f7fe9597cb750319ccb2635362c1dc9f0 100644 (file)
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
        struct tsl2563_chip *chip = iio_priv(dev_info);
 
        iio_push_event(dev_info,
-                      IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+                      IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
                                            0,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_EITHER),
index aa61ec15c1396ca3925ecf1a099fbf91a302dae7..f1bce05ffa135703792f24317db81375d3e4ebb3 100644 (file)
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .mask_od = 0x40,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
-               .multi_read_bit = true,
+               .multi_read_bit = false,
                .bootime = 2,
        },
 };
index 01236cef7bfb1affe07e4214cf6d8baf6ca2a2a1..437522ca97b4b62fd79b8e84fa643ff9c4751ccd 100644 (file)
@@ -61,6 +61,7 @@ struct addr_req {
        void (*callback)(int status, struct sockaddr *src_addr,
                         struct rdma_dev_addr *addr, void *context);
        unsigned long timeout;
+       struct delayed_work work;
        int status;
        u32 seq;
 };
@@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);
 
-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 {
        unsigned long delay;
 
@@ -303,7 +304,7 @@ static void set_timeout(unsigned long time)
        if ((long)delay < 0)
                delay = 0;
 
-       mod_delayed_work(addr_wq, &work, delay);
+       mod_delayed_work(addr_wq, delayed_work, delay);
 }
 
 static void queue_req(struct addr_req *req)
@@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req)
 
        list_add(&req->list, &temp_req->list);
 
-       if (req_list.next == &req->list)
-               set_timeout(req->timeout);
+       set_timeout(&req->work, req->timeout);
        mutex_unlock(&lock);
 }
 
@@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in,
        return ret;
 }
 
+static void process_one_req(struct work_struct *_work)
+{
+       struct addr_req *req;
+       struct sockaddr *src_in, *dst_in;
+
+       mutex_lock(&lock);
+       req = container_of(_work, struct addr_req, work.work);
+
+       if (req->status == -ENODATA) {
+               src_in = (struct sockaddr *)&req->src_addr;
+               dst_in = (struct sockaddr *)&req->dst_addr;
+               req->status = addr_resolve(src_in, dst_in, req->addr,
+                                          true, req->seq);
+               if (req->status && time_after_eq(jiffies, req->timeout)) {
+                       req->status = -ETIMEDOUT;
+               } else if (req->status == -ENODATA) {
+                       /* requeue the work to retry the resolution */
+                       set_timeout(&req->work, req->timeout);
+                       mutex_unlock(&lock);
+                       return;
+               }
+       }
+       list_del(&req->list);
+       mutex_unlock(&lock);
+
+       req->callback(req->status, (struct sockaddr *)&req->src_addr,
+               req->addr, req->context);
+       put_client(req->client);
+       kfree(req);
+}
+
 static void process_req(struct work_struct *work)
 {
        struct addr_req *req, *temp_req;
@@ -591,20 +622,23 @@ static void process_req(struct work_struct *work)
                                                   true, req->seq);
                        if (req->status && time_after_eq(jiffies, req->timeout))
                                req->status = -ETIMEDOUT;
-                       else if (req->status == -ENODATA)
+                       else if (req->status == -ENODATA) {
+                               set_timeout(&req->work, req->timeout);
                                continue;
+                       }
                }
                list_move_tail(&req->list, &done_list);
        }
 
-       if (!list_empty(&req_list)) {
-               req = list_entry(req_list.next, struct addr_req, list);
-               set_timeout(req->timeout);
-       }
        mutex_unlock(&lock);
 
        list_for_each_entry_safe(req, temp_req, &done_list, list) {
                list_del(&req->list);
+               /* It is safe to cancel other work items from this work item
+                * because only one work item can run at a time on this
+                * single-threaded workqueue.
+                */
+               cancel_delayed_work(&req->work);
                req->callback(req->status, (struct sockaddr *) &req->src_addr,
                        req->addr, req->context);
                put_client(req->client);
@@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
        req->context = context;
        req->client = client;
        atomic_inc(&client->refcount);
+       INIT_DELAYED_WORK(&req->work, process_one_req);
        req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
 
        req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
                        req->status = -ECANCELED;
                        req->timeout = jiffies;
                        list_move(&req->list, &req_list);
-                       set_timeout(req->timeout);
+                       set_timeout(&req->work, req->timeout);
                        break;
                }
        }
@@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
        if (event == NETEVENT_NEIGH_UPDATE) {
                struct neighbour *neigh = ctx;
 
-               if (neigh->nud_state & NUD_VALID) {
-                       set_timeout(jiffies);
-               }
+               if (neigh->nud_state & NUD_VALID)
+                       set_timeout(&work, jiffies);
        }
        return 0;
 }
@@ -820,7 +854,7 @@ static struct notifier_block nb = {
 
 int addr_init(void)
 {
-       addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+       addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
        if (!addr_wq)
                return -ENOMEM;
 
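
Context for the ib_addr hunks above: the old process_req() rescanned the whole request list from one work item, while the patch gives every request its own delayed work (process_one_req()) and switches ib_addr to an ordered workqueue, so at most one item runs at a time and cancel_delayed_work() from inside a work item is safe. A minimal, hypothetical userspace sketch of the per-request retry-until-deadline pattern (names and the busy loop are illustrative only; the kernel requeues via set_timeout()):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

enum req_status { REQ_DONE, REQ_NODATA, REQ_TIMEDOUT };

struct addr_request {
    time_t deadline;            /* absolute deadline, like req->timeout */
    enum req_status status;
};

/* stand-in for addr_resolve(): pretend the answer is never ready */
static enum req_status resolve_once(struct addr_request *req)
{
    (void)req;
    return REQ_NODATA;
}

/* one step, mirroring process_one_req(): resolve, then finish,
 * time out, or report "requeue me" */
static bool process_one(struct addr_request *req, time_t now)
{
    req->status = resolve_once(req);
    if (req->status != REQ_DONE && now >= req->deadline) {
        req->status = REQ_TIMEDOUT;
        return true;            /* done, with an error */
    }
    if (req->status == REQ_NODATA)
        return false;           /* caller requeues, like set_timeout() */
    return true;                /* resolved */
}

int main(void)
{
    struct addr_request req = { .deadline = time(NULL) + 2 };
    while (!process_one(&req, time(NULL)))
        ;       /* busy retry here; the kernel requeues delayed work */
    printf("final status: %d\n", req.status);
    return 0;
}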
index 2c98533a0203b084fb198a3eb8088a0bac59522c..c551d2b275fdf339310a087bef9c6e821d7c7e09 100644 (file)
@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            int out_len)
 {
        struct ib_uverbs_resize_cq      cmd;
-       struct ib_uverbs_resize_cq_resp resp;
+       struct ib_uverbs_resize_cq_resp resp = {};
        struct ib_udata                 udata;
        struct ib_cq                    *cq;
        int                             ret = -EINVAL;
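The `resp = {}` change above zero-initializes the response before it is copied out to user space, so fields the handler never writes cannot leak kernel stack contents (`{}` is the GNU/C23 empty-brace initializer common in kernel code). A small sketch of the idea, with a hypothetical struct and the standard C spelling `{0}`:

#include <stdio.h>
#include <string.h>

struct resize_resp {
    unsigned int cqe;
    unsigned int reserved;      /* may never be written on some paths */
};

int main(void)
{
    struct resize_resp resp = {0};      /* every named field defined */
    resp.cqe = 128;

    unsigned char out[sizeof(resp)];
    memcpy(out, &resp, sizeof(resp));   /* analogue of copy_to_user() */
    printf("reserved copied out as %u, not stack garbage\n", resp.reserved);
    return 0;
}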
index 3d2609608f589625d0077167fa2e66a00430b89f..c023e2c81b8f2b06443452f91edcc506b46b6d17 100644 (file)
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);
 
+       kobject_put(&file->device->kobj);
        kfree(file);
 }
 
@@ -917,7 +918,6 @@ err:
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
        struct ib_uverbs_file *file = filp->private_data;
-       struct ib_uverbs_device *dev = file->device;
 
        mutex_lock(&file->cleanup_mutex);
        if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
                         ib_uverbs_release_async_event_file);
 
        kref_put(&file->ref, ib_uverbs_release_file);
-       kobject_put(&dev->kobj);
 
        return 0;
 }
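The two uverbs hunks above move the device kobject_put() from ib_uverbs_close() into ib_uverbs_release_file(), so the device reference is dropped only when the last reference to the file goes away, not when the file descriptor is closed while async events may still hold the file. A toy refcount sketch of that ordering (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct device { int refs; };
struct file   { int refs; struct device *dev; };

static void device_put(struct device *d)
{
    if (--d->refs == 0) { printf("device freed\n"); free(d); }
}

/* release runs at the *last* file reference drop - the right place to
 * also drop the device reference, as in ib_uverbs_release_file() */
static void file_put(struct file *f)
{
    if (--f->refs == 0) {
        printf("file released\n");
        device_put(f->dev);
        free(f);
    }
}

int main(void)
{
    struct device *d = malloc(sizeof(*d));
    struct file *f = malloc(sizeof(*f));
    d->refs = 1;
    f->refs = 2;          /* fd plus an async event still holding the file */
    f->dev = d;
    file_put(f);          /* close(): file survives, so device must too */
    file_put(f);          /* last ref: file and then device go away */
    return 0;
}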
index fb98ed67d5bc684b8cc0b941d7140986b95aa99b..7f8fe443df46f5b562ac3b2561e19226e3ab6b68 100644 (file)
@@ -895,7 +895,6 @@ static const struct {
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
-               [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
index 23fad6d969440bd2bd50a0c8b0dbafe8a92f4ae9..2540b65e242cebcf5b7c9fd60f936bc35bbf019b 100644 (file)
@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
                        continue;
 
                free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
-               if (IS_ERR(free_mr->mr_free_qp[i])) {
+               if (!free_mr->mr_free_qp[i]) {
                        dev_err(dev, "Create loop qp failed!\n");
                        goto create_lp_qp_failed;
                }
index ae0746754008798fc0c4ab7e940f736c376a72f1..3d701c7a4c9140e488b7427d9d901a4ea77d2786 100644 (file)
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 
        if (qp->ibqp.qp_type != IB_QPT_RC) {
                av = *wqe;
-               if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+               if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
                        *wqe += sizeof(struct mlx5_av);
                else
                        *wqe += sizeof(struct mlx5_base_av);
index ff50a7bd66d864506ec65aef1b63f45ce5d36e36..7ac25059c40f94aad951b28351cf425ebe573197 100644 (file)
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
        unsigned long flags;
 
        struct rw_semaphore vlan_rwsem;
+       struct mutex mcast_mutex;
 
        struct rb_root  path_tree;
        struct list_head path_list;
index f87d104837dcfab7f0e35b5b7fcae1e021599bfc..d69410c2ed97bdeceb17aedb2a7fe6049c59c310 100644 (file)
@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
-               p = cm_id->context;
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
index 7871379342f48fa77b2e6e8279ca774b4c49ad2f..184a22f4802773efc67131093f4ab4fcc89cd276 100644 (file)
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
        IPOIB_NETDEV_STAT(tx_bytes),
        IPOIB_NETDEV_STAT(tx_errors),
        IPOIB_NETDEV_STAT(rx_dropped),
-       IPOIB_NETDEV_STAT(tx_dropped)
+       IPOIB_NETDEV_STAT(tx_dropped),
+       IPOIB_NETDEV_STAT(multicast),
 };
 
 #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
index 57a9655e844deb1cc2eb57d9485f98e195368ac5..2e075377242e2baccc54cda5859d5b3ba7e768d0 100644 (file)
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 
        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;
+       if (skb->pkt_type == PACKET_MULTICAST)
+               dev->stats.multicast++;
 
        skb->dev = dev;
        if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
        return pending;
 }
 
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+                                       struct ib_qp *qp,
+                                       enum ib_qp_state new_state)
+{
+       struct ib_qp_attr qp_attr;
+       struct ib_qp_init_attr query_init_attr;
+       int ret;
+
+       ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+       if (ret) {
+               ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+               return;
+       }
+       /* Print according to the new state and the previous state. */
+       if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+               ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+       else
+               ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+                          new_state, qp_attr.qp_state);
+}
+
 int ipoib_ib_dev_stop_default(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
-               ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+               check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
 
        /* Wait for all sends and receives to complete */
        begin = jiffies;
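Context for the ipoib hunk above: with RESET->ERR removed from the valid-transition table earlier in this diff, ib_modify_qp() to IB_QPS_ERR can now legitimately fail when the QP is still in RESET, so the driver queries the current state and only warns on genuinely unexpected combinations. A hypothetical sketch of that decision:

#include <stdio.h>

enum qp_state { QPS_RESET, QPS_INIT, QPS_ERR };

/* mirror of check_qp_movement_and_print(): RESET->ERR is an acceptable
 * failure per the IB spec transition table, anything else is a warning */
static void report_modify_failure(enum qp_state cur, enum qp_state next)
{
    if (next == QPS_ERR && cur == QPS_RESET)
        printf("debug: RESET->ERR rejected, acceptable\n");
    else
        printf("warn: failed to modify QP from state %d to %d\n", cur, next);
}

int main(void)
{
    report_modify_failure(QPS_RESET, QPS_ERR);  /* downgraded to debug */
    report_modify_failure(QPS_INIT, QPS_ERR);   /* still a warning */
    return 0;
}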
index 4ce315c92b480fa705c30b33ffd7253b4cfded3b..6c77df34869dfb719d66787f6ccbb7637b042d36 100644 (file)
@@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
        int i, wait_flushed = 0;
 
        init_completion(&priv->ntbl.flushed);
+       set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
        spin_lock_irqsave(&priv->lock, flags);
 
@@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 
        ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
        init_completion(&priv->ntbl.deleted);
-       set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
        /* Stop GC; if called at init failure we need to cancel the work */
        stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
        .ndo_tx_timeout          = ipoib_timeout,
        .ndo_set_rx_mode         = ipoib_set_mcast_list,
        .ndo_get_iflink          = ipoib_get_iflink,
+       .ndo_get_stats64         = ipoib_get_stats,
 };
 
 void ipoib_setup_common(struct net_device *dev)
@@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
        priv->dev = dev;
        spin_lock_init(&priv->lock);
        init_rwsem(&priv->vlan_rwsem);
+       mutex_init(&priv->mcast_mutex);
 
        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
@@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
        priv->dev->dev_id = port - 1;
 
        result = ib_query_port(hca, port, &attr);
-       if (!result)
-               priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
-       else {
+       if (result) {
                printk(KERN_WARNING "%s: ib_query_port %d failed\n",
                       hca->name, port);
                goto device_init_failed;
        }
 
+       priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
        /* MTU will be reset when mcast join happens */
        priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
@@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
-       } else
-               memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+       }
+
+       memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+              sizeof(union ib_gid));
        set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
        result = ipoib_dev_init(priv->dev, hca, port);
-       if (result < 0) {
+       if (result) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
@@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void)
        ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+       ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
 #endif
 
        /*
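The min()/max() pair above clamps the module parameter into [0, IPOIB_CM_MAX_CONN_QP], guarding against a negative value passed at load time. The same two-step clamp in plain C (the bound is a stand-in):

#include <stdio.h>

#define MAX_CONN_QP 4096        /* stand-in for IPOIB_CM_MAX_CONN_QP */

static int clamp_conn_qp(int v)
{
    v = v < MAX_CONN_QP ? v : MAX_CONN_QP;      /* min(v, MAX) */
    v = v > 0 ? v : 0;                          /* max(v, 0)  */
    return v;
}

int main(void)
{
    printf("%d %d %d\n", clamp_conn_qp(-5), clamp_conn_qp(100),
           clamp_conn_qp(1 << 20));             /* 0 100 4096 */
    return 0;
}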
index 057f58e6afca249744f2d9013021e3c1c5d6417f..93e149efc1f5fc0382b61dcfc9f84d786d8b52ca 100644 (file)
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
 int ipoib_mcast_stop_thread(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
-       unsigned long flags;
 
        ipoib_dbg_mcast(priv, "stopping multicast thread\n");
 
-       spin_lock_irqsave(&priv->lock, flags);
-       cancel_delayed_work(&priv->mcast_task);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       flush_workqueue(priv->wq);
+       cancel_delayed_work_sync(&priv->mcast_task);
 
        return 0;
 }
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
 {
        struct ipoib_mcast *mcast, *tmcast;
 
+       /*
+        * make sure the in-flight joins have finished before we attempt
+        * to leave
+        */
+       list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+                       wait_for_completion(&mcast->done);
+
        list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        struct ipoib_mcast *mcast, *tmcast;
        unsigned long flags;
 
+       mutex_lock(&priv->mcast_mutex);
        ipoib_dbg_mcast(priv, "flushing multicast list\n");
 
        spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-                       wait_for_completion(&mcast->done);
-
        ipoib_mcast_remove_list(&remove_list);
+       mutex_unlock(&priv->mcast_mutex);
 }
 
 static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        netif_addr_unlock(dev);
        local_irq_restore(flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-                       wait_for_completion(&mcast->done);
-
        ipoib_mcast_remove_list(&remove_list);
 
        /*
index b97188acc4f1006185a5f8cb4fb3ee0e5cf735cf..2d80fa8a0634aba34b366609d8bcc50f432bb31c 100644 (file)
@@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev)
 
        if (using_legacy_binding) {
                ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+               /*
+                * If dev->iommu_fwspec is initially NULL,
+                * arm_smmu_register_legacy_master() will allocate/initialise
+                * a new one. Thus we need to update fwspec for later use.
+                */
+               fwspec = dev->iommu_fwspec;
                if (ret)
                        goto out_free;
        } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
index 7b5fd8fb1761d1912be615ee14571e420dc4c92b..aaca0b3d662eb18bda0848652c309985d5f6ad0e 100644 (file)
@@ -44,7 +44,6 @@ struct procdata {
        char log_name[15];      /* log filename */
        struct log_data *log_head, *log_tail;   /* head and tail for queue */
        int if_used;            /* open count for interface */
-       int volatile del_lock;  /* lock for delete operations */
        unsigned char logtmp[LOG_MAX_LINELEN];
        wait_queue_head_t rd_queue;
 };
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
 {
        struct log_data *ib;
        struct procdata *pd = card->proclog;
-       int i;
        unsigned long flags;
 
        if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
        else
                pd->log_tail->next = ib;        /* follows existing messages */
        pd->log_tail = ib;      /* new tail */
-       i = pd->del_lock++;     /* get lock state */
-       spin_unlock_irqrestore(&card->hysdn_lock, flags);
 
        /* delete old entries */
-       if (!i)
-               while (pd->log_head->next) {
-                       if ((pd->log_head->usage_cnt <= 0) &&
-                           (pd->log_head->next->usage_cnt <= 0)) {
-                               ib = pd->log_head;
-                               pd->log_head = pd->log_head->next;
-                               kfree(ib);
-                       } else
-                               break;
-               }               /* pd->log_head->next */
-       pd->del_lock--;         /* release lock level */
+       while (pd->log_head->next) {
+               if ((pd->log_head->usage_cnt <= 0) &&
+                   (pd->log_head->next->usage_cnt <= 0)) {
+                       ib = pd->log_head;
+                       pd->log_head = pd->log_head->next;
+                       kfree(ib);
+               } else {
+                       break;
+               }
+       }               /* pd->log_head->next */
+
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
        wake_up_interruptible(&(pd->rd_queue));         /* announce new entry */
 }                              /* put_log_buffer */
 
index 8621a198a2ce3eae317e4007cd6c533b56cfecc4..bac33311f55a6d7a6699b362c39730fae9c4f107 100644 (file)
@@ -215,6 +215,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
+       /*
+        * MEI requires resuming from runtime suspend mode
+        * in order to perform the link reset flow upon system suspend.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
        /*
        * For not wake-able HW runtime pm framework
        * can't be used on pci device level.
index f811cd52446852beecfedf02b7100f4bb6789169..e38a5f144373451fc87007ffc1cf4292059c5408 100644 (file)
@@ -137,6 +137,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
+       /*
+        * MEI requires resuming from runtime suspend mode
+        * in order to perform the link reset flow upon system suspend.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
        /*
        * For not wake-able HW runtime pm framework
        * can't be used on pci device level.
index e5938c791330c9be1203c2698bc511e4f98aaea4..f1bbfd389367ff4530137be199c4063c65f97f5c 100644 (file)
@@ -2170,7 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
                 * from being accepted.
                 */
                card = md->queue.card;
+               spin_lock_irq(md->queue.queue->queue_lock);
                queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+               spin_unlock_irq(md->queue.queue->queue_lock);
                blk_set_queue_dying(md->queue.queue);
                mmc_cleanup_queue(&md->queue);
                if (md->disk->flags & GENHD_FL_UP) {
index 4ffea14b7eb645d92a91d62907d64c97cf8a9998..2bae69e39544452dc323a9a3bd15ae9b3e7de1b9 100644 (file)
@@ -1289,7 +1289,7 @@ out_err:
 static int mmc_select_hs400es(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
-       int err = 0;
+       int err = -EINVAL;
        u8 val;
 
        if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
index 04ff3c97a535143933acec93caa8483a43f633c6..2ab4788d021f0512082c6bd67d1edaa57ba61379 100644 (file)
@@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
        mmc->max_seg_size = mmc->max_req_size;
 
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
-                    MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+                    MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
        mmc->caps |= mmc_pdata(host)->caps;
        if (mmc->caps & MMC_CAP_8_BIT_DATA)
index f336a9b855765e5ea236fddf9564f5f084793597..9ec8f033ac5f077b05fdfc32a7f97f1747339ca8 100644 (file)
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return BLK_STS_IOERR;
+               return BLK_STS_OK;
        default:
                return BLK_STS_IOERR;
        }
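The `return BLK_STS_OK;` added above fixes a classic switch fallthrough bug: after a successful write loop, control previously fell into the `default:` branch and reported BLK_STS_IOERR for every write. A minimal reproduction of the pattern:

#include <stdio.h>

enum blk_status { STS_OK = 0, STS_IOERR = 10 };

static enum blk_status do_request(int op, int fixed)
{
    switch (op) {
    case 1:                      /* write */
        /* ... write sectors ... */
        if (fixed)
            return STS_OK;       /* the added line */
        /* otherwise falls through! */
    default:
        return STS_IOERR;
    }
}

int main(void)
{
    printf("buggy: %d\n", do_request(1, 0));    /* 10: every write "fails" */
    printf("fixed: %d\n", do_request(1, 1));    /* 0 */
    return 0;
}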
index d922a88e407f119bbf52aae494c632e08d113e7f..2c8baa0c2c4e11f2b5d1c39d4c3ad795d634135a 100644 (file)
@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
         * tRC < 30ns implies EDO mode. This controller does not support this
         * mode.
         */
-       if (conf->timings.sdr.tRC_min < 30)
+       if (conf->timings.sdr.tRC_min < 30000)
                return -ENOTSUPP;
 
        atmel_smc_cs_conf_init(smcconf);
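The atmel_nand hunk above is a units fix: the SDR timings in this driver are kept in picoseconds, so the "tRC < 30ns implies EDO" test must compare against 30000 ps, not 30. For example:

#include <stdio.h>

int main(void)
{
    unsigned long tRC_min_ps = 25000;   /* 25 ns, expressed in ps */
    printf("EDO detected (buggy ns compare): %d\n", tRC_min_ps < 30);
    printf("EDO detected (ps compare):       %d\n", tRC_min_ps < 30000);
    return 0;
}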
index 55a8ee5306ea992f39bbcd0c2cbaa5e40ad90214..8c210a5776bcbea1677e422dc3f06397c831d9cc 100644 (file)
@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
                 */
                struct platform_device *pdev = to_platform_device(userdev);
                const struct atmel_pmecc_caps *caps;
+               const struct of_device_id *match;
 
                /* No PMECC engine available. */
                if (!of_property_read_bool(userdev->of_node,
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
 
                caps = &at91sam9g45_caps;
 
-               /*
-                * Try to find the NFC subnode and extract the associated caps
-                * from there.
-                */
-               np = of_find_compatible_node(userdev->of_node, NULL,
-                                            "atmel,sama5d3-nfc");
-               if (np) {
-                       const struct of_device_id *match;
-
-                       match = of_match_node(atmel_pmecc_legacy_match, np);
-                       if (match && match->data)
-                               caps = match->data;
-
-                       of_node_put(np);
-               }
+               /* Find the caps associated with the NAND dev node. */
+               match = of_match_node(atmel_pmecc_legacy_match,
+                                     userdev->of_node);
+               if (match && match->data)
+                       caps = match->data;
 
                pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
        }
index 5fa5ddc94834d0a27a8add0125ed27310601af09..c6c18b82f8f4eade18561edb24439a9d4737efa5 100644 (file)
@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
 
        if (!section) {
                oobregion->offset = 0;
-               oobregion->length = 4;
+               if (mtd->oobsize == 16)
+                       oobregion->length = 4;
+               else
+                       oobregion->length = 3;
        } else {
+               if (mtd->oobsize == 8)
+                       return -ERANGE;
+
                oobregion->offset = 6;
                oobregion->length = ecc->total - 4;
        }
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
         * Ensure the timing mode has been changed on the chip side
         * before changing timings on the controller side.
         */
-       if (chip->onfi_version) {
+       if (chip->onfi_version &&
+           (le16_to_cpu(chip->onfi_params.opt_cmd) &
+            ONFI_OPT_CMD_SET_GET_FEATURES)) {
                u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
                        chip->onfi_timing_mode_default,
                };
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
  * @buf: the data to write
  * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
- * @cached: cached programming
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
index f06312df3669c18cb788fc033acf6b5811db8d0a..7e36d7d13c268fc2317c330f7688e188e81d6bdc 100644 (file)
@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
                struct nand_sdr_timings *timings = &iface->timings.sdr;
 
                /* microseconds -> picoseconds */
-               timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
-               timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
-               timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+               timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
+               timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
+               timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
 
                /* nanoseconds -> picoseconds */
                timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
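The UL-to-ULL change above matters on 32-bit builds, where `unsigned long` is 32 bits: 1000000UL times a 16-bit ONFI value can exceed 2^32 and silently wrap before being widened to the 64-bit timing field. A demonstration that uses uint32_t to model a 32-bit `unsigned long`:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t t_prog = 5000;                     /* 5000 us from the chip */
    uint32_t narrow = (uint32_t)(1000000UL * t_prog);   /* 32-bit wrap  */
    uint64_t wide   = 1000000ULL * t_prog;              /* correct ps   */

    printf("32-bit math: %u ps\n", narrow);     /* wrapped, far too small */
    printf("64-bit math: %llu ps\n", (unsigned long long)wide);
    return 0;
}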
index d0b6f8f9f297ab89f355a727c333de1c5a2f7fc8..6abd142b13246f1189e189c03d3ff499665c1b0b 100644 (file)
@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
         */
        chip->clk_rate = NSEC_PER_SEC / min_clk_period;
        real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+       if (real_clk_rate <= 0) {
+               dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
+               return -EINVAL;
+       }
 
        /*
         * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
index 1e46418a3b74c3f351068fd6d1a3b3b5168e8ab2..264b281eb86bf1b52abb88ef67c2ce7143ba6024 100644 (file)
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
                 * all finished.
                 */
                mt7623_pad_clk_setup(ds);
+       } else {
+               u16 lcl_adv = 0, rmt_adv = 0;
+               u8 flowctrl;
+               u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
+
+               switch (phydev->speed) {
+               case SPEED_1000:
+                       mcr |= PMCR_FORCE_SPEED_1000;
+                       break;
+               case SPEED_100:
+                       mcr |= PMCR_FORCE_SPEED_100;
+                       break;
+               }
+
+               if (phydev->link)
+                       mcr |= PMCR_FORCE_LNK;
+
+               if (phydev->duplex) {
+                       mcr |= PMCR_FORCE_FDX;
+
+                       if (phydev->pause)
+                               rmt_adv = LPA_PAUSE_CAP;
+                       if (phydev->asym_pause)
+                               rmt_adv |= LPA_PAUSE_ASYM;
+
+                       if (phydev->advertising & ADVERTISED_Pause)
+                               lcl_adv |= ADVERTISE_PAUSE_CAP;
+                       if (phydev->advertising & ADVERTISED_Asym_Pause)
+                               lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+                       flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+                       if (flowctrl & FLOW_CTRL_TX)
+                               mcr |= PMCR_TX_FC_EN;
+                       if (flowctrl & FLOW_CTRL_RX)
+                               mcr |= PMCR_RX_FC_EN;
+               }
+               mt7530_write(priv, MT7530_PMCR_P(port), mcr);
        }
 }
 
index b83d76b998023c38c9e67b90ca73e393f7d29fc4..74db9822eb40437a92bc21ace75971df48c1bfa4 100644 (file)
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
 #define  PMCR_TX_FC_EN                 BIT(5)
 #define  PMCR_RX_FC_EN                 BIT(4)
 #define  PMCR_FORCE_SPEED_1000         BIT(3)
+#define  PMCR_FORCE_SPEED_100          BIT(2)
 #define  PMCR_FORCE_FDX                        BIT(1)
 #define  PMCR_FORCE_LNK                        BIT(0)
 #define  PMCR_COMMON_LINK              (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
index 86058a9f3417bc59613cb9c019638dc8a85a9731..1d307f2def2d910eff7c983d9866fa88b331d869 100644 (file)
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 
        xgene_enet_gpiod_get(pdata);
 
-       if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
-               pdata->clk = devm_clk_get(&pdev->dev, NULL);
-               if (IS_ERR(pdata->clk)) {
+       pdata->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(pdata->clk)) {
+               if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
                        /* Abort if the clock is defined but couldn't be
                         * retrieved. Always abort if the clock is missing on
                         * DT system as the driver can't cope with this case.
index f411936b744cb1fa4d331f7b439a181916fa118e..a1125d10c8255f6eb8fbb6046b09473c35ba52b8 100644 (file)
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
        bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
 
        spin_lock_init(&bp->lock);
+       u64_stats_init(&bp->hw_stats.syncp);
 
        bp->rx_pending = B44_DEF_RX_RING_PENDING;
        bp->tx_pending = B44_DEF_TX_RING_PENDING;
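Several hunks in this merge (b44 here, and i40e, ixgbevf, nfp, and netvsc below; gtp and ipvlan get it via netdev_alloc_pcpu_stats()) add u64_stats_init() before a stats block is first used. On 32-bit SMP kernels u64_stats_sync is a seqcount, and an uninitialized one trips lockdep. A toy userspace model of the seqcount read/retry protocol it implements (not the kernel API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* toy seqcount in the spirit of u64_stats_sync: the writer bumps the
 * sequence around updates, readers retry if it changed or was odd */
struct stats { atomic_uint seq; uint64_t packets; };

static void write_stats(struct stats *s, uint64_t pkts)
{
    atomic_fetch_add(&s->seq, 1);       /* odd: write in progress */
    s->packets = pkts;
    atomic_fetch_add(&s->seq, 1);       /* even again */
}

static uint64_t read_stats(struct stats *s)
{
    unsigned int start;
    uint64_t v;

    do {
        start = atomic_load(&s->seq);
        v = s->packets;
    } while ((start & 1) || atomic_load(&s->seq) != start);
    return v;
}

int main(void)
{
    struct stats s = { .seq = 0 };      /* the analogue of u64_stats_init() */
    write_stats(&s, 42);
    printf("packets=%llu\n", (unsigned long long)read_stats(&s));
    return 0;
}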
index 5333601f855f88529c04e003eae5e3d19aa59f6d..dc3052751bc13ed2248c218de01849d865dbe952 100644 (file)
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                        p = (char *)&dev->stats;
                else
                        p = (char *)priv;
+
+               if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
+                       continue;
+
                p += s->stat_offset;
                data[j] = *(unsigned long *)p;
                j++;
index a3e6946796350d0a3bb79410d1f354a844ab7f60..c45e8e3b82d38da950a7cf1ca72022d120b7947e 100644 (file)
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 static void send_request_unmap(struct ibmvnic_adapter *, u8);
 static void send_login(struct ibmvnic_adapter *adapter);
 static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crqs(struct ibmvnic_adapter *);
 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 static int ibmvnic_init(struct ibmvnic_adapter *);
 static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        struct device *dev = &adapter->vdev->dev;
+       int rc;
 
        do {
                if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
                                dev_err(dev, "Capabilities query timeout\n");
                                return -1;
                        }
+                       rc = init_sub_crqs(adapter);
+                       if (rc) {
+                               dev_err(dev,
+                                       "Initialization of SCRQs failed\n");
+                               return -1;
+                       }
+                       rc = init_sub_crq_irqs(adapter);
+                       if (rc) {
+                               dev_err(dev,
+                                       "Initialization of SCRQ IRQs failed\n");
+                               return -1;
+                       }
                }
 
                reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be64_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               release_sub_crqs(adapter);
                *req_value = be64_to_cpu(crq->request_capability_rsp.number);
                ibmvnic_send_req_caps(adapter, 1);
                return;
index b936febc315a17b3d8db0af05f60938d478edefd..2194960d5855c6576ec03c870479344b099ce12b 100644 (file)
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
        if (!tx_ring->tx_bi)
                goto err;
 
+       u64_stats_init(&tx_ring->syncp);
+
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
        /* add u32 for head writeback, align after this takes care of
index 084c5358279319ed6d826aa00f135adb98b7631b..032f8ac06357aefa7a695c6685b8bbbbf7a8949e 100644 (file)
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
        if (!tx_ring->tx_buffer_info)
                goto err;
 
+       u64_stats_init(&tx_ring->syncp);
+
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
        if (!rx_ring->rx_buffer_info)
                goto err;
 
+       u64_stats_init(&rx_ring->syncp);
+
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
index c751a1d434ad7167e6b65a62f46b7295044860f8..3d4e4a5d00d1c5f81267c4a4a9675bc667709211 100644 (file)
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
                            struct ethtool_wolinfo *wol)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
+       struct mlx4_caps *caps = &priv->mdev->dev->caps;
        int err = 0;
        u64 config = 0;
        u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
        mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
                MLX4_DEV_CAP_FLAG_WOL_PORT2;
 
-       if (!(priv->mdev->dev->caps.flags & mask)) {
+       if (!(caps->flags & mask)) {
                wol->supported = 0;
                wol->wolopts = 0;
                return;
        }
 
+       if (caps->wol_port[priv->port])
+               wol->supported = WAKE_MAGIC;
+       else
+               wol->supported = 0;
+
        err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
        if (err) {
                en_err(priv, "Failed to get WoL information\n");
                return;
        }
 
-       if (config & MLX4_EN_WOL_MAGIC)
-               wol->supported = WAKE_MAGIC;
-       else
-               wol->supported = 0;
-
-       if (config & MLX4_EN_WOL_ENABLED)
+       if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
                wol->wolopts = WAKE_MAGIC;
        else
                wol->wolopts = 0;
index 436f7689a03212943d5ea70a2214774d2c940d97..bf1638044a7a89b6e911b3f5786c75597f89f2ba 100644 (file)
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
  * header, the HW adds it. To address that, we are subtracting the pseudo
  * header checksum from the checksum value provided by the HW.
  */
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
-                               struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+                              struct iphdr *iph)
 {
        __u16 length_for_csum = 0;
        __wsum csum_pseudo_header = 0;
+       __u8 ipproto = iph->protocol;
+
+       if (unlikely(ipproto == IPPROTO_SCTP))
+               return -1;
 
        length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
        csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                               length_for_csum, iph->protocol, 0);
+                                               length_for_csum, ipproto, 0);
        skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+       return 0;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
                               struct ipv6hdr *ipv6h)
 {
+       __u8 nexthdr = ipv6h->nexthdr;
        __wsum csum_pseudo_hdr = 0;
 
-       if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
-                    ipv6h->nexthdr == IPPROTO_HOPOPTS))
+       if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+                    nexthdr == IPPROTO_HOPOPTS ||
+                    nexthdr == IPPROTO_SCTP))
                return -1;
-       hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+       hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
 
        csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
                                       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
        csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
-       csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+       csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+                                  (__force __wsum)htons(nexthdr));
 
        skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
        skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
        }
 
        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-               get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+               return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
-       else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
-               if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
-                       return -1;
+       if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+               return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
        return 0;
 }
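The mlx4 RX hunks above make both the IPv4 and IPv6 paths punt (return -1, so the surrounding driver logic falls back to software verification) when the payload is SCTP: the CHECKSUM_COMPLETE math is built on the 16-bit ones' complement Internet checksum, while SCTP uses CRC32c, so the pseudo-header subtraction trick cannot apply. For reference, a RFC 1071-style Internet checksum that the trick relies on:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit ones' complement sum; TCP/UDP use this, SCTP does not */
static uint16_t inet_csum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;

    while (len > 1) {
        sum += (buf[0] << 8) | buf[1];
        buf += 2;
        len -= 2;
    }
    if (len)
        sum += buf[0] << 8;             /* pad the odd trailing byte */
    while (sum >> 16)                   /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    const uint8_t data[] = { 0x45, 0x00, 0x00, 0x1c };
    printf("checksum: 0x%04x\n", inet_csum(data, sizeof(data)));
    return 0;
}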
index 37e84a59e751d8ad44c34f5a0c7b16337a207c9e..041c0ed6592909a2d7b99cbaa51fc6cc59b7096b 100644 (file)
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [32] = "Loopback source checks support",
                [33] = "RoCEv2 support",
                [34] = "DMFS Sniffer support (UC & MC)",
-               [35] = "QinQ VST mode support",
-               [36] = "sl to vl mapping table change event support"
+               [35] = "Diag counters per port",
+               [36] = "QinQ VST mode support",
+               [37] = "sl to vl mapping table change event support",
        };
        int i;
 
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET     0x3e
 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET          0x3f
 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET         0x40
+#define QUERY_DEV_CAP_WOL_OFFSET               0x43
 #define QUERY_DEV_CAP_FLAGS_OFFSET             0x44
 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET          0x48
 #define QUERY_DEV_CAP_UAR_SZ_OFFSET            0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
        MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
        dev_cap->flags = flags | (u64)ext_flags << 32;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
+       dev_cap->wol_port[1] = !!(field & 0x20);
+       dev_cap->wol_port[2] = !!(field & 0x40);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
        dev_cap->reserved_uars = field >> 4;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
index 5343a0599253b98d6f2b9b77b65210a4e1e0abf9..b52ba01aa486a0b492cdb623cbe253c7a2e583d0 100644 (file)
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
        u32 dmfs_high_rate_qpn_range;
        struct mlx4_rate_limit_caps rl_caps;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+       bool wol_port[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_func_cap {
index a27c9c13a36ed11d577e7cd9cff1e2a9daec137d..09b9bc17bce998a99f360577a92a0211b552bf38 100644 (file)
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
+       dev->caps.wol_port[1]          = dev_cap->wol_port[1];
+       dev->caps.wol_port[2]          = dev_cap->wol_port[2];
 
        /* Save uar page shift */
        if (!mlx4_is_slave(dev)) {
index 656b2d3f1bee0e8aa5b1f328d5066d60f5e96099..5eb1606765c58064a5e2fd6677a791165c18c071 100644 (file)
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
                                                orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
                                                   MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
                                                orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        if (!bridge_port->bridge_device->multicast_enabled)
                return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
                return 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        bridge_device = bridge_port->bridge_device;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
                                                               bridge_device,
                                                               mdb->vid);
-       if (WARN_ON(!mlxsw_sp_port_vlan))
-               return -EINVAL;
+       if (!mlxsw_sp_port_vlan)
+               return 0;
 
        fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
        int err = 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
+       if (!bridge_port)
+               return 0;
 
        bridge_device = bridge_port->bridge_device;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
                                                               bridge_device,
                                                               mdb->vid);
-       if (WARN_ON(!mlxsw_sp_port_vlan))
-               return -EINVAL;
+       if (!mlxsw_sp_port_vlan)
+               return 0;
 
        fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
 
 }
 
+static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp_mid *mid, *tmp;
+
+       list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
+               list_del(&mid->list);
+               clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+               kfree(mid);
+       }
+}
+
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
 {
        mlxsw_sp_fdb_fini(mlxsw_sp);
-       WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+       mlxsw_sp_mids_fini(mlxsw_sp);
        WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
        kfree(mlxsw_sp->bridge);
 }
index 18750ff0ede6262d1a5fd50b8acc808c12a50756..4631ca8b8eb2780865bc2eefae51ce5c5a61f5cf 100644 (file)
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
        tx_ring->idx = idx;
        tx_ring->r_vec = r_vec;
        tx_ring->is_xdp = is_xdp;
+       u64_stats_init(&tx_ring->r_vec->tx_sync);
 
        tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
        tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
 
        rx_ring->idx = idx;
        rx_ring->r_vec = r_vec;
+       u64_stats_init(&rx_ring->r_vec->rx_sync);
 
        rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
        rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
index 9da91045d167b095ad2bb45f5bd3fa1a89255012..3eb241657368e33cb6bc7b25afcecee931a24366 100644 (file)
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
        p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
-       if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+       if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;
 
        return 0;
index 32279d21c8363d4976c6d313599fbc0e015e6a36..c2121d214f089eb1fe59af4ceb4b2b358abb8f0c 100644 (file)
 
 #include "cpts.h"
 
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+       unsigned long tmo;
+};
+
 #define cpts_read32(c, r)      readl_relaxed(&c->reg->r)
 #define cpts_write32(c, v, r)  writel_relaxed(v, &c->reg->r)
 
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+                     u16 ts_seqid, u8 ts_msgtype);
+
 static int event_expired(struct cpts_event *event)
 {
        return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
        return removed ? 0 : -1;
 }
 
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+       struct sk_buff *skb, *tmp;
+       u16 seqid;
+       u8 mtype;
+       bool found = false;
+
+       mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+       seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+       /* no need to grab txq.lock as access is always done under cpts->lock */
+       skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+               struct skb_shared_hwtstamps ssh;
+               unsigned int class = ptp_classify_raw(skb);
+               struct cpts_skb_cb_data *skb_cb =
+                                       (struct cpts_skb_cb_data *)skb->cb;
+
+               if (cpts_match(skb, class, seqid, mtype)) {
+                       u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+                       memset(&ssh, 0, sizeof(ssh));
+                       ssh.hwtstamp = ns_to_ktime(ns);
+                       skb_tstamp_tx(skb, &ssh);
+                       found = true;
+                       __skb_unlink(skb, &cpts->txq);
+                       dev_consume_skb_any(skb);
+                       dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+                               mtype, seqid);
+               } else if (time_after(jiffies, skb_cb->tmo)) {
+                       /* time out and free any expired skbs */
+                       dev_dbg(cpts->dev,
+                               "expiring tx timestamp mtype %u seqid %04x\n",
+                               mtype, seqid);
+                       __skb_unlink(skb, &cpts->txq);
+                       dev_consume_skb_any(skb);
+               }
+       }
+
+       return found;
+}
+
 /*
  * Returns zero if matching event type was found.
  */
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
                event->low = lo;
                type = event_type(event);
                switch (type) {
+               case CPTS_EV_TX:
+                       if (cpts_match_tx_ts(cpts, event)) {
+                               /* if the new event matches an existing skb,
+                                * then don't queue it
+                                */
+                               break;
+                       }
                case CPTS_EV_PUSH:
                case CPTS_EV_RX:
-               case CPTS_EV_TX:
                        list_del_init(&event->list);
                        list_add_tail(&event->list, &cpts->events);
                        break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
        return -EOPNOTSUPP;
 }
 
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+       struct cpts *cpts = container_of(ptp, struct cpts, info);
+       unsigned long delay = cpts->ov_check_period;
+       struct timespec64 ts;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cpts->lock, flags);
+       ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+       if (!skb_queue_empty(&cpts->txq))
+               delay = CPTS_SKB_TX_WORK_TIMEOUT;
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+       return (long)delay;
+}
+
 static struct ptp_clock_info cpts_info = {
        .owner          = THIS_MODULE,
        .name           = "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
        .gettime64      = cpts_ptp_gettime,
        .settime64      = cpts_ptp_settime,
        .enable         = cpts_ptp_enable,
+       .do_aux_work    = cpts_overflow_check,
 };
 
-static void cpts_overflow_check(struct work_struct *work)
-{
-       struct timespec64 ts;
-       struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
-
-       cpts_ptp_gettime(&cpts->info, &ts);
-       pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
-       schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
-}
-
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
                      u16 ts_seqid, u8 ts_msgtype)
 {
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
                return 0;
 
        spin_lock_irqsave(&cpts->lock, flags);
-       cpts_fifo_read(cpts, CPTS_EV_PUSH);
+       cpts_fifo_read(cpts, -1);
        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
                        break;
                }
        }
+
+       if (ev_type == CPTS_EV_TX && !ns) {
+               struct cpts_skb_cb_data *skb_cb =
+                               (struct cpts_skb_cb_data *)skb->cb;
+               /* Not found, add frame to queue for processing later.
+                * The periodic FIFO check will handle this.
+                */
+               skb_get(skb);
+               /* get the timestamp for timeouts */
+               skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+               __skb_queue_tail(&cpts->txq, skb);
+               ptp_schedule_worker(cpts->clock, 0);
+       }
        spin_unlock_irqrestore(&cpts->lock, flags);
 
        return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
 {
        int err, i;
 
+       skb_queue_head_init(&cpts->txq);
        INIT_LIST_HEAD(&cpts->events);
        INIT_LIST_HEAD(&cpts->pool);
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
        }
        cpts->phc_index = ptp_clock_index(cpts->clock);
 
-       schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
+       ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
        return 0;
 
 err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
        if (WARN_ON(!cpts->clock))
                return;
 
-       cancel_delayed_work_sync(&cpts->overflow_work);
-
        ptp_clock_unregister(cpts->clock);
        cpts->clock = NULL;
 
        cpts_write32(cpts, 0, int_enable);
        cpts_write32(cpts, 0, control);
 
+       /* Drop all queued packets */
+       skb_queue_purge(&cpts->txq);
+
        clk_disable(cpts->refclk);
 }
 EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
        cpts->dev = dev;
        cpts->reg = (struct cpsw_cpts __iomem *)regs;
        spin_lock_init(&cpts->lock);
-       INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
 
        ret = cpts_of_parse(cpts, node);
        if (ret)
index 01ea82ba9cdca7e83a03f36d9ef1f43ec4267bc0..73d73faf0f38748327cf5ca241f7a5c685f10275 100644 (file)
@@ -119,13 +119,13 @@ struct cpts {
        u32 cc_mult; /* for the nominal frequency */
        struct cyclecounter cc;
        struct timecounter tc;
-       struct delayed_work overflow_work;
        int phc_index;
        struct clk *refclk;
        struct list_head events;
        struct list_head pool;
        struct cpts_event pool_data[CPTS_MAX_EVENTS];
        unsigned long ov_check_period;
+       struct sk_buff_head txq;
 };
 
 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
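The cpts conversion above replaces the driver-private overflow_work with the PTP core's aux worker: do_aux_work (here cpts_overflow_check()) returns how long until it should run again, and the driver asks for an immediate run via ptp_schedule_worker(..., 0) when a TX skb is queued waiting for its timestamp. A hypothetical sketch of a callback that shortens its own period while work is pending:

#include <stdbool.h>
#include <stdio.h>

#define OV_CHECK_PERIOD 250  /* jiffies; stand-in for cpts->ov_check_period */
#define TX_WORK_TIMEOUT 1    /* jiffies; stand-in for the 1-jiffy TX timeout */

/* returns the delay until the next run, like a do_aux_work callback */
static long overflow_check(bool txq_empty)
{
    return txq_empty ? OV_CHECK_PERIOD : TX_WORK_TIMEOUT;
}

int main(void)
{
    printf("idle:       rerun in %ld jiffies\n", overflow_check(true));
    printf("ts pending: rerun in %ld jiffies\n", overflow_check(false));
    return 0;
}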
index de8156c6b2925741534a45a6c3a28a3afe9d1ad6..2bbda71818adb022853964dd6d51a14c26f7cd19 100644 (file)
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
        if (data[IFLA_GENEVE_ID]) {
                __u32 vni =  nla_get_u32(data[IFLA_GENEVE_ID]);
 
-               if (vni >= GENEVE_VID_MASK)
+               if (vni >= GENEVE_N_VID)
                        return -ERANGE;
        }
 
index 1542e837fdfa777e96155f041bc6d2072946d0cc..f38e32a7ec9c979ac4524c31e09da375a6e0606c 100644 (file)
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
 
        gtp->dev = dev;
 
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
index d6c25580f8dd636dc43fe464adb339f597fdbcee..12cc64bfcff83c3c28c1bf97033dfd0162848b0e 100644 (file)
@@ -765,7 +765,8 @@ struct netvsc_device {
        u32 max_chn;
        u32 num_chn;
 
-       refcount_t sc_offered;
+       atomic_t open_chn;
+       wait_queue_head_t subchan_open;
 
        struct rndis_device *extension;
 
index 0a9167dd72fb94e50692fa70ec9bc50fd99f733e..d18c3326a1f782b403de4a10ef057196beb3aaa5 100644 (file)
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
        init_completion(&net_device->channel_init_wait);
+       init_waitqueue_head(&net_device->subchan_open);
 
        return net_device;
 }
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
                struct netvsc_channel *nvchan = &net_device->chan_table[i];
 
                nvchan->channel = device->channel;
+               u64_stats_init(&nvchan->tx_stats.syncp);
+               u64_stats_init(&nvchan->rx_stats.syncp);
        }
 
        /* Enable NAPI handler before init callbacks */
index 85c00e1c52b6aa9309e782df639e03f444ffc1be..d6308ffda53ec797acf5f9e6038bf0a230008b55 100644 (file)
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
        else
                netif_napi_del(&nvchan->napi);
 
-       if (refcount_dec_and_test(&nvscdev->sc_offered))
-               complete(&nvscdev->channel_init_wait);
+       atomic_inc(&nvscdev->open_chn);
+       wake_up(&nvscdev->subchan_open);
 }
 
 int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
        net_device->max_chn = 1;
        net_device->num_chn = 1;
 
-       refcount_set(&net_device->sc_offered, 0);
-
        net_device->extension = rndis_device;
        rndis_device->ndev = net;
 
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
                rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
                                                        net_device->num_chn);
 
+       atomic_set(&net_device->open_chn, 1);
        num_rss_qs = net_device->num_chn - 1;
        if (num_rss_qs == 0)
                return 0;
 
-       refcount_set(&net_device->sc_offered, num_rss_qs);
        vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
        init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
        if (ret)
                goto out;
 
+       wait_for_completion(&net_device->channel_init_wait);
        if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                ret = -ENODEV;
                goto out;
        }
-       wait_for_completion(&net_device->channel_init_wait);
 
        net_device->num_chn = 1 +
                init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
+       /* wait for all subchannels to open */
+       wait_event(net_device->subchan_open,
+                  atomic_read(&net_device->open_chn) == net_device->num_chn);
+
        /* ignore failures from setting rss parameters, still have channels */
        rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
                                   net_device->num_chn);
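The netvsc hunks above replace the refcount/completion scheme with a plain counter plus wait queue: each subchannel open bumps open_chn and wakes subchan_open, and device add proceeds only once open_chn equals num_chn, which is learned only after the host replies (hence the reordered wait_for_completion()). A pthread condvar analogue of that wait (compile with -pthread; all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  opened = PTHREAD_COND_INITIALIZER;
static int open_chn = 1;                /* primary channel counts as open */

static void *sc_open(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    open_chn++;                         /* atomic_inc(&nvscdev->open_chn) */
    pthread_cond_signal(&opened);       /* wake_up(&nvscdev->subchan_open) */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    enum { NUM_CHN = 4 };
    pthread_t t[NUM_CHN - 1];

    for (int i = 0; i < NUM_CHN - 1; i++)
        pthread_create(&t[i], NULL, sc_open, NULL);

    pthread_mutex_lock(&lock);
    while (open_chn != NUM_CHN)         /* wait_event(..., open == num) */
        pthread_cond_wait(&opened, &lock);
    pthread_mutex_unlock(&lock);
    printf("all %d channels open\n", NUM_CHN);

    for (int i = 0; i < NUM_CHN - 1; i++)
        pthread_join(t[i], NULL);
    return 0;
}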
index f37e3c1fd4e73f27e46564a6ef2739ff658523df..8dab74a81303277aed5cf24844941bdcadfcef1a 100644 (file)
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
 
        netdev_lockdep_set_classes(dev);
 
-       ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+       ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;
 
index bd4303944e4405d543a399f6fc529df9ffd41e02..a404552555d488c832e7758293d7d4c1e229e679 100644 (file)
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
        spin_unlock(&pch->downl);
        /* see if there is anything from the attached unit to be sent */
        if (skb_queue_empty(&pch->file.xq)) {
-               read_lock(&pch->upl);
                ppp = pch->ppp;
                if (ppp)
-                       ppp_xmit_process(ppp);
-               read_unlock(&pch->upl);
+                       __ppp_xmit_process(ppp);
        }
 }
 
 static void ppp_channel_push(struct channel *pch)
 {
-       local_bh_disable();
-
-       __ppp_channel_push(pch);
-
-       local_bh_enable();
+       read_lock_bh(&pch->upl);
+       if (pch->ppp) {
+               (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+               __ppp_channel_push(pch);
+               (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+       } else {
+               __ppp_channel_push(pch);
+       }
+       read_unlock_bh(&pch->upl);
 }
 
 /*
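The rewritten ppp_channel_push() keeps pch->upl held in BH context and brackets the push with a per-cpu recursion counter that the ppp xmit path consults to avoid re-entering itself on the same CPU. A minimal sketch of such a per-cpu recursion guard, with hypothetical names; it assumes the section runs with BH disabled (as under read_lock_bh()) so the task cannot migrate between the this_cpu accesses.

#include <linux/percpu.h>

/* "xmit_recursion" stands in for the int __percpu * field carried by
 * the ppp unit; assume xmit_recursion = alloc_percpu(int) succeeded
 * at setup time.
 */
static int __percpu *xmit_recursion;

static void do_transmit(void)
{
        /* the real transmit body would live here */
}

static void guarded_transmit(void)
{
        if ((*this_cpu_ptr(xmit_recursion)) > 0)
                return;                 /* already in xmit on this CPU: bail */

        (*this_cpu_ptr(xmit_recursion))++;
        do_transmit();
        (*this_cpu_ptr(xmit_recursion))--;
}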
index d1092421aaa7e7b69ba926d508155574f072ba61..9a4171b9094760871cf4396c99b2236bccd15193 100644 (file)
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                           struct asix_rx_fixup_info *rx);
 int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
+void asix_rx_fixup_common_free(struct asix_common_private *dp);
 
 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                              gfp_t flags);
index 7847436c441e3c16b91fb0d72c3f07061d4e772d..522d2900cd1dd942ae58c407920289e95f0ae5e3 100644 (file)
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                               value, index, data, size);
 }
 
+static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
+{
+       /* Reset the variables that have a lifetime outside of
+        * asix_rx_fixup_internal() so that future processing starts from a
+        * known set of initial conditions.
+        */
+
+       if (rx->ax_skb) {
+               /* Discard any incomplete Ethernet frame in the netdev buffer */
+               kfree_skb(rx->ax_skb);
+               rx->ax_skb = NULL;
+       }
+
+       /* Assume the Data header 32-bit word is at the start of the current
+        * or next URB socket buffer so reset all the state variables.
+        */
+       rx->remaining = 0;
+       rx->split_head = false;
+       rx->header = 0;
+}
+
 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                           struct asix_rx_fixup_info *rx)
 {
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                if (size != ((~rx->header >> 16) & 0x7ff)) {
                        netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
                                   rx->remaining);
-                       if (rx->ax_skb) {
-                               kfree_skb(rx->ax_skb);
-                               rx->ax_skb = NULL;
-                               /* Discard the incomplete netdev Ethernet frame
-                                * and assume the Data header is at the start of
-                                * the current URB socket buffer.
-                                */
-                       }
-                       rx->remaining = 0;
+                       reset_asix_rx_fixup_info(rx);
                }
        }
 
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                        if (size != ((~rx->header >> 16) & 0x7ff)) {
                                netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
                                           rx->header, offset);
+                               reset_asix_rx_fixup_info(rx);
                                return 0;
                        }
                        if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
                                netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
                                           size);
+                               reset_asix_rx_fixup_info(rx);
                                return 0;
                        }
 
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                if (rx->ax_skb) {
                        skb_put_data(rx->ax_skb, skb->data + offset,
                                     copy_length);
-                       if (!rx->remaining)
+                       if (!rx->remaining) {
                                usbnet_skb_return(dev, rx->ax_skb);
+                               rx->ax_skb = NULL;
+                       }
                }
 
                offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
        if (skb->len != offset) {
                netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
                           skb->len, offset);
+               reset_asix_rx_fixup_info(rx);
                return 0;
        }
 
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
        return asix_rx_fixup_internal(dev, skb, rx);
 }
 
+void asix_rx_fixup_common_free(struct asix_common_private *dp)
+{
+       struct asix_rx_fixup_info *rx;
+
+       if (!dp)
+               return;
+
+       rx = &dp->rx_fixup_info;
+
+       if (rx->ax_skb) {
+               kfree_skb(rx->ax_skb);
+               rx->ax_skb = NULL;
+       }
+}
+
 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                              gfp_t flags)
 {
index a3aa0a27dfe56b22121a0571cc4eaca1b2bbee03..b2ff88e69a819cc3098a720ece238d8847d6be57 100644 (file)
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 
 static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
+       asix_rx_fixup_common_free(dev->driver_priv);
        kfree(dev->driver_priv);
 }
 
index 5833f7e2a127811aa2298ded2bc62b1d06ae1e9d..b99a7fb09f8e31827a725151b415967699cdfa27 100644 (file)
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        /* Init LTM */
        lan78xx_init_ltm(dev);
 
-       dev->net->hard_header_len += TX_OVERHEAD;
-       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
        if (dev->udev->speed == USB_SPEED_SUPER) {
                buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
                dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
                return ret;
        }
 
+       dev->net->hard_header_len += TX_OVERHEAD;
+       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
        /* Init all registers */
        ret = lan78xx_reset(dev);
 
-       lan78xx_mdio_init(dev);
+       ret = lan78xx_mdio_init(dev);
 
        dev->net->flags |= IFF_MULTICAST;
 
        pdata->wol = WAKE_MAGIC;
 
-       return 0;
+       return ret;
 }
 
 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
        udev = interface_to_usbdev(intf);
        udev = usb_get_dev(udev);
 
-       ret = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct lan78xx_net));
        if (!netdev) {
-                       dev_err(&intf->dev, "Error: OOM\n");
-                       goto out1;
+               dev_err(&intf->dev, "Error: OOM\n");
+               ret = -ENOMEM;
+               goto out1;
        }
 
        /* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
        ret = register_netdev(netdev);
        if (ret != 0) {
                netif_err(dev, probe, netdev, "couldn't register the device\n");
-               goto out2;
+               goto out3;
        }
 
        usb_set_intfdata(intf, dev);
index 5894e3c9468f590e6b50144901b3f3b606e4ee69..8c373360827108855717f6d139034ff9f264bf0b 100644 (file)
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
+       {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 static void qmi_wwan_disconnect(struct usb_interface *intf)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
-       struct qmi_wwan_state *info = (void *)&dev->data;
+       struct qmi_wwan_state *info;
        struct list_head *iter;
        struct net_device *ldev;
 
+       /* called twice if separate control and data intf */
+       if (!dev)
+               return;
+       info = (void *)&dev->data;
        if (info->flags & QMI_WWAN_FLAG_MUX) {
                if (!rtnl_trylock()) {
                        restart_syscall();
index 96aa7e6cf214cc332eba6d54fcd626bee917c633..e17baac70f439f86c723732cc0298eaa5eee15de 100644 (file)
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 
 out:
        skb_gro_remcsum_cleanup(skb, &grc);
+       skb->remcsum_offload = 0;
        NAPI_GRO_CB(skb)->flush |= flush;
 
        return pp;
index c49f1f8b2e57459deb2605f242c9324e8960a95b..37046ac2c4413a51b9a77864cdf9e434a6ac7bd6 100644 (file)
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 
        c.directive.opcode = nvme_admin_directive_recv;
        c.directive.nsid = cpu_to_le32(nsid);
-       c.directive.numd = cpu_to_le32(sizeof(*s));
+       c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
        c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
        c.directive.dtype = NVME_DIR_STREAMS;
 
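The numd fix above reflects that NVMe directive commands encode the transfer length as a zero-based dword count, so an N-byte payload must be encoded as N/4 - 1 rather than N. A standalone check of the arithmetic; the 32-byte struct size is just an example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* NVMe "number of dwords" fields are zero-based: bytes/4 - 1. */
static uint32_t nvme_numd(size_t bytes)
{
        return (uint32_t)((bytes >> 2) - 1);
}

int main(void)
{
        /* a hypothetical 32-byte parameter struct is 8 dwords => numd 7;
         * the old code would have encoded 32 here
         */
        assert(nvme_numd(32) == 7);
        printf("numd for 32 bytes = %u\n", nvme_numd(32));
        return 0;
}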
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
        blk_queue_write_cache(q, vwc, vwc);
 }
 
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 {
        /*
         * APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
         * then don't do anything.
         */
        if (!ctrl->apsta)
-               return;
+               return 0;
 
        if (ctrl->npss > 31) {
                dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
-               return;
+               return 0;
        }
 
        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
-               return;
+               return 0;
 
        if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
                /* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
 
        kfree(table);
+       return ret;
 }
 
 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                 * In fabrics we need to verify the cntlid matches the
                 * admin connect
                 */
-               if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+               if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
                        ret = -EINVAL;
+                       goto out_free;
+               }
 
                if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
                        dev_err(ctrl->device,
                                "keep-alive support is mandatory for fabrics\n");
                        ret = -EINVAL;
+                       goto out_free;
                }
        } else {
                ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        else if (!ctrl->apst_enabled && prev_apst_enabled)
                dev_pm_qos_hide_latency_tolerance(ctrl->device);
 
-       nvme_configure_apst(ctrl);
-       nvme_configure_directives(ctrl);
+       ret = nvme_configure_apst(ctrl);
+       if (ret < 0)
+               return ret;
+
+       ret = nvme_configure_directives(ctrl);
+       if (ret < 0)
+               return ret;
 
        ctrl->identified = true;
 
+       return 0;
+
+out_free:
+       kfree(id);
        return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
        if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
                return sprintf(buf, "eui.%8phN\n", ns->eui);
 
-       while (ctrl->serial[serial_len - 1] == ' ')
+       while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+                                 ctrl->serial[serial_len - 1] == '\0'))
                serial_len--;
-       while (ctrl->model[model_len - 1] == ' ')
+       while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+                                ctrl->model[model_len - 1] == '\0'))
                model_len--;
 
        return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
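The wwid_show change hardens the serial/model trim loops: trailing NULs are now stripped along with spaces, and the length check stops the loop from underflowing when the field is entirely padding. A standalone sketch of the hardened trim:

#include <stdio.h>

/* Trim trailing spaces and NULs without running past the start of the
 * buffer; returns the trimmed length.
 */
static size_t trim_len(const char *buf, size_t len)
{
        while (len > 0 && (buf[len - 1] == ' ' || buf[len - 1] == '\0'))
                len--;
        return len;
}

int main(void)
{
        char serial[8] = "AB12    ";   /* space-padded, not NUL-terminated */
        char empty[8]  = "        ";   /* the case the old loop ran past */

        printf("%zu\n", trim_len(serial, sizeof(serial))); /* 4 */
        printf("%zu\n", trim_len(empty, sizeof(empty)));   /* 0, no underflow */
        return 0;
}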
index cd888a47d0fccb728b155f75aa85f0836ec4eac3..74a124a062640ae77abb8554881e1a93222abd2d 100644 (file)
@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
        if (dev->cmb) {
                iounmap(dev->cmb);
                dev->cmb = NULL;
-               if (dev->cmbsz) {
-                       sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-                                                    &dev_attr_cmb.attr, NULL);
-                       dev->cmbsz = 0;
-               }
+               sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+                                            &dev_attr_cmb.attr, NULL);
+               dev->cmbsz = 0;
        }
 }
 
@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
        /*
         * CMBs can currently only exist on >=1.2 PCIe devices. We only
-        * populate sysfs if a CMB is implemented. Note that we add the
-        * CMB attribute to the nvme_ctrl kobj which removes the need to remove
-        * it on exit. Since nvme_dev_attrs_group has no name we can pass
-        * NULL as final argument to sysfs_add_file_to_group.
+        * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+        * has no name we can pass NULL as final argument to
+        * sysfs_add_file_to_group.
         */
 
        if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
                dev->cmb = nvme_map_cmb(dev);
-
-               if (dev->cmbsz) {
+               if (dev->cmb) {
                        if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
                                                    &dev_attr_cmb.attr, NULL))
                                dev_warn(dev->ctrl.device,
index 31ca55dfcb1d49f3a1d88f7c6f5d0e7f0ee1e1ea..1b7f2520a20db7e151afe4a85a0e488fe0c85005 100644 (file)
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
        struct kref                     ref;
 };
 
+struct nvmet_fc_defer_fcp_req {
+       struct list_head                req_list;
+       struct nvmefc_tgt_fcp_req       *fcp_req;
+};
+
 struct nvmet_fc_tgt_queue {
        bool                            ninetypercent;
        u16                             qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
        struct nvmet_fc_tgt_assoc       *assoc;
        struct nvmet_fc_fcp_iod         *fod;           /* array of fcp_iods */
        struct list_head                fod_list;
+       struct list_head                pending_cmd_list;
+       struct list_head                avail_defer_list;
        struct workqueue_struct         *work_q;
        struct kref                     ref;
 } __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+                                       struct nvmet_fc_fcp_iod *fod);
 
 
 /* *********************** FC-NVME DMA Handling **************************** */
@@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
        static struct nvmet_fc_fcp_iod *fod;
-       unsigned long flags;
 
-       spin_lock_irqsave(&queue->qlock, flags);
+       lockdep_assert_held(&queue->qlock);
+
        fod = list_first_entry_or_null(&queue->fod_list,
                                        struct nvmet_fc_fcp_iod, fcp_list);
        if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
                 * will "inherit" that reference.
                 */
        }
-       spin_unlock_irqrestore(&queue->qlock, flags);
        return fod;
 }
 
 
+static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+                      struct nvmet_fc_tgt_queue *queue,
+                      struct nvmefc_tgt_fcp_req *fcpreq)
+{
+       struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+       /*
+        * put all admin cmds on hw queue id 0. All io commands go to
+        * their respective hw queue on a modulo basis
+        */
+       fcpreq->hwqid = queue->qid ?
+                       ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+       if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+               queue_work_on(queue->cpu, queue->work_q, &fod->work);
+       else
+               nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
 static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
                        struct nvmet_fc_fcp_iod *fod)
 {
        struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
        struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+       struct nvmet_fc_defer_fcp_req *deferfcp;
        unsigned long flags;
 
        fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
        fcpreq->nvmet_fc_private = NULL;
 
-       spin_lock_irqsave(&queue->qlock, flags);
-       list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
        fod->active = false;
        fod->abort = false;
        fod->aborted = false;
        fod->writedataactive = false;
        fod->fcpreq = NULL;
+
+       tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+       spin_lock_irqsave(&queue->qlock, flags);
+       deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+                               struct nvmet_fc_defer_fcp_req, req_list);
+       if (!deferfcp) {
+               list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+               spin_unlock_irqrestore(&queue->qlock, flags);
+
+               /* Release reference taken at queue lookup and fod allocation */
+               nvmet_fc_tgt_q_put(queue);
+               return;
+       }
+
+       /* Re-use the fod for the next pending cmd that was deferred */
+       list_del(&deferfcp->req_list);
+
+       fcpreq = deferfcp->fcp_req;
+
+       /* deferfcp can be reused for another IO at a later date */
+       list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
        spin_unlock_irqrestore(&queue->qlock, flags);
 
+       /* Save NVME CMD IO in fod */
+       memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+       /* Setup new fcpreq to be processed */
+       fcpreq->rspaddr = NULL;
+       fcpreq->rsplen  = 0;
+       fcpreq->nvmet_fc_private = fod;
+       fod->fcpreq = fcpreq;
+       fod->active = true;
+
+       /* inform LLDD IO is now being processed */
+       tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+       /* Submit deferred IO for processing */
+       nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
        /*
-        * release the reference taken at queue lookup and fod allocation
+        * Leave the queue lookup reference that was taken when the
+        * fod was originally allocated.
         */
-       nvmet_fc_tgt_q_put(queue);
-
-       tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 }
 
 static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
        queue->port = assoc->tgtport->port;
        queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
        INIT_LIST_HEAD(&queue->fod_list);
+       INIT_LIST_HEAD(&queue->avail_defer_list);
+       INIT_LIST_HEAD(&queue->pending_cmd_list);
        atomic_set(&queue->connected, 0);
        atomic_set(&queue->sqtail, 0);
        atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
        struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
        struct nvmet_fc_fcp_iod *fod = queue->fod;
+       struct nvmet_fc_defer_fcp_req *deferfcp;
        unsigned long flags;
        int i, writedataactive;
        bool disconnect;
@@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
                        }
                }
        }
+
+       /* Clean up deferred IOs in the queue */
+       list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+               list_del(&deferfcp->req_list);
+               kfree(deferfcp);
+       }
+
+       for (;;) {
+               deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+                               struct nvmet_fc_defer_fcp_req, req_list);
+               if (!deferfcp)
+                       break;
+
+               list_del(&deferfcp->req_list);
+               spin_unlock_irqrestore(&queue->qlock, flags);
+
+               tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+                               deferfcp->fcp_req);
+
+               tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+                               deferfcp->fcp_req);
+
+               tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+                               deferfcp->fcp_req);
+
+               kfree(deferfcp);
+
+               spin_lock_irqsave(&queue->qlock, flags);
+       }
        spin_unlock_irqrestore(&queue->qlock, flags);
 
        flush_workqueue(queue->work_q);
@@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
  * layer for processing.
  *
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing.  As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
+ *
+ * However, in some circumstances the job structure may not yet have
+ * been recycled when a new command arrives. FC is packetized, and the
+ * FC LLDD API may issue a hw command to send the response but not see
+ * the hw completion for that command (and upcall the nvmet_fc layer)
+ * before a new command is asynchronously received, so a command can
+ * arrive before the LLDD and nvmet_fc have recycled the job structure.
+ * This gives the appearance of more commands received than fit in the
+ * sq. To alleviate this scenario, a temporary queue is maintained in
+ * the transport for pending LLDD requests waiting for a job structure.
+ * In these "overrun" cases, a temporary queue element is allocated,
+ * the LLDD request and CMD IU buffer information are remembered, and
+ * the routine returns a -EOVERFLOW status. Subsequently, when a queue
+ * job structure is freed, it is immediately reallocated for anything
+ * on the pending request list. The LLDD's defer_rcv() callback is
+ * called, informing the LLDD that it may reuse the CMD IU buffer, and
+ * the io is then started normally with the transport.
  *
- * If this routine returns error, the lldd should abort the exchange.
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
  *
  * @target_port: pointer to the (registered) target port the FCP CMD IU
  *              was received on.
@@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
        struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
        struct nvmet_fc_tgt_queue *queue;
        struct nvmet_fc_fcp_iod *fod;
+       struct nvmet_fc_defer_fcp_req *deferfcp;
+       unsigned long flags;
 
        /* validate iu, so the connection id can be used to find the queue */
        if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
         * when the fod is freed.
         */
 
+       spin_lock_irqsave(&queue->qlock, flags);
+
        fod = nvmet_fc_alloc_fcp_iod(queue);
-       if (!fod) {
+       if (fod) {
+               spin_unlock_irqrestore(&queue->qlock, flags);
+
+               fcpreq->nvmet_fc_private = fod;
+               fod->fcpreq = fcpreq;
+
+               memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+               nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+               return 0;
+       }
+
+       if (!tgtport->ops->defer_rcv) {
+               spin_unlock_irqrestore(&queue->qlock, flags);
                /* release the queue lookup reference */
                nvmet_fc_tgt_q_put(queue);
                return -ENOENT;
        }
 
-       fcpreq->nvmet_fc_private = fod;
-       fod->fcpreq = fcpreq;
-       /*
-        * put all admin cmds on hw queue id 0. All io commands go to
-        * the respective hw queue based on a modulo basis
-        */
-       fcpreq->hwqid = queue->qid ?
-                       ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
-       memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+       deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+                       struct nvmet_fc_defer_fcp_req, req_list);
+       if (deferfcp) {
+               /* Just re-use one that was previously allocated */
+               list_del(&deferfcp->req_list);
+       } else {
+               spin_unlock_irqrestore(&queue->qlock, flags);
 
-       if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
-               queue_work_on(queue->cpu, queue->work_q, &fod->work);
-       else
-               nvmet_fc_handle_fcp_rqst(tgtport, fod);
+               /* Now we need to dynamically allocate one */
+               deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+               if (!deferfcp) {
+                       /* release the queue lookup reference */
+                       nvmet_fc_tgt_q_put(queue);
+                       return -ENOMEM;
+               }
+               spin_lock_irqsave(&queue->qlock, flags);
+       }
 
-       return 0;
+       /* For now, use rspaddr / rsplen to save payload information */
+       fcpreq->rspaddr = cmdiubuf;
+       fcpreq->rsplen  = cmdiubuf_len;
+       deferfcp->fcp_req = fcpreq;
+
+       /* defer processing till a fod becomes available */
+       list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+       /* NOTE: the queue lookup reference is still valid */
+
+       spin_unlock_irqrestore(&queue->qlock, flags);
+
+       return -EOVERFLOW;
 }
 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
 
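Stripped of the nvmet-fc specifics, the receive path above is an allocate-or-defer scheme: claim a free job under the queue lock if one exists, otherwise park the request on a pending list and return -EOVERFLOW so the caller knows its buffer is still referenced. A minimal sketch with generic names; the real code also recycles deferred elements via an avail list.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct job { struct list_head list; };
struct deferred { struct list_head list; void *payload; };

struct rx_queue {
        spinlock_t lock;
        struct list_head free_jobs;
        struct list_head pending;   /* deferred requests */
};

/* Returns 0 if a job was claimed, -EOVERFLOW if the request was parked. */
static int rx_claim_or_defer(struct rx_queue *q, void *payload)
{
        struct deferred *d;
        struct job *j;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        j = list_first_entry_or_null(&q->free_jobs, struct job, list);
        if (j) {
                list_del(&j->list);
                spin_unlock_irqrestore(&q->lock, flags);
                /* ... start processing with j and payload ... */
                return 0;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;
        d->payload = payload;       /* caller must keep payload alive */

        spin_lock_irqsave(&q->lock, flags);
        list_add_tail(&d->list, &q->pending);
        spin_unlock_irqrestore(&q->lock, flags);
        return -EOVERFLOW;          /* a freed job will pick this up later */
}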
index af0cc3456dc1b48b1325c06c5edd2ca8cc22a640..b4b7eab2940024024c46ead23d6b1c415fa146f7 100644 (file)
@@ -4259,6 +4259,41 @@ int pci_reset_function(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
+/**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device.  This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset.  It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+       int rc;
+
+       rc = pci_probe_reset_function(dev);
+       if (rc)
+               return rc;
+
+       pci_dev_save_and_disable(dev);
+
+       rc = __pci_reset_function_locked(dev);
+
+       pci_dev_restore(dev);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
 /**
  * pci_try_reset_function - quiesce and reset a PCI device function
  * @dev: PCI device to reset
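Per the new kerneldoc, pci_reset_function_locked() must be called with the PCI device lock already held. A hedged usage sketch, assuming the lock in question is the embedded struct device lock taken via device_lock(); the caller and function name are hypothetical.

#include <linux/device.h>
#include <linux/pci.h>

/* Reset a function while guaranteeing no concurrent driver activity;
 * pdev is assumed to be a pci_dev we hold a reference on.
 */
static int example_reset(struct pci_dev *pdev)
{
        int rc;

        device_lock(&pdev->dev);        /* assumed "PCI device lock" */
        rc = pci_reset_function_locked(pdev);
        device_unlock(&pdev->dev);

        return rc;                      /* 0 or negative errno */
}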
index 20f1b44939944614ff270c757fc7152f901e9f09..04e929fd0ffee494cc744cf495e5acd9e437ea6b 100644 (file)
@@ -1547,6 +1547,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
                },
        },
+       {
+               .ident = "HP Chromebook 11 G5 (Setzer)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+               },
+       },
        {
                .ident = "Acer Chromebook R11 (Cyan)",
                .matches = {
index 4d4ef42a39b5faaa1969d20a5aeeedffef90074c..86c4b3fab7b0ea8f0abfdf36e5a2b035e024ec66 100644 (file)
@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
 
 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
 static const unsigned int mrfld_pwm0_pins[] = { 144 };
 static const unsigned int mrfld_pwm1_pins[] = { 145 };
 static const unsigned int mrfld_pwm2_pins[] = { 132 };
index f024e25787fc603c3469ea75de53452743d6af16..0c6d7812d6fd981b95f9d526cb9d6645e4b7855d 100644 (file)
@@ -37,7 +37,7 @@
 #define IRQ_STATUS     0x10
 #define IRQ_WKUP       0x18
 
-#define NB_FUNCS 2
+#define NB_FUNCS 3
 #define GPIO_PER_REG   32
 
 /**
@@ -126,6 +126,16 @@ struct armada_37xx_pinctrl {
                .funcs = {_func1, "gpio"}       \
        }
 
+#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \
+       {                                       \
+               .name = _name,                  \
+               .start_pin = _start,            \
+               .npins = _nr,                   \
+               .reg_mask = _mask,              \
+               .val = {_v1, _v2, _v3}, \
+               .funcs = {_f1, _f2, "gpio"}     \
+       }
+
 #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \
                      _f1, _f2)                         \
        {                                               \
@@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
        PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
        PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
        PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
-       PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
+       PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
        PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
        PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
        PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
        PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
-       PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"),
+       PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
+                      "mii", "mii_err"),
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_nb = {
@@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_sb = {
-       .nr_pins = 29,
+       .nr_pins = 30,
        .name = "GPIO2",
        .groups = armada_37xx_sb_groups,
        .ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
@@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
 {
        int f;
 
-       for (f = 0; f < NB_FUNCS; f++)
+       for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
                if (!strcmp(grp->funcs[f], func))
                        return f;
 
@@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
                for (j = 0; j < grp->extra_npins; j++)
                        grp->pins[i+j] = grp->extra_pin + j;
 
-               for (f = 0; f < NB_FUNCS; f++) {
+               for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) {
                        int ret;
                        /* check for unique functions and count groups */
                        ret = armada_37xx_add_function(info->funcs, &funcsize,
@@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
                        struct armada_37xx_pin_group *gp = &info->groups[g];
                        int f;
 
-                       for (f = 0; f < NB_FUNCS; f++) {
+                       for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
                                if (strcmp(gp->funcs[f], name) == 0) {
                                        *groups = gp->name;
                                        groups++;
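With NB_FUNCS raised to 3 while most groups still declare fewer functions, the unused tail slots of funcs[] stay NULL, hence the added grp->funcs[f] guard in every loop. A standalone illustration of the idiom:

#include <stdio.h>
#include <string.h>

#define NB_FUNCS 3

struct pin_group {
        const char *name;
        const char *funcs[NB_FUNCS]; /* unused tail slots stay NULL */
};

/* Find a function's index, stopping at the first NULL slot. */
static int find_func(const struct pin_group *grp, const char *func)
{
        int f;

        for (f = 0; f < NB_FUNCS && grp->funcs[f]; f++)
                if (!strcmp(grp->funcs[f], func))
                        return f;
        return -1;
}

int main(void)
{
        struct pin_group rgmii = { "rgmii", { "mii", "gpio" } }; /* funcs[2] == NULL */

        printf("%d %d\n", find_func(&rgmii, "gpio"), find_func(&rgmii, "ptp"));
        return 0;   /* prints "1 -1" */
}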
index 159580c04b14b138c5ec78b6768db2224d63fb58..47a392bc73c821203abe1c7cb1e966db9a40a3ba 100644 (file)
@@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
                  SUNXI_FUNCTION_VARIANT(0x3, "emac",   /* ETXD1 */
                                         PINCTRL_SUN7I_A20),
                  SUNXI_FUNCTION(0x4, "keypad"),        /* IN6 */
+                 SUNXI_FUNCTION(0x5, "sim"),           /* DET */
                  SUNXI_FUNCTION_IRQ(0x6, 16),          /* EINT16 */
                  SUNXI_FUNCTION(0x7, "csi1")),         /* D16 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
index a433a306a2d06ae11dd2a8c61830e0c76c4e7862..c75e094b2d90779f92570a534fd1c8d53e6a9e97 100644 (file)
@@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183};
 static const int usb1_muxvals[] = {0, 0};
 static const unsigned usb2_pins[] = {184, 185};
 static const int usb2_muxvals[] = {0, 0};
-static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_pins[] = {187, 188};
 static const int usb3_muxvals[] = {0, 0};
 static const unsigned port_range0_pins[] = {
        300, 301, 302, 303, 304, 305, 306, 307,         /* PORT0x */
index 787e3967bd5c5741aeb7a2cb96c18e38901092ed..f828ee340a98238052448d15a5f7604c286926dd 100644 (file)
@@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
        struct zx_pinctrl_soc_info *info = zpctl->info;
        const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
        struct zx_pin_data *data = pindesc->drv_data;
-       struct zx_mux_desc *mux = data->muxes;
-       u32 mask = (1 << data->width) - 1;
-       u32 offset = data->offset;
-       u32 bitpos = data->bitpos;
+       struct zx_mux_desc *mux;
+       u32 mask, offset, bitpos;
        struct function_desc *func;
        unsigned long flags;
        u32 val, mval;
@@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
        if (!data)
                return -EINVAL;
 
+       mux = data->muxes;
+       mask = (1 << data->width) - 1;
+       offset = data->offset;
+       bitpos = data->bitpos;
+
        func = pinmux_generic_get_function(pctldev, func_selector);
        if (!func)
                return -EINVAL;
index b77435783ef332c30963f84e280cefbedbdb8e3b..7eacc1c4b3b10e1103e6e9c895112eb176245faa 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
 
 #include "ptp_private.h"
 
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
        kfree(ptp);
 }
 
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+       struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+                                            aux_work.work);
+       struct ptp_clock_info *info = ptp->info;
+       long delay;
+
+       delay = info->do_aux_work(info);
+
+       if (delay >= 0)
+               kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
 /* public interface */
 
 struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        mutex_init(&ptp->pincfg_mux);
        init_waitqueue_head(&ptp->tsev_wq);
 
+       if (ptp->info->do_aux_work) {
+               char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
+
+               kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+               ptp->kworker = kthread_create_worker(0, worker_name ?
+                                                    worker_name : info->name);
+               kfree(worker_name);
+               if (IS_ERR(ptp->kworker)) {
+                       err = PTR_ERR(ptp->kworker);
+                       pr_err("failed to create ptp aux_worker %d\n", err);
+                       goto kworker_err;
+               }
+       }
+
        err = ptp_populate_pin_groups(ptp);
        if (err)
                goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
 no_device:
        ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
+       if (ptp->kworker)
+               kthread_destroy_worker(ptp->kworker);
+kworker_err:
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);
 
+       if (ptp->kworker) {
+               kthread_cancel_delayed_work_sync(&ptp->aux_work);
+               kthread_destroy_worker(ptp->kworker);
+       }
+
        /* Release the clock's resources. */
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
 }
 EXPORT_SYMBOL(ptp_find_pin);
 
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+       return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
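A driver opts into the new aux worker by filling do_aux_work in its ptp_clock_info: the kworker invokes it, and a non-negative return value is treated as a delay in jiffies before the next run, while ptp_schedule_worker() lets event paths kick the worker early. A hedged driver-side sketch; only do_aux_work is shown and the other mandatory callbacks are elided.

#include <linux/ptp_clock_kernel.h>

/* Hypothetical driver periodically servicing its timestamp FIFO. */
static long example_do_aux_work(struct ptp_clock_info *info)
{
        /* ... drain the hardware timestamp FIFO here ... */

        return HZ / 10;        /* >= 0: run again in 100ms; < 0: stop */
}

static struct ptp_clock_info example_caps = {
        .name        = "example",
        .do_aux_work = example_do_aux_work,
        /* remaining mandatory callbacks elided for brevity */
};

/* From an interrupt or event path, kick the worker immediately:
 *      ptp_schedule_worker(example_ptp_clock, 0);
 */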
index d95888974d0c67f1e4cf4d3c2229ba4e901a2d87..b86f1bfecd6f2329cdd19c16da49c562e9e86fc6 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
        struct attribute_group pin_attr_group;
        /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
        const struct attribute_group *pin_attr_groups[2];
+       struct kthread_worker *kworker;
+       struct kthread_delayed_work aux_work;
 };
 
 /*
index 8975cd32139047cf03afe9a93544cc919eaed661..d42e758518ed92e33dfd6c6aaf2a4ee2ec198ff0 100644 (file)
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                struct rtable *rt = (struct rtable *) dst;
                __be32 *pkey = &ip_hdr(skb)->daddr;
 
-               if (rt->rt_gateway)
+               if (rt && rt->rt_gateway)
                        pkey = &rt->rt_gateway;
 
                /* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                struct rt6_info *rt = (struct rt6_info *) dst;
                struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
 
-               if (!ipv6_addr_any(&rt->rt6i_gateway))
+               if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
                        pkey = &rt->rt6i_gateway;
 
                /* IPv6 */
index 707ee2f5954d0ac0890c6f05967f7acd24157704..4591113c49de3af951908ed2257f6f5e88663b96 100644 (file)
@@ -3198,10 +3198,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
                return -EBUSY;
        if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
                return -EFAULT;
-       if (qd.cnum == -1)
+       if (qd.cnum == -1) {
+               if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+                       return -EINVAL;
                qd.cnum = qd.id;
-       else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
-       {
+       } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
                if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
                        return -EINVAL;
                qd.instance = dev->scsi_host_ptr->host_no;
index 7dfe709a713837b075cd2021c00ee6a5fb44d978..6844ba36161638d995f3d5d5135c160fc54bc9ed 100644 (file)
@@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = {
 };
 
 /**
- * bnx2fc_percpu_thread_create - Create a receive thread for an
- *                              online CPU
+ * bnx2fc_cpu_online - Create a receive thread for an  online CPU
  *
  * @cpu: cpu index for the online cpu
  */
-static void bnx2fc_percpu_thread_create(unsigned int cpu)
+static int bnx2fc_cpu_online(unsigned int cpu)
 {
        struct bnx2fc_percpu_s *p;
        struct task_struct *thread;
@@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
        thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
                                        (void *)p, cpu_to_node(cpu),
                                        "bnx2fc_thread/%d", cpu);
+       if (IS_ERR(thread))
+               return PTR_ERR(thread);
+
        /* bind thread to the cpu */
-       if (likely(!IS_ERR(thread))) {
-               kthread_bind(thread, cpu);
-               p->iothread = thread;
-               wake_up_process(thread);
-       }
+       kthread_bind(thread, cpu);
+       p->iothread = thread;
+       wake_up_process(thread);
+       return 0;
 }
 
-static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+static int bnx2fc_cpu_offline(unsigned int cpu)
 {
        struct bnx2fc_percpu_s *p;
        struct task_struct *thread;
@@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
        thread = p->iothread;
        p->iothread = NULL;
 
-
        /* Free all work in the list */
        list_for_each_entry_safe(work, tmp, &p->work_list, list) {
                list_del_init(&work->list);
@@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
 
        if (thread)
                kthread_stop(thread);
-}
-
-
-static int bnx2fc_cpu_online(unsigned int cpu)
-{
-       printk(PFX "CPU %x online: Create Rx thread\n", cpu);
-       bnx2fc_percpu_thread_create(cpu);
-       return 0;
-}
-
-static int bnx2fc_cpu_dead(unsigned int cpu)
-{
-       printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
-       bnx2fc_percpu_thread_destroy(cpu);
        return 0;
 }
 
@@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void)
                spin_lock_init(&p->fp_work_lock);
        }
 
-       get_online_cpus();
-
-       for_each_online_cpu(cpu)
-               bnx2fc_percpu_thread_create(cpu);
-
-       rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                      "scsi/bnx2fc:online",
-                                      bnx2fc_cpu_online, NULL);
+       rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
+                              bnx2fc_cpu_online, bnx2fc_cpu_offline);
        if (rc < 0)
-               goto stop_threads;
+               goto stop_thread;
        bnx2fc_online_state = rc;
 
-       cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
-                                 NULL, bnx2fc_cpu_dead);
-       put_online_cpus();
-
        cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
-
        return 0;
 
-stop_threads:
-       for_each_online_cpu(cpu)
-               bnx2fc_percpu_thread_destroy(cpu);
-       put_online_cpus();
+stop_thread:
        kthread_stop(l2_thread);
 free_wq:
        destroy_workqueue(bnx2fc_wq);
@@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void)
        struct fcoe_percpu_s *bg;
        struct task_struct *l2_thread;
        struct sk_buff *skb;
-       unsigned int cpu = 0;
 
        /*
         * NOTE: Since cnic calls register_driver routine rtnl_lock,
@@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void)
        if (l2_thread)
                kthread_stop(l2_thread);
 
-       get_online_cpus();
-       /* Destroy per cpu threads */
-       for_each_online_cpu(cpu) {
-               bnx2fc_percpu_thread_destroy(cpu);
-       }
-
-       cpuhp_remove_state_nocalls(bnx2fc_online_state);
-       cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
-
-       put_online_cpus();
+       cpuhp_remove_state(bnx2fc_online_state);
 
        destroy_workqueue(bnx2fc_wq);
        /*
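The bnx2fc conversion collapses the hand-rolled for_each_online_cpu() loops and the extra _DEAD state into a single cpuhp_setup_state() call, which invokes the online callback for CPUs that are already up and registers the offline callback for teardown; cpuhp_remove_state() later unwinds everything. A generic sketch of the pattern, with hypothetical names:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state example_online_state;

static int example_cpu_online(unsigned int cpu)
{
        /* create/bind the per-cpu resource for @cpu */
        return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
        /* tear down the per-cpu resource and drain queued work */
        return 0;
}

static int __init example_init(void)
{
        int rc;

        /* Also calls example_cpu_online() for each CPU already up. */
        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                               example_cpu_online, example_cpu_offline);
        if (rc < 0)
                return rc;
        example_online_state = rc;   /* dynamic state id, removed on exit */
        return 0;
}

static void __exit example_exit(void)
{
        /* Invokes example_cpu_offline() on all online CPUs, then frees. */
        cpuhp_remove_state(example_online_state);
}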
index 913c750205ce2a3f1a90f5d255d98d3b39f90145..26de61d65a4d259fa41e7e070648f775031a9662 100644 (file)
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
        return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+       unsigned int cpu = wqe % num_possible_cpus();
+       struct bnx2fc_percpu_s *fps;
+       struct bnx2fc_work *work;
+
+       fps = &per_cpu(bnx2fc_percpu, cpu);
+       spin_lock_bh(&fps->fp_work_lock);
+       if (fps->iothread) {
+               work = bnx2fc_alloc_work(tgt, wqe);
+               if (work) {
+                       list_add_tail(&work->list, &fps->work_list);
+                       wake_up_process(fps->iothread);
+                       spin_unlock_bh(&fps->fp_work_lock);
+                       return;
+               }
+       }
+       spin_unlock_bh(&fps->fp_work_lock);
+       bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
        struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
                        /* Unsolicited event notification */
                        bnx2fc_process_unsol_compl(tgt, wqe);
                } else {
-                       /* Pending work request completion */
-                       struct bnx2fc_work *work = NULL;
-                       struct bnx2fc_percpu_s *fps = NULL;
-                       unsigned int cpu = wqe % num_possible_cpus();
-
-                       fps = &per_cpu(bnx2fc_percpu, cpu);
-                       spin_lock_bh(&fps->fp_work_lock);
-                       if (unlikely(!fps->iothread))
-                               goto unlock;
-
-                       work = bnx2fc_alloc_work(tgt, wqe);
-                       if (work)
-                               list_add_tail(&work->list,
-                                             &fps->work_list);
-unlock:
-                       spin_unlock_bh(&fps->fp_work_lock);
-
-                       /* Pending work request completion */
-                       if (fps->iothread && work)
-                               wake_up_process(fps->iothread);
-                       else
-                               bnx2fc_process_cq_compl(tgt, wqe);
+                       bnx2fc_pending_work(tgt, wqe);
                        num_free_sqes++;
                }
                cqe++;
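bnx2fc_pending_work() distills the completion path to queue-or-fallback: hand the wqe to the bound per-cpu I/O thread when the thread exists and the work allocation succeeds, otherwise drop the lock and process inline. A hedged generic sketch of that shape:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct work_item { struct list_head list; unsigned int wqe; };

struct percpu_worker {
        spinlock_t lock;
        struct list_head work_list;
        struct task_struct *iothread;   /* NULL if no thread on this cpu */
};

static void process_inline(unsigned int wqe)
{
        /* slow path: handle the completion in the caller's context */
}

static void queue_or_process(struct percpu_worker *w, unsigned int wqe)
{
        struct work_item *item;

        spin_lock_bh(&w->lock);
        if (w->iothread) {
                item = kzalloc(sizeof(*item), GFP_ATOMIC);
                if (item) {
                        item->wqe = wqe;
                        list_add_tail(&item->list, &w->work_list);
                        wake_up_process(w->iothread);
                        spin_unlock_bh(&w->lock);
                        return;
                }
        }
        spin_unlock_bh(&w->lock);
        process_inline(wqe);    /* fallback: do the work right here */
}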
index 86afc002814cd07bbc3b55b64ca31201208a514b..4ebcda8d9500439941bd630f8b1187875a7beac5 100644 (file)
@@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle)
 
 
 /**
- * bnx2i_percpu_thread_create - Create a receive thread for an
- *                             online CPU
+ * bnx2i_cpu_online - Create a receive thread for an online CPU
  *
  * @cpu:       cpu index for the online cpu
  */
-static void bnx2i_percpu_thread_create(unsigned int cpu)
+static int bnx2i_cpu_online(unsigned int cpu)
 {
        struct bnx2i_percpu_s *p;
        struct task_struct *thread;
@@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
        thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
                                        cpu_to_node(cpu),
                                        "bnx2i_thread/%d", cpu);
+       if (IS_ERR(thread))
+               return PTR_ERR(thread);
+
        /* bind thread to the cpu */
-       if (likely(!IS_ERR(thread))) {
-               kthread_bind(thread, cpu);
-               p->iothread = thread;
-               wake_up_process(thread);
-       }
+       kthread_bind(thread, cpu);
+       p->iothread = thread;
+       wake_up_process(thread);
+       return 0;
 }
 
-
-static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+static int bnx2i_cpu_offline(unsigned int cpu)
 {
        struct bnx2i_percpu_s *p;
        struct task_struct *thread;
@@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
        spin_unlock_bh(&p->p_work_lock);
        if (thread)
                kthread_stop(thread);
-}
-
-static int bnx2i_cpu_online(unsigned int cpu)
-{
-       pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
-       bnx2i_percpu_thread_create(cpu);
-       return 0;
-}
-
-static int bnx2i_cpu_dead(unsigned int cpu)
-{
-       pr_info("CPU %x offline: Remove Rx thread\n", cpu);
-       bnx2i_percpu_thread_destroy(cpu);
        return 0;
 }
 
@@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void)
                p->iothread = NULL;
        }
 
-       get_online_cpus();
-
-       for_each_online_cpu(cpu)
-               bnx2i_percpu_thread_create(cpu);
-
-       err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                      "scsi/bnx2i:online",
-                                      bnx2i_cpu_online, NULL);
+       err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
+                               bnx2i_cpu_online, bnx2i_cpu_offline);
        if (err < 0)
-               goto remove_threads;
+               goto unreg_driver;
        bnx2i_online_state = err;
-
-       cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
-                                 NULL, bnx2i_cpu_dead);
-       put_online_cpus();
        return 0;
 
-remove_threads:
-       for_each_online_cpu(cpu)
-               bnx2i_percpu_thread_destroy(cpu);
-       put_online_cpus();
+unreg_driver:
        cnic_unregister_driver(CNIC_ULP_ISCSI);
 unreg_xport:
        iscsi_unregister_transport(&bnx2i_iscsi_transport);
@@ -551,7 +525,6 @@ out:
 static void __exit bnx2i_mod_exit(void)
 {
        struct bnx2i_hba *hba;
-       unsigned cpu = 0;
 
        mutex_lock(&bnx2i_dev_lock);
        while (!list_empty(&adapter_list)) {
@@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void)
        }
        mutex_unlock(&bnx2i_dev_lock);
 
-       get_online_cpus();
-
-       for_each_online_cpu(cpu)
-               bnx2i_percpu_thread_destroy(cpu);
-
-       cpuhp_remove_state_nocalls(bnx2i_online_state);
-       cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
-       put_online_cpus();
+       cpuhp_remove_state(bnx2i_online_state);
 
        iscsi_unregister_transport(&bnx2i_iscsi_transport);
        cnic_unregister_driver(CNIC_ULP_ISCSI);
index 4ed48ed38e79316f02ca1e299e56f66eea84ba8e..7ee1a94c0b33eefd57a6889df66649477ad4713b 100644 (file)
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP: Rcv %08x Release %08x Drop %08x\n",
+                               "FCP: Rcv %08x Defer %08x Release %08x "
+                               "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_defer),
                                atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
index 5cc8b0f7d885fb0dfd5a00342f6ca72c72a40129..744f3f395b64852a294a9300adb64a496087aed7 100644 (file)
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf + len, size - len,
-                               "FCP: Rcv %08x Drop %08x\n",
+                               "FCP: Rcv %08x Defer %08x Release %08x "
+                               "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_defer),
+                               atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
                if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
index fbeec344c6cc3be0bdd878db6bafda7353dcf901..bbbd0f84160d36563008a212afd8252f86ef15c8 100644 (file)
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+                    struct nvmefc_tgt_fcp_req *rsp)
+{
+       struct lpfc_nvmet_tgtport *tgtp;
+       struct lpfc_nvmet_rcv_ctx *ctxp =
+               container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+       struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+       struct lpfc_hba *phba = ctxp->phba;
+
+       lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+                        ctxp->oxid, ctxp->size, smp_processor_id());
+
+       tgtp = phba->targetport->private;
+       atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+       lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+       .defer_rcv      = lpfc_nvmet_defer_rcv,
 
        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                return;
        }
 
+       /* Processing of FCP command is deferred */
+       if (rc == -EOVERFLOW) {
+               lpfc_nvmeio_data(phba,
+                                "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+                                oxid, size, sid);
+               /* defer reposting rcv buffer till .defer_rcv callback */
+               ctxp->rqb_buffer = nvmebuf;
+               atomic_inc(&tgtp->rcv_fcp_cmd_out);
+               return;
+       }
+
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
index e675ef17be08a0f67dd76f9d33a2ab831f961ce8..48a76788b003cb746afa45376272af362c4446ce 100644 (file)
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
        atomic_t rcv_fcp_cmd_in;
        atomic_t rcv_fcp_cmd_out;
        atomic_t rcv_fcp_cmd_drop;
+       atomic_t rcv_fcp_cmd_defer;
        atomic_t xmt_fcp_release;
 
        /* Stats counters - lpfc_nvmet_xmt_fcp_op */
index 4d038926a4558c45b8685ca08c4976233a7ed5e7..351f06dfc5a0dac7bf4f7606166611848876d477 100644 (file)
@@ -528,7 +528,8 @@ struct fip_vlan {
 #define QEDF_WRITE                    (1 << 0)
 #define MAX_FIBRE_LUNS                 0xffffffff
 
-#define QEDF_MAX_NUM_CQS               8
+#define MIN_NUM_CPUS_MSIX(x)   min_t(u32, x->dev_info.num_cqs, \
+                                       num_online_cpus())
 
 /*
  * PCI function probe defines
index 7786c97e033fdcdd9643a41dd1d0bb928036d65b..1d13c9ca517de7e2cec033bdf050498fafbfeccc 100644 (file)
@@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
         * we allocate is the minimum of:
         *
         * Number of CPUs
-        * Number of MSI-X vectors
-        * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+        * Number allocated by qed for our PCI function
         */
-       qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
-           num_online_cpus());
+       qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
                   qedf->num_queues);
@@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
                goto err1;
        }
 
+       /* Learn information crucial for qedf to progress */
+       rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+       if (rc) {
+               QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
+               goto err1;
+       }
+
        /* queue allocation code should come here
         * order should be
         *      slowpath_start
@@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
        }
        qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
 
-       /* Learn information crucial for qedf to progress */
-       rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
-       if (rc) {
-               QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
-               goto err1;
-       }
-
        /* Record BDQ producer doorbell addresses */
        qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
        qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
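
With QEDF_MAX_NUM_CQS gone, the CQ count is bounded by what qed actually allocated for this PCI function, which is why fill_dev_info() has to move ahead of the queue sizing that evaluates the macro. min_t() casts both operands to the named type before comparing, avoiding signed/unsigned surprises between the two sources; a sketch of what the macro expands to:

        /* both values forced to u32 before the compare */
        qedf->num_queues = min_t(u32, qedf->dev_info.num_cqs,
                                 num_online_cpus());
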
index b20da0d27ad78494bc9a0172d428dc176cbcb852..3f82ea1b72dc8739283992b4f425d77692b51e7b 100644 (file)
@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-       unsigned long flags;
 
        /*
         * Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
         */
        cmd->cmd_in_wq = 0;
 
-       spin_lock_irqsave(&cmd->cmd_lock, flags);
-       cmd->data_work = 1;
-       if (cmd->aborted) {
-               cmd->data_work_free = 1;
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
-               tcm_qla2xxx_free_cmd(cmd);
-               return;
-       }
-       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
        cmd->qpair->tgt_counters.qla_core_ret_ctio++;
        if (!cmd->write_data_transferred) {
                /*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
        qlt_xmit_tm_rsp(mcmd);
 }
 
-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       unsigned long flags;
 
        if (qlt_abort_cmd(cmd))
                return;
-
-       spin_lock_irqsave(&cmd->cmd_lock, flags);
-       if ((cmd->state == QLA_TGT_STATE_NEW)||
-           ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
-               DATA_WORK_NOT_FREE(cmd))) {
-               cmd->data_work_free = 1;
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-               /*
-                * cmd has not reached fw, Use this trigger to free it.
-                */
-               tcm_qla2xxx_free_cmd(cmd);
-               return;
-       }
-       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-       return;
-
 }
 
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
index 4fe606b000b4461c05e441cbb787edcfec51e72b..d7ff71e0c85c6ecd525d0d59d3f3f0da63952b47 100644 (file)
@@ -751,35 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
        return count;
 }
 
-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
-{
-       switch (hp->dxfer_direction) {
-       case SG_DXFER_NONE:
-               if (hp->dxferp || hp->dxfer_len > 0)
-                       return false;
-               return true;
-       case SG_DXFER_FROM_DEV:
-               /*
-                * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
-                * can either be NULL or != NULL so there's no point in checking
-                * it either. So just return true.
-                */
-               return true;
-       case SG_DXFER_TO_DEV:
-       case SG_DXFER_TO_FROM_DEV:
-               if (!hp->dxferp || hp->dxfer_len == 0)
-                       return false;
-               return true;
-       case SG_DXFER_UNKNOWN:
-               if ((!hp->dxferp && hp->dxfer_len) ||
-                   (hp->dxferp && hp->dxfer_len == 0))
-                       return false;
-               return true;
-       default:
-               return false;
-       }
-}
-
 static int
 sg_common_write(Sg_fd * sfp, Sg_request * srp,
                unsigned char *cmnd, int timeout, int blocking)
@@ -800,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
                        "sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
                        (int) cmnd[0], (int) hp->cmd_len));
 
-       if (!sg_is_valid_dxfer(hp))
+       if (hp->dxfer_len >= SZ_256M)
                return -EINVAL;
 
        k = sg_start_req(srp, cmnd);
index ca11be21f64b606091d5e8e0e62044ae263dc3ab..34ca7823255d692d05aa753e7c5bc28a385c5583 100644 (file)
@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
                        continue;
                }
 
+               set_current_state(TASK_RUNNING);
                wp = async->buf_write_ptr;
                n1 = min(n, async->prealloc_bufsz - wp);
                n2 = n - n1;
@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
                        }
                        continue;
                }
+
+               set_current_state(TASK_RUNNING);
                rp = async->buf_read_ptr;
                n1 = min(n, async->prealloc_bufsz - rp);
                n2 = n - n1;
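
Both comedi hunks fix the same bug: the read/write loops set TASK_INTERRUPTIBLE before testing for buffer space, then went on to copy user data while still in that state, which can trigger the "do not call blocking ops when !TASK_RUNNING" warning and lose a wakeup. A minimal sketch of the corrected loop shape (ready() is a stand-in for the driver's buffer-space test):

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (ready())
                        break;
                schedule();                     /* actually sleep */
        }
        set_current_state(TASK_RUNNING);        /* copy_*_user() may sleep */
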
index a6a8393d66645e75c13ecf8f67ca9c71ef48a6e5..3e00df74b18c883d4e19f0bac7ec087988b58821 100644 (file)
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
                             long m)
 {
        struct ad2s1210_state *st = iio_priv(indio_dev);
-       bool negative;
+       u16 negative;
        int ret = 0;
        u16 pos;
        s16 vel;
index e583dd8a418b537eda69a9606fa8a3fc2c6a6207..d4fa41be80f9a1719574af28c8981ef8e8d287ca 100644 (file)
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
-               return;
+               goto rel_skb;
        }
 
        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
+rel_skb:
+       __kfree_skb(skb);
 }
 
 static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
-               return;
+               goto rel_skb;
        }
 
        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
+rel_skb:
+       __kfree_skb(skb);
 }
 
 static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
        struct tid_info *t = lldi->tids;
 
        csk = lookup_tid(t, tid);
-       if (unlikely(!csk))
+       if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
-       else
+               goto rel_skb;
+       } else {
                cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+       }
 
        cxgbit_put_csk(csk);
+rel_skb:
+       __kfree_skb(skb);
 }
 
 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
index dda13f1af38e581f746e4528590510c07efb916f..514986b57c2d60ce19c1074f4d19d65dd550be2e 100644 (file)
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 
 static void
 cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
-                     unsigned int nents)
+                     unsigned int nents, u32 skip)
 {
        struct skb_seq_state st;
        const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
                }
 
                consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
-                                                buf_len, consumed);
+                                                buf_len, skip + consumed);
        }
 }
 
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
                struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
                u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
 
-               cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+               cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
        }
 
        cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
                  cmd->se_cmd.data_length);
 
        if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+               u32 skip = data_offset % PAGE_SIZE;
+
                sg_off = data_offset / PAGE_SIZE;
                sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-               sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+               sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
 
-               cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+               cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
        }
 
 check_payload:
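
The new skip parameter matters because data_offset need not be page aligned: the copy has to start partway into the first page of the SG window, and sg_nents has to cover skip + data_len or the tail of the PDU would not fit. sg_pcopy_from_buffer() already takes a byte offset into the scatterlist area, so the fix just threads it through:

        /* write buf_len bytes into the scatterlist, starting at byte
         * (skip + consumed) of the mapped area */
        consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
                                         buf_len, skip + consumed);
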
index 74e4975dd1b1e74d6c39517be102ccdc92614e80..5001261f5d69d759dd25161401510551d35c3da7 100644 (file)
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
                return 0;
        }
        np->np_thread_state = ISCSI_NP_THREAD_RESET;
+       atomic_inc(&np->np_reset_count);
 
        if (np->np_thread) {
                spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
        cmd->data_direction     = DMA_NONE;
+       kfree(cmd->text_in_ptr);
        cmd->text_in_ptr        = NULL;
 
        return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                return text_length;
 
        if (completed) {
-               hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+               hdr->flags = ISCSI_FLAG_CMD_FINAL;
        } else {
-               hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+               hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
                cmd->read_data_done += text_length;
                if (cmd->targ_xfer_tag == 0xFFFFFFFF)
                        cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
index e9bdc8b86e7d1d71d77cf4388370af6fcf3fded7..dc13afbd4c88dec2390ca8e65b3332d2f78acd73 100644 (file)
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        flush_signals(current);
 
        spin_lock_bh(&np->np_thread_lock);
-       if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+       if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
                np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+               spin_unlock_bh(&np->np_thread_lock);
                complete(&np->np_restart_comp);
+               return 1;
        } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
                spin_unlock_bh(&np->np_thread_lock);
                goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                goto exit;
        } else if (rc < 0) {
                spin_lock_bh(&np->np_thread_lock);
-               if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+               if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+                       np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
                        spin_unlock_bh(&np->np_thread_lock);
                        complete(&np->np_restart_comp);
                        iscsit_put_transport(conn->conn_transport);
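
Replacing the ISCSI_NP_THREAD_RESET state test with the np_reset_count counter closes the window where a reset is requested after the login thread has already sampled np_thread_state. atomic_dec_if_positive() decrements only if the result stays non-negative and returns that would-be result, so ">= 0" means exactly one pending reset was consumed:

        atomic_inc(&np->np_reset_count);        /* reset side: publish request */

        /* login thread: returns the new value; >= 0 means we consumed a reset */
        if (atomic_dec_if_positive(&np->np_reset_count) >= 0)
                complete(&np->np_restart_comp);
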
index 36913734c6bc58ed326ac793dc19d3d265ec878d..02e8a5d8665837f415ac7b2d78e51067d1ef5b62 100644 (file)
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
        mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
-       list_del(&acl->acl_list);
+       list_del_init(&acl->acl_list);
        mutex_unlock(&tpg->acl_node_mutex);
 
        target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
         * in transport_deregister_session().
         */
        list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
-               list_del(&nacl->acl_list);
+               list_del_init(&nacl->acl_list);
 
                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
index 97fed9a298bdc29a19d184ba49df458e04e4f9f1..836d552b0385e978bc1a0b98c59a3379c262fd61 100644 (file)
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
        }
 
        mutex_lock(&se_tpg->acl_node_mutex);
-       list_del(&nacl->acl_list);
+       list_del_init(&nacl->acl_list);
        mutex_unlock(&se_tpg->acl_node_mutex);
 
        core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
                        spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 
                        if (se_nacl->dynamic_stop)
-                               list_del(&se_nacl->acl_list);
+                               list_del_init(&se_nacl->acl_list);
                }
                mutex_unlock(&se_tpg->acl_node_mutex);
 
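
All four list_del() -> list_del_init() conversions exist because a nacl can now be taken off acl_list from more than one path (dynamic_stop in transport_free_session() versus the tpg teardown paths). list_del() poisons the node's link pointers, so a second removal or a list_empty() test afterwards is a use-after-poison; list_del_init() leaves the node as a valid empty list, making removal idempotent:

        list_del(&nacl->acl_list);       /* links become LIST_POISON1/2 */

        list_del_init(&nacl->acl_list);  /* node re-initialized to self... */
        list_del_init(&nacl->acl_list);  /* ...so a second removal is a no-op */
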
index 80ee130f8253ec44324ac2ba616cfd93beb1ba76..942d094269fba5db66ff7e791dcfaab1c6acec15 100644 (file)
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
                                        block_remaining);
                        to_offset = get_block_offset_user(udev, dbi,
                                        block_remaining);
-                       offset = DATA_BLOCK_SIZE - block_remaining;
-                       to += offset;
 
                        if (*iov_cnt != 0 &&
                            to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
                                (*iov)->iov_len = copy_bytes;
                        }
                        if (copy_data) {
-                               memcpy(to, from + sg->length - sg_remaining,
-                                       copy_bytes);
+                               offset = DATA_BLOCK_SIZE - block_remaining;
+                               memcpy(to + offset,
+                                      from + sg->length - sg_remaining,
+                                      copy_bytes);
                                tcmu_flush_dcache_range(to, copy_bytes);
                        }
                        sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
                        offset = DATA_BLOCK_SIZE - block_remaining;
-                       from += offset;
                        tcmu_flush_dcache_range(from, copy_bytes);
-                       memcpy(to + sg->length - sg_remaining, from,
+                       memcpy(to + sg->length - sg_remaining, from + offset,
                                        copy_bytes);
 
                        sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
        if (udev->dev_config[0])
                snprintf(str + used, size - used, "/%s", udev->dev_config);
 
+       /* If the old string exists, free it */
+       kfree(info->name);
        info->name = str;
 
        return 0;
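
In scatter_data_area() the destination base 'to' is reused across iterations of the copy loop, so advancing it by the block offset on every pass compounded the offset; the fix computes the offset only where the data is actually copied. gather_data_area() gets the mirror-image treatment for 'from':

        offset = DATA_BLOCK_SIZE - block_remaining;
        memcpy(to + offset,             /* offset applied per copy, 'to' stays put */
               from + sg->length - sg_remaining, copy_bytes);
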
index 308b6e17c88aace0775b4ed0a5460097559d66ca..fe2f00ceafc5d7e1a3bbafea4224c838dc2158a4 100644 (file)
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
        int res;
        enum tb_port_type type;
 
+       /*
+        * Some DROMs list more ports than the controller actually has,
+        * so we skip those but allow the parser to continue.
+        */
+       if (header->index > sw->config.max_port_number) {
+               dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+               return 0;
+       }
+
        port = &sw->ports[header->index];
        port->disabled = header->port_disabled;
        if (port->disabled)
index b5def356af63b70e3ebc2e23a48da0753e41b47e..1aab3010fbfae76e2c25cb60085f21051a1f24cf 100644 (file)
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
                if (up->dl_write)
                        uart->dl_write = up->dl_write;
 
-               if (serial8250_isa_config != NULL)
-                       serial8250_isa_config(0, &uart->port,
-                                       &uart->capabilities);
+               if (uart->port.type != PORT_8250_CIR) {
+                       if (serial8250_isa_config != NULL)
+                               serial8250_isa_config(0, &uart->port,
+                                               &uart->capabilities);
+
+                       ret = uart_add_one_port(&serial8250_reg,
+                                               &uart->port);
+                       if (ret == 0)
+                               ret = uart->port.line;
+               } else {
+                       dev_info(uart->port.dev,
+                               "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
+                               uart->port.iobase,
+                               (unsigned long long)uart->port.mapbase,
+                               uart->port.irq);
 
-               ret = uart_add_one_port(&serial8250_reg, &uart->port);
-               if (ret == 0)
-                       ret = uart->port.line;
+                       ret = 0;
+               }
        }
        mutex_unlock(&serial_mutex);
 
index 8a857bb34fbb26c6d60784d3fe7576730a9aa5b3..1888d168a41c87c605962da2605df8ab1c02bd20 100644 (file)
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
        .fixed_options          = true,
 };
 
-/*
- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
- * occasionally getting stuck as 1. To avoid the potential for a hang, check
- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
- * implementations, so only do so if an affected platform is detected in
- * parse_spcr().
- */
-static bool qdf2400_e44_present = false;
-
+#ifdef CONFIG_ACPI_SPCR_TABLE
 static struct vendor_data vendor_qdt_qdf2400_e44 = {
        .reg_offset             = pl011_std_offsets,
        .fr_busy                = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
        .always_enabled         = true,
        .fixed_options          = true,
 };
+#endif
 
 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
        [REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
        resource_size_t addr;
        int i;
 
-       if (strcmp(name, "qdf2400_e44") == 0) {
-               pr_info_once("UART: Working around QDF2400 SoC erratum 44");
-               qdf2400_e44_present = true;
-       } else if (strcmp(name, "pl011") != 0) {
+       /*
+        * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
+        * have a distinct console name, so make sure we check for that.
+        * The actual workaround for the erratum is applied in the probe
+        * function.
+        */
+       if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
                return -ENODEV;
-       }
 
        if (uart_parse_earlycon(options, &iotype, &addr, &options))
                return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
        }
        uap->port.irq   = ret;
 
-       uap->reg_offset = vendor_sbsa.reg_offset;
-       uap->vendor     = qdf2400_e44_present ?
-                                       &vendor_qdt_qdf2400_e44 : &vendor_sbsa;
+#ifdef CONFIG_ACPI_SPCR_TABLE
+       if (qdf2400_e44_present) {
+               dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
+               uap->vendor = &vendor_qdt_qdf2400_e44;
+       } else
+#endif
+               uap->vendor = &vendor_sbsa;
+
+       uap->reg_offset = uap->vendor->reg_offset;
        uap->fifosize   = 32;
-       uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
+       uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
        uap->port.ops   = &sbsa_uart_pops;
        uap->fixed_baud = baudrate;
 
index ab1bb3b538ac6175dd1fbe5babb686a6a8f4bc49..7f277b092b5bf070c21d9c321d15b2e0d630792e 100644 (file)
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
        /* No more submits can occur */
        spin_lock_irq(&hcd_urb_list_lock);
 rescan:
-       list_for_each_entry (urb, &ep->urb_list, urb_list) {
+       list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
                int     is_in;
 
                if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
        }
        if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
                hcd = hcd->shared_hcd;
+               clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+               set_bit(HCD_FLAG_DEAD, &hcd->flags);
                if (hcd->rh_registered) {
                        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 
index 6e6797d145dd80136c413129641bdbbae2f1e63b..822f8c50e4233c70d159a4e374ad66b49502c0c1 100644 (file)
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
 static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                u16 portchange)
 {
-       int status, i;
+       int status = -ENODEV;
+       int i;
        unsigned unit_load;
        struct usb_device *hdev = hub->hdev;
        struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
 
 done:
        hub_port_disable(hub, port1, 1);
-       if (hcd->driver->relinquish_port && !hub->hdev->parent)
-               hcd->driver->relinquish_port(hcd, port1);
-
+       if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+               if (status != -ENOTCONN && status != -ENODEV)
+                       hcd->driver->relinquish_port(hcd, port1);
+       }
 }
 
 /* Handle physical or logical connection change events.
index 3116edfcdc18558aa768d248f1ff1881448ced09..574da2b4529cc26cc0a97ee7146e42889ca55bbe 100644 (file)
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+       { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Avision AV600U */
        { USB_DEVICE(0x0638, 0x0a13), .driver_info =
          USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
        { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
        { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
        { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+       { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
 
        /* Logitech Optical Mouse M90/M100 */
        { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
index 6b299c7b765611e0cb6c0d30e5de50f8ef279cb7..f064f1549333dcd7dab10fd491e43cf31bac887e 100644 (file)
@@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
                if (!node) {
                        trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
 
+                       /*
+                        * USB Specification 2.0 Section 5.9.2 states that: "If
+                        * there is only a single transaction in the microframe,
+                        * only a DATA0 data packet PID is used.  If there are
+                        * two transactions per microframe, DATA1 is used for
+                        * the first transaction data packet and DATA0 is used
+                        * for the second transaction data packet.  If there are
+                        * three transactions per microframe, DATA2 is used for
+                        * the first transaction data packet, DATA1 is used for
+                        * the second, and DATA0 is used for the third."
+                        *
+                        * IOW, we should satisfy the following cases:
+                        *
+                        * 1) length <= maxpacket
+                        *      - DATA0
+                        *
+                        * 2) maxpacket < length <= (2 * maxpacket)
+                        *      - DATA1, DATA0
+                        *
+                        * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+                        *      - DATA2, DATA1, DATA0
+                        */
                        if (speed == USB_SPEED_HIGH) {
                                struct usb_ep *ep = &dep->endpoint;
-                               trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+                               unsigned int mult = ep->mult - 1;
+                               unsigned int maxp = usb_endpoint_maxp(ep->desc);
+
+                               if (length <= (2 * maxp))
+                                       mult--;
+
+                               if (length <= maxp)
+                                       mult--;
+
+                               trb->size |= DWC3_TRB_SIZE_PCM1(mult);
                        }
                } else {
                        trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
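
The PCM1 field encodes how many packets make up the microframe's first transaction, which per the quoted spec text depends on the request length, not just the endpoint's configured mult. A worked example, assuming maxp = 1024 and ep->mult = 3 (up to three transactions per microframe):

        /* length = 3000: 2*maxp < length         -> mult = 2 (DATA2, DATA1, DATA0) */
        /* length = 1500: maxp < length <= 2*maxp -> mult = 1 (DATA1, DATA0)        */
        /* length =  512: length <= maxp          -> mult = 0 (DATA0)               */
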
index 62dc9c7798e78a034a9e63f8c3163fcebec9e57a..e1de8fe599a35695eceda22d3138492af8995b20 100644 (file)
@@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
        return usb3_req;
 }
 
-static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
-                             struct renesas_usb3_request *usb3_req, int status)
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+                               struct renesas_usb3_request *usb3_req,
+                               int status)
 {
        struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
-       unsigned long flags;
 
        dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
                usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
                status);
        usb3_req->req.status = status;
-       spin_lock_irqsave(&usb3->lock, flags);
        usb3_ep->started = false;
        list_del_init(&usb3_req->queue);
-       spin_unlock_irqrestore(&usb3->lock, flags);
+       spin_unlock(&usb3->lock);
        usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+       spin_lock(&usb3->lock);
+}
+
+static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+                             struct renesas_usb3_request *usb3_req, int status)
+{
+       struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+       unsigned long flags;
+
+       spin_lock_irqsave(&usb3->lock, flags);
+       __usb3_request_done(usb3_ep, usb3_req, status);
+       spin_unlock_irqrestore(&usb3->lock, flags);
 }
 
 static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
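
Splitting out __usb3_request_done() lets the caller hold usb3->lock around the bookkeeping while the actual giveback runs unlocked: usb_gadget_giveback_request() invokes the gadget driver's ->complete() callback, which may queue a new request and take usb3->lock again, deadlocking under the old irqsave/irqrestore pairing. The resulting shape:

        spin_lock_irqsave(&usb3->lock, flags);
        /* ... dequeue the request, mark the endpoint stopped ... */
        spin_unlock(&usb3->lock);
        usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); /* may re-enter */
        spin_lock(&usb3->lock);
        spin_unlock_irqrestore(&usb3->lock, flags);
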
index c8989c62a2621b88cf8b9d0c3001a37a31d5e151..c8f38649f749311a81ff63aa194c1b97cb71718f 100644 (file)
@@ -98,6 +98,7 @@ enum amd_chipset_gen {
        AMD_CHIPSET_HUDSON2,
        AMD_CHIPSET_BOLTON,
        AMD_CHIPSET_YANGTZE,
+       AMD_CHIPSET_TAISHAN,
        AMD_CHIPSET_UNKNOWN,
 };
 
@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
                        pinfo->sb_type.gen = AMD_CHIPSET_SB700;
                else if (rev >= 0x40 && rev <= 0x4f)
                        pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+       }
+       pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+                                         0x145c, NULL);
+       if (pinfo->smbus_dev) {
+               pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
        } else {
                pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
                                PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 {
        /* Make sure amd chipset type has already been initialized */
        usb_amd_find_chipset_info();
-       if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
-               return 0;
-
-       dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
-       return 1;
+       if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+           amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+               dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+               return 1;
+       }
+       return 0;
 }
 EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 
@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
                        PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+       /*
+        * Our dear uPD72020{1,2} friend only partially resets when
+        * asked to via the XHCI interface, and may end up doing DMA
+        * at the wrong addresses, as it keeps the top 32bit of some
+        * addresses from its previous programming under obscure
+        * circumstances.
+        * Give it a good whack at probe time. Unfortunately, this
+        * needs to happen before we've had a chance to discover any
+        * quirk, or the system will be in a rather bad state.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+           (pdev->device == 0x0014 || pdev->device == 0x0015))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
index 6559944801987728a1db6ba31f09db51b92362e3..5582cbafecd4c1a3ddc5443d6cc9182a9b9bc89f 100644 (file)
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
 #else
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
index 5b0fa553c8bc940e88a6db731cf6dfeb0c9fb971..8071c8fdd15e741b008af64075cda3c87072bfb4 100644 (file)
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        driver = (struct hc_driver *)id->driver_data;
 
+       /* For some HW implementations, an XHCI reset is just not enough... */
+       if (usb_xhci_needs_pci_reset(dev)) {
+               dev_info(&dev->dev, "Resetting\n");
+               if (pci_reset_function_locked(dev))
+                       dev_warn(&dev->dev, "Reset failed\n");
+       }
+
        /* Prevent runtime suspending between USB-2 and USB-3 initialization */
        pm_runtime_get_noresume(&dev->dev);
 
index 76decb8011ebc2d37d3e7e64a394bd2bb6e5c8ba..3344ffd5bb13743812bab737f52b8e81eb77b9db 100644 (file)
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
                                "Could not flush host TX%d fifo: csr: %04x\n",
                                ep->epnum, csr))
                        return;
+               mdelay(1);
        }
 }
 
index 8fb86a5f458e01275186dc83ff86e38b38fff35b..3d0dd2f9741571423522ee80c972d8db2ffec853 100644 (file)
@@ -197,6 +197,7 @@ struct msm_otg {
        struct regulator *v3p3;
        struct regulator *v1p8;
        struct regulator *vddcx;
+       struct regulator_bulk_data supplies[3];
 
        struct reset_control *phy_rst;
        struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
 
 static int msm_otg_probe(struct platform_device *pdev)
 {
-       struct regulator_bulk_data regs[3];
        int ret = 0;
        struct device_node *np = pdev->dev.of_node;
        struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
                return motg->irq;
        }
 
-       regs[0].supply = "vddcx";
-       regs[1].supply = "v3p3";
-       regs[2].supply = "v1p8";
+       motg->supplies[0].supply = "vddcx";
+       motg->supplies[1].supply = "v3p3";
+       motg->supplies[2].supply = "v1p8";
 
-       ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
+       ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
+                                     motg->supplies);
        if (ret)
                return ret;
 
-       motg->vddcx = regs[0].consumer;
-       motg->v3p3  = regs[1].consumer;
-       motg->v1p8  = regs[2].consumer;
+       motg->vddcx = motg->supplies[0].consumer;
+       motg->v3p3  = motg->supplies[1].consumer;
+       motg->v1p8  = motg->supplies[2].consumer;
 
        clk_set_rate(motg->clk, 60000000);
 
index 93fba9033b00a7136b85a9995d7da32a140c61e9..2c8161bcf5b5e22ee8ed2eb413d29fdfde504e95 100644 (file)
@@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
        struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
        struct usbhs_pipe *pipe;
        unsigned long flags;
-       int ret = 0;
 
        spin_lock_irqsave(&uep->lock, flags);
        pipe = usbhsg_uep_to_pipe(uep);
-       if (!pipe) {
-               ret = -EINVAL;
+       if (!pipe)
                goto out;
-       }
 
        usbhsg_pipe_disable(uep);
        usbhs_pipe_free(pipe);
index d544b331c9f2ce80d83095f30184e2102eda6ea6..02b67abfc2a16139a3230d1120a17a4a9ab1cc09 100644 (file)
 /* Low Power Status register (LPSTS) */
 #define LPSTS_SUSPM    0x4000
 
-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
 #define UGCTRL2_RESERVED_3     0x00000001      /* bit[3:0] should be B'0001 */
 #define UGCTRL2_USB0SEL_OTG    0x00000030
+#define UGCTRL2_VBUSSEL                0x00000400
 
 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
 {
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
 {
        struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
 
-       usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+       usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+                     UGCTRL2_VBUSSEL);
 
        if (enable) {
                usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
index f64e914a8985495bf6ef51bb3de4772c40ffbf49..2d945c9f975c04d5cd7909e00a37017a97e8062e 100644 (file)
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+       { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
index ebe51f11105d8889d6144a72bbf719f14e4b1103..fe123153b1a5439f016f2637d4f9d19f0fde3f14 100644 (file)
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },                   /* D-Link DWM-158 */
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),                     /* D-Link DWM-221 B1 */
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),                     /* D-Link DWM-222 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
index c9ebefd8f35fdbe5491627f4df89b80c87aeabad..a585b477415dde58b38c9e2defe0d26c24552757 100644 (file)
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
                .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+       { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
+               .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
        { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
index 09d9be88209e1ce6b1f53dc052a53c5e4c491336..3b5a15d1dc0dd50a0ebf9d925e33ef4cf8db0090 100644 (file)
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
 #define ATEN_PRODUCT_ID                0x2008
+#define ATEN_PRODUCT_UC485     0x2021
 #define ATEN_PRODUCT_ID2       0x2118
 
 #define IODATA_VENDOR_ID       0x04bb
index cbea9f329e715aad97b23928d79579cdcf5e8c92..cde115359793001dcf18b8884c3c9e22eec0fa73 100644 (file)
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
                "Initio Corporation",
-               "",
+               "INIC-3069",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_NO_ATA_1X),
+               US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
 
 /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
index 06615934fed1cc537694b03d023b613c95aea9f1..0dceb9fa3a0629af3bd1a1990508d2c32bb42db2 100644 (file)
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
 {
        struct us_data *us = (struct us_data *)__us;
        struct Scsi_Host *host = us_to_host(us);
+       struct scsi_cmnd *srb;
 
        for (;;) {
                usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
                scsi_lock(host);
 
                /* When we are called with no command pending, we're done */
+               srb = us->srb;
                if (us->srb == NULL) {
                        scsi_unlock(host);
                        mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
                /* lock access to the state */
                scsi_lock(host);
 
-               /* indicate that the command is done */
-               if (us->srb->result != DID_ABORT << 16) {
-                       usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
-                                    us->srb->result);
-                       us->srb->scsi_done(us->srb);
-               } else {
+               /* was the command aborted? */
+               if (us->srb->result == DID_ABORT << 16) {
 SkipForAbort:
                        usb_stor_dbg(us, "scsi command aborted\n");
+                       srb = NULL;     /* Don't call srb->scsi_done() */
                }
 
                /*
@@ -429,6 +428,13 @@ SkipForAbort:
 
                /* unlock the device pointers */
                mutex_unlock(&us->dev_mutex);
+
+               /* now that the locks are released, notify the SCSI core */
+               if (srb) {
+                       usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+                                       srb->result);
+                       srb->scsi_done(srb);
+               }
        } /* for (;;) */
 
        /* Wait until we are told to stop */
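
Snapshotting us->srb into a local exists so that ->scsi_done() can run after both scsi_lock() and us->dev_mutex are dropped: completing the command re-enters the SCSI midlayer, which may immediately dispatch the next command to the same device. The resulting shape:

        scsi_lock(host);
        srb = us->srb;                  /* snapshot under the host lock */
        /* ... handle the command; srb = NULL on the abort path ... */
        scsi_unlock(host);
        mutex_unlock(&us->dev_mutex);

        if (srb)
                srb->scsi_done(srb);    /* completion with no locks held */
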
index ff01bed7112f1566ca13b3e17330851bf02fec05..1e784adb89b17534ce31751b546ef20801b2427f 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/efi.h>
 
 static bool request_mem_succeeded = false;
+static bool nowc = false;
 
 static struct fb_var_screeninfo efifb_defined = {
        .activate               = FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
                                screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
                        else if (!strncmp(this_opt, "width:", 6))
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+                       else if (!strcmp(this_opt, "nowc"))
+                               nowc = true;
                }
        }
 
@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+       if (nowc)
+               info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+       else
+               info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
                pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
                        efifb_fix.smem_len, efifb_fix.smem_start);
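
With this option, machines whose firmware framebuffer misbehaves under write-combining can ask for an uncached ioremap() mapping from the kernel command line; assuming the usual efifb option syntax from Documentation/fb/efifb.txt, that is:

        video=efifb:nowc
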
index c166e0725be5dab13e9a685a93ea7ea9c23a3351..ba82f97fb42b2d10fdbebd227fcb7e5eb19dcbdc 100644 (file)
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
        imxfb_disable_controller(fbi);
 
        unregister_framebuffer(info);
-
+       fb_dealloc_cmap(&info->cmap);
        pdata = dev_get_platdata(&pdev->dev);
        if (pdata && pdata->exit)
                pdata->exit(fbi->pdev);
-
-       fb_dealloc_cmap(&info->cmap);
-       kfree(info->pseudo_palette);
-       framebuffer_release(info);
-
        dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
                    fbi->map_dma);
-
        iounmap(fbi->regs);
        release_mem_region(res->start, resource_size(res));
+       kfree(info->pseudo_palette);
+       framebuffer_release(info);
 
        return 0;
 }
index eecf695c16f41b6996520e47b8fa7e1b71efbabc..09e5bb013d28071c69b63b1968ef6eca44252f1e 100644 (file)
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {
 
 static int __init omap_dss_probe(struct platform_device *pdev)
 {
-       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        int r;
 
        core.pdev = pdev;
index bae1f5d36c26e8eac1a7ce9473a3ca7999b90642..2d43118077e4eecc12f68bee605fb068def11c95 100644 (file)
@@ -574,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)
 
 static void enable_pirq(struct irq_data *data)
 {
-       startup_pirq(data);
+       enable_dynirq(data);
 }
 
 static void disable_pirq(struct irq_data *data)
index e460802149555b6f0d5def7659d8e8207828eb53..3e59590c7254ddc8f1a08f4232262a74a29e3711 100644 (file)
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
        struct list_head *ent;
        struct xs_watch_event *event;
 
+       xenwatch_pid = current->pid;
+
        for (;;) {
                wait_event_interruptible(watch_events_waitq,
                                         !list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
        task = kthread_run(xenwatch_thread, NULL, "xenwatch");
        if (IS_ERR(task))
                return PTR_ERR(task);
-       xenwatch_pid = task->pid;
 
        /* shutdown watches for kexec boot */
        xs_reset_watches();
index 3ee4fdc3da9ec359ad847afa36354240329a2da6..ab60051be6e533eb167a72e590494f0a46e3a488 100644 (file)
@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
 {
        struct fuse_file *ff;
 
-       ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+       ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (unlikely(!ff))
                return NULL;
 
@@ -609,7 +609,7 @@ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
        struct fuse_io_priv *io = req->io;
        ssize_t pos = -1;
 
-       fuse_release_user_pages(req, !io->write);
+       fuse_release_user_pages(req, io->should_dirty);
 
        if (io->write) {
                if (req->misc.write.in.size != req->misc.write.out.size)
@@ -1316,7 +1316,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                       loff_t *ppos, int flags)
 {
        int write = flags & FUSE_DIO_WRITE;
-       bool should_dirty = !write && iter_is_iovec(iter);
        int cuse = flags & FUSE_DIO_CUSE;
        struct file *file = io->file;
        struct inode *inode = file->f_mapping->host;
@@ -1346,6 +1345,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        inode_unlock(inode);
        }
 
+       io->should_dirty = !write && iter_is_iovec(iter);
        while (count) {
                size_t nres;
                fl_owner_t owner = current->files;
@@ -1360,7 +1360,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
                if (!io->async)
-                       fuse_release_user_pages(req, should_dirty);
+                       fuse_release_user_pages(req, io->should_dirty);
                if (req->out.h.error) {
                        err = req->out.h.error;
                        break;
@@ -1669,6 +1669,7 @@ err_nofile:
 err_free:
        fuse_request_free(req);
 err:
+       mapping_set_error(page->mapping, error);
        end_page_writeback(page);
        return error;
 }
index 1bd7ffdad593977013c1ddd233b2a91093471471..bd4d2a3e1ec1b8cc0af29bcb8c26f13d82708804 100644 (file)
@@ -249,6 +249,7 @@ struct fuse_io_priv {
        size_t size;
        __u64 offset;
        bool write;
+       bool should_dirty;
        int err;
        struct kiocb *iocb;
        struct file *file;
index 69d02cf8cf370678609175d6e5626d0dedf02c3a..5f93cfacb3d14b9befc6ba33a91d79ccde044d89 100644 (file)
@@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT
 config PNFS_BLOCK
        tristate
        depends on NFS_V4_1 && BLK_DEV_DM
+       depends on 64BIT || LBDAF
        default NFS_V4
 
 config PNFS_FLEXFILE_LAYOUT
index 6df7a0cf566015378aa3f76c480115675454297d..f32c58bbe55671cb75abdcb9934152d110e3537d 100644 (file)
@@ -32,6 +32,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
        nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
        nfs4_pnfs_ds_put(mirror_ds->ds);
+       kfree(mirror_ds->ds_versions);
        kfree_rcu(mirror_ds, id_node.rcu);
 }
 
index ffd2e712595d8ac875dc2780b52731543c687ca8..d901326423401c3e7d442f62bfa80d03d281ed02 100644 (file)
@@ -2553,9 +2553,8 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
                clear_bit(NFS_O_RDWR_STATE, &state->flags);
                clear_bit(NFS_OPEN_STATE, &state->flags);
                stateid->type = NFS4_INVALID_STATEID_TYPE;
-       }
-       if (status != NFS_OK)
                return status;
+       }
        if (nfs_open_stateid_recover_openmode(state))
                return -NFS4ERR_OPENMODE;
        return NFS_OK;
index 8a428498d6b21f08c8c26ef184ff9f4332b5cdd0..509a61668d902b84f6756e2ed1bcb22a6d7020a5 100644 (file)
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_FILE_MAPPED));
        show_val_kb(m, "Shmem:          ", i.sharedram);
        show_val_kb(m, "Slab:           ",
-                   global_page_state(NR_SLAB_RECLAIMABLE) +
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE) +
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 
        show_val_kb(m, "SReclaimable:   ",
-                   global_page_state(NR_SLAB_RECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE));
        show_val_kb(m, "SUnreclaim:     ",
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
        seq_printf(m, "KernelStack:    %8lu kB\n",
                   global_page_state(NR_KERNEL_STACK_KB));
        show_val_kb(m, "PageTables:     ",
index b836fd61ed878a38d25d5ffe44bb86e30066955c..fe8f3265e8779ac18a5694ef600c024f9e88f281 100644 (file)
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
+#include <linux/uaccess.h>
 
 #include <asm/elf.h>
-#include <linux/uaccess.h>
+#include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
 
@@ -1008,6 +1009,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
+       struct mmu_gather tlb;
        int itype;
        int rv;
 
@@ -1054,6 +1056,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                }
 
                down_read(&mm->mmap_sem);
+               tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1075,7 +1078,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
-               flush_tlb_mm(mm);
+               tlb_finish_mmu(&tlb, 0, -1);
                up_read(&mm->mmap_sem);
 out_mm:
                mmput(mm);
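
Switching clear_refs from a bare flush_tlb_mm() to a gather/finish pair ties the soft-dirty clearing into the mmu_gather bookkeeping, so the tlb_flush_pending accounting introduced elsewhere in this series is honored rather than raced with. The pattern over the full address space:

        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, 0, -1);   /* 0 .. -1UL: whole address space */
        /* ... walk page tables, clear soft-dirty bits ... */
        tlb_finish_mmu(&tlb, 0, -1);       /* flush with pending accounting */
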
index 06ea26b8c996f3cc7a9d6fd177260f89394fb325..b0d5897bc4e6d0e019c79f65b6d41df1d3b0d050 100644 (file)
@@ -1600,7 +1600,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                                   uffdio_copy.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
                return -EFAULT;
@@ -1647,7 +1647,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                                     uffdio_zeropage.range.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
                return -EFAULT;
index ceef77c0416ad5833c2b513006496c8d57a4c62f..ff48f00968100df0de830892f0c9aed7bca6d74d 100644 (file)
@@ -874,7 +874,6 @@ xfs_ialloc(
        case S_IFREG:
        case S_IFDIR:
                if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
-                       uint64_t        di_flags2 = 0;
                        uint            di_flags = 0;
 
                        if (S_ISDIR(mode)) {
@@ -911,20 +910,23 @@ xfs_ialloc(
                                di_flags |= XFS_DIFLAG_NODEFRAG;
                        if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
                                di_flags |= XFS_DIFLAG_FILESTREAM;
-                       if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
-                               di_flags2 |= XFS_DIFLAG2_DAX;
 
                        ip->i_d.di_flags |= di_flags;
-                       ip->i_d.di_flags2 |= di_flags2;
                }
                if (pip &&
                    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
                    pip->i_d.di_version == 3 &&
                    ip->i_d.di_version == 3) {
+                       uint64_t        di_flags2 = 0;
+
                        if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
-                               ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+                               di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
                                ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
                        }
+                       if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
+                               di_flags2 |= XFS_DIFLAG2_DAX;
+
+                       ip->i_d.di_flags2 |= di_flags2;
                }
                /* FALLTHROUGH */
        case S_IFLNK:
index fbe72b134bef219e80420063bfe2db2abc594e49..43aa42a3a5d319fffff98417bff98055083f1a75 100644 (file)
@@ -539,6 +539,7 @@ xlog_discard_endio(
 
        INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
        queue_work(xfs_discard_wq, &ctx->discard_endio_work);
+       bio_put(bio);
 }
 
 static void
index 8afa4335e5b2bfd0c42c00e1b1506d4e1f7377ac..faddde44de8c902e6884e64eeb8b22bd0d11b75a 100644 (file)
@@ -112,10 +112,11 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+       struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
-                                                       unsigned long end);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                        unsigned long start, unsigned long end, bool force);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                   int page_size);
 
index c749eef1daa1557910ec81c5f296dc907f5ccab2..27b4b66152637fe38b82711f0fa66d6ce946f7d7 100644 (file)
@@ -1209,6 +1209,7 @@ static inline bool acpi_has_watchdog(void) { return false; }
 #endif
 
 #ifdef CONFIG_ACPI_SPCR_TABLE
+extern bool qdf2400_e44_present;
 int parse_spcr(bool earlycon);
 #else
 static inline int parse_spcr(bool earlycon) { return 0; }
index b56573bf440db4b85f8f11f678edb9e28d4e6cd8..82b30e638430fa6e9b3b410feb84bc79a4bf2b13 100644 (file)
@@ -39,8 +39,6 @@ enum cpuhp_state {
        CPUHP_PCI_XGENE_DEAD,
        CPUHP_IOMMU_INTEL_DEAD,
        CPUHP_LUSTRE_CFS_DEAD,
-       CPUHP_SCSI_BNX2FC_DEAD,
-       CPUHP_SCSI_BNX2I_DEAD,
        CPUHP_WORKQUEUE_PREP,
        CPUHP_POWER_NUMA_PREPARE,
        CPUHP_HRTIMERS_PREPARE,
index 723cd54b94da84f95cd18934d14198b6d21040dd..beabdbc0842059b36a2c6b22b882e04ddc968f75 100644 (file)
@@ -843,7 +843,7 @@ struct dev_links_info {
  *             hibernation, system resume and during runtime PM transitions
  *             along with subsystem-level and driver-level callbacks.
  * @pins:      For device pin management.
- *             See Documentation/pinctrl.txt for details.
+ *             See Documentation/driver-api/pinctl.rst for details.
  * @msi_list:  Hosts MSI descriptors
  * @msi_domain: The generic MSI domain this device is using.
  * @numa_node: NUMA node this device is close to.
index 00ca5b86a753f8023cad87ce02cbe90748255383..d501d3956f13f041864dc25f0d7e8724ea2b5210 100644 (file)
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
 #define I2C_CLASS_HWMON                (1<<0)  /* lm_sensors, ... */
 #define I2C_CLASS_DDC          (1<<3)  /* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD          (1<<7)  /* Memory modules */
-#define I2C_CLASS_DEPRECATED   (1<<8)  /* Warn users that adapter will stop using classes */
+/* Warn users that the adapter doesn't support classes anymore */
+#define I2C_CLASS_DEPRECATED   (1<<8)
 
 /* Internal numbers to terminate lists */
 #define I2C_CLIENT_END         0xfffeU
index 497f2b3a5a62c8da6f87107de16519b176cc9f1f..97f1b465d04ff0b1ab33b0c1074a722ca41c1b0a 100644 (file)
@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
        struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
 };
 
+struct st_sensor_sim {
+       u8 addr;
+       u8 value;
+};
+
 /**
  * struct st_sensor_bdu - ST sensor device block data update
  * @addr: address of the register.
@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
  * @bdu: Block data update register.
  * @das: Data Alignment Selection register.
  * @drdy_irq: Data ready register of the sensor.
+ * @sim: SPI serial interface mode register of the sensor.
  * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
  * @bootime: samples to discard when sensor passing from power-down to power-up.
  */
@@ -213,6 +219,7 @@ struct st_sensor_settings {
        struct st_sensor_bdu bdu;
        struct st_sensor_das das;
        struct st_sensor_data_ready_irq drdy_irq;
+       struct st_sensor_sim sim;
        bool multi_read_bit;
        unsigned int bootime;
 };
index aad5d81dfb444aeb0dcb4b92aef475023aa897c1..b54517c05e9ab20fff33e3526fe0ea8de1e702bb 100644 (file)
@@ -620,6 +620,7 @@ struct mlx4_caps {
        u32                     dmfs_high_rate_qpn_base;
        u32                     dmfs_high_rate_qpn_range;
        u32                     vf_caps;
+       bool                    wol_port[MLX4_MAX_PORTS + 1];
        struct mlx4_rate_limit_caps rl_caps;
 };
 
index 6f41270d80c03128bdeb60e5c6fc1b6ca2b5fe54..f378dc0e7eaf4db75eab8606f03df4e9269602e4 100644 (file)
@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
-#define MLX5_WQE_AV_EXT 0x80000000
 
 enum {
        MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
index 7f384bb62d8ec6bc7eafa25828b0716be63c7ccb..3cadee0a350889f748e7b1a999b449ae003e9c3f 100644 (file)
@@ -487,14 +487,12 @@ struct mm_struct {
        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
        /*
         * An operation with batched TLB flushing is going on. Anything that
         * can move process memory needs to flush the TLB when moving a
         * PROT_NONE or PROT_NUMA mapped page.
         */
-       bool tlb_flush_pending;
-#endif
+       atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
        /* See flush_tlb_batched_pending() */
        bool tlb_flush_batched;
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
        return mm->cpu_vm_mask_var;
 }
 
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+                               unsigned long start, unsigned long end);
+
 /*
  * Memory barriers to keep this state in sync are graciously provided by
  * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers ensure that tlb_flush_pending updates, which happen while the
+ * lock is not taken, and the PTE updates, which happen while the lock is
+ * taken, are serialized.
  */
 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 {
-       barrier();
-       return mm->tlb_flush_pending;
+       return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if two or more TLB batching threads are running in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+       return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+       atomic_set(&mm->tlb_flush_pending, 0);
 }
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
 {
-       mm->tlb_flush_pending = true;
+       atomic_inc(&mm->tlb_flush_pending);
 
        /*
-        * Guarantee that the tlb_flush_pending store does not leak into the
+        * Guarantee that the tlb_flush_pending increase does not leak into the
         * critical section updating the page tables
         */
        smp_mb__before_spinlock();
 }
+
 /* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
-       barrier();
-       mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
-       return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 {
+       /*
+        * Guarantee that the tlb_flush_pending decrement does not leak into the
+        * critical section, since we must order the PTE change and changes to
+        * the pending TLB flush indication. We could have relied on TLB flush
+        * as a memory barrier, but this behavior is not clearly documented.
+        */
+       smp_mb__before_atomic();
+       atomic_dec(&mm->tlb_flush_pending);
 }
-#endif
 
 struct vm_fault;
 
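[Note on the mm_types.h change above: converting tlb_flush_pending from a bool to an atomic_t lets several batched-TLB operations on the same mm (e.g. concurrent mprotect() and munmap() under the mmap_sem read lock) be tracked at once, and lets tlb_finish_mmu() detect nesting. A minimal user-space model of the counting scheme, using C11 atomics in place of the kernel's atomic_t; a sketch for illustration only, not kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for mm_struct's new atomic_t tlb_flush_pending member. */
    struct mm_model {
            atomic_int tlb_flush_pending;
    };

    static void init_tlb_flush_pending(struct mm_model *mm)
    {
            atomic_store(&mm->tlb_flush_pending, 0);     /* at mm creation */
    }

    static void inc_tlb_flush_pending(struct mm_model *mm)
    {
            atomic_fetch_add(&mm->tlb_flush_pending, 1); /* before PTE updates */
    }

    static void dec_tlb_flush_pending(struct mm_model *mm)
    {
            atomic_fetch_sub(&mm->tlb_flush_pending, 1); /* after the TLB flush */
    }

    static bool mm_tlb_flush_pending(struct mm_model *mm)
    {
            return atomic_load(&mm->tlb_flush_pending) > 0;
    }

    static bool mm_tlb_flush_nested(struct mm_model *mm)
    {
            return atomic_load(&mm->tlb_flush_pending) > 1;
    }

    int main(void)
    {
            struct mm_model mm;

            init_tlb_flush_pending(&mm);
            inc_tlb_flush_pending(&mm);     /* thread A starts batching */
            inc_tlb_flush_pending(&mm);     /* thread B starts on the same mm */
            printf("pending=%d nested=%d\n",
                   mm_tlb_flush_pending(&mm), mm_tlb_flush_nested(&mm));
            dec_tlb_flush_pending(&mm);
            dec_tlb_flush_pending(&mm);
            return 0;
    }

In the kernel the accompanying barriers (smp_mb__before_spinlock() on increment, smp_mb__before_atomic() on decrement) order the counter updates against the PTE changes; the model above only shows the counting.]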
index 892148c448cce2c9c9e9e59b2d44941283635cbd..5216d2eb22891010187b86a5911a10b17aedf273 100644 (file)
@@ -681,10 +681,10 @@ struct nand_buffers {
  * @tWW_min: WP# transition to WE# low
  */
 struct nand_sdr_timings {
-       u32 tBERS_max;
+       u64 tBERS_max;
        u32 tCCS_min;
-       u32 tPROG_max;
-       u32 tR_max;
+       u64 tPROG_max;
+       u64 tR_max;
        u32 tALH_min;
        u32 tADL_min;
        u32 tALS_min;
index 6c8c5d8041b72ec01097d1c0563b793ea7449f1f..2591878c1d4804d374d39491c2a3f64d1b43a214 100644 (file)
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
  *       indicating an FC transport Aborted status.
  *       Entrypoint is Mandatory.
  *
+ * @defer_rcv:  Called by the transport to signal the LLDD that it has
+ *       begun processing of a previously received NVME CMD IU. The LLDD
+ *       is now free to re-use the rcv buffer associated with the
+ *       nvmefc_tgt_fcp_req.
+ *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
                                struct nvmefc_tgt_fcp_req *fcpreq);
        void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
                                struct nvmefc_tgt_fcp_req *fcpreq);
+       void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+                               struct nvmefc_tgt_fcp_req *fcpreq);
 
        u32     max_hw_queues;
        u16     max_sgl_segments;
index 4869e66dd659a6bc8fe4ad90df2ed9d3ff98ccac..a75c136738529db410baf870f3baafc6e178a5a0 100644 (file)
@@ -1067,6 +1067,7 @@ void pcie_flr(struct pci_dev *dev);
 int __pci_reset_function(struct pci_dev *dev);
 int __pci_reset_function_locked(struct pci_dev *dev);
 int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
 int pci_try_reset_function(struct pci_dev *dev);
 int pci_probe_reset_slot(struct pci_slot *slot);
 int pci_reset_slot(struct pci_slot *slot);
index 231d3075815adfa63d462a236e66f214c3216117..e91d1b6a260d5996583a4365d4020524ad57700a 100644 (file)
@@ -81,8 +81,8 @@
  *     it.
  * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
  *     value on the line. Use argument 1 to indicate high level, argument 0 to
- *     indicate low level. (Please see Documentation/pinctrl.txt, section
- *     "GPIO mode pitfalls" for a discussion around this parameter.)
+ *     indicate low level. (Please see Documentation/driver-api/pinctl.rst,
+ *     section "GPIO mode pitfalls" for a discussion around this parameter.)
  * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
  *     supplies, the argument to this parameter (on a custom format) tells
  *     the driver which alternative power source to use.
index 79b0e4cdb8141a10e73affb9a837d5bcc5f00a2a..f8274b0c68880ccbd02de1a85ece46a3cab2053f 100644 (file)
  *     Available only for accelerometer and pressure sensors.
  *     Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
  * @open_drain: set the interrupt line to be open drain if possible.
+ * @spi_3wire: enable spi-3wire mode.
  */
 struct st_sensors_platform_data {
        u8 drdy_int_pin;
        bool open_drain;
+       bool spi_3wire;
 };
 
 #endif /* ST_SENSORS_PDATA_H */
index a026bfd089db81191b5a322979d4bf58c2777dc5..51349d124ee5d47ded05a433028d765858c61f00 100644 (file)
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
  *            parameter func: the desired function to use.
  *            parameter chan: the function channel index to use.
  *
+ * @do_aux_work:  Request driver to perform auxiliary (periodic) operations.
+ *           Driver should return the delay, in jiffies, of the next
+ *           auxiliary work scheduling (>=0), or a negative value if no
+ *           further scheduling is required.
+ *
  * Drivers should embed their ptp_clock_info within a private
  * structure, obtaining a reference to it using container_of().
  *
@@ -126,6 +131,7 @@ struct ptp_clock_info {
                      struct ptp_clock_request *request, int on);
        int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
                      enum ptp_pin_function func, unsigned int chan);
+       long (*do_aux_work)(struct ptp_clock_info *ptp);
 };
 
 struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
 int ptp_find_pin(struct ptp_clock *ptp,
                 enum ptp_pin_function func, unsigned int chan);
 
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ * @delay:  number of jiffies to wait before queuing.
+ *          See kthread_queue_delayed_work() for more info.
+ */
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
 #else
 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                                                   struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
 static inline int ptp_find_pin(struct ptp_clock *ptp,
                               enum ptp_pin_function func, unsigned int chan)
 { return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+                                     unsigned long delay)
+{ return -EOPNOTSUPP; }
+
 #endif
 
 #endif
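[Note on the ptp_clock_kernel.h additions above: the do_aux_work hook plus ptp_schedule_worker() give PHC drivers a shared, self-rescheduling kthread for periodic housekeeping instead of a private workqueue. A hedged sketch of how a driver might hook in; only the do_aux_work signature and ptp_schedule_worker() come from this patch, while the foo_* names and the FIFO-draining duty are invented for illustration:

    /* Hypothetical driver fragment, not a standalone program. */
    #include <linux/ptp_clock_kernel.h>

    struct foo_priv {
            struct ptp_clock_info ptp_info;
            struct ptp_clock *ptp_clock;
    };

    static long foo_do_aux_work(struct ptp_clock_info *info)
    {
            struct foo_priv *priv =
                    container_of(info, struct foo_priv, ptp_info);

            /* Periodic duty would go here, e.g. draining a timestamp FIFO. */
            (void)priv;

            return HZ / 10;     /* run again in ~100 ms; <0 stops rescheduling */
    }

    /* In probe, after filling in the rest of ptp_info:
     *
     *      priv->ptp_info.do_aux_work = foo_do_aux_work;
     *      priv->ptp_clock = ptp_clock_register(&priv->ptp_info, dev);
     *      ptp_schedule_worker(priv->ptp_clock, 0);    // kick the first run
     */

Returning a non-negative value reschedules the worker after that many jiffies; a negative value stops rescheduling until the driver calls ptp_schedule_worker() again.]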
index 5726107963b2d9930d61c3cf5c486abfe244c18a..0ad87c434ae6a344984837e8dd053fe7705d21f1 100644 (file)
@@ -43,12 +43,13 @@ struct sync_file {
 #endif
 
        wait_queue_head_t       wq;
+       unsigned long           flags;
 
        struct dma_fence        *fence;
        struct dma_fence_cb cb;
 };
 
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
 
 struct sync_file *sync_file_create(struct dma_fence *fence);
 struct dma_fence *sync_file_get_fence(int fd);
index 70483296157f87acdf5acd5e96eaa910119ba220..ada65e767b28dfcabb662a7b08f65c6fc04f5b73 100644 (file)
@@ -1916,6 +1916,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                             u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
 
+/* At how many usecs into the future should the RTO fire? */
+static inline s64 tcp_rto_delta_us(const struct sock *sk)
+{
+       const struct sk_buff *skb = tcp_write_queue_head(sk);
+       u32 rto = inet_csk(sk)->icsk_rto;
+       u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+
+       return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+}
+
 /*
  * Save and compile IPv4 options, return a pointer to it
  */
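[The tcp_rto_delta_us() helper above centralizes the RTO-deadline arithmetic that tcp_rearm_rto() used to open-code (see the tcp_input.c hunk further down) and that tcp_schedule_loss_probe() now reuses. A standalone model of the arithmetic with made-up microsecond timestamps, illustrative values only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Made-up microsecond timestamps, not from a real socket. */
            uint64_t head_xmit_us = 1000000; /* skb->skb_mstamp of queue head */
            uint64_t rto_us       = 200000;  /* icsk_rto converted to usecs */
            uint64_t now_us       = 1150000; /* tp->tcp_mstamp */

            /* Same shape as tcp_rto_delta_us(): deadline minus "now". */
            int64_t delta_us = (int64_t)(head_xmit_us + rto_us - now_us);

            /* Positive: the head packet's RTO is still 50 ms away; a negative
             * result would mean the deadline has already passed. */
            printf("RTO fires in %lld us\n", (long long)delta_us);
            return 0;
    }
]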
index 0ca1fb08805b254fa8ffda73f4decd4c48ac2211..fb87d32f5e513de3c2a1b7c6f402b5f48e401c45 100644 (file)
@@ -786,6 +786,7 @@ struct iscsi_np {
        int                     np_sock_type;
        enum np_thread_state_table np_thread_state;
        bool                    enabled;
+       atomic_t                np_reset_count;
        enum iscsi_timer_flags_table np_login_timer_flags;
        u32                     np_exports;
        enum np_flags_table     np_flags;
index 26c54f6d595d4070c7708ef22daf7533468404a2..ad4eb2863e70ee195f9abc68e6b8c3c3020f27bc 100644 (file)
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
        __u32 size;           /* in, cmdstream size */
        __u32 pad;
        __u32 nr_relocs;      /* in, number of submit_reloc's */
-       __u64 __user relocs;  /* in, ptr to array of submit_reloc's */
+       __u64 relocs;         /* in, ptr to array of submit_reloc's */
 };
 
 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
        __u32 fence;          /* out */
        __u32 nr_bos;         /* in, number of submit_bo's */
        __u32 nr_cmds;        /* in, number of submit_cmd's */
-       __u64 __user bos;     /* in, ptr to array of submit_bo's */
-       __u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+       __u64 bos;            /* in, ptr to array of submit_bo's */
+       __u64 cmds;           /* in, ptr to array of submit_cmd's */
        __s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 };
 
index 17921b0390b4f91113bcf8c9ccac5c1225751460..e075b7780421dee1d8243b9dc178248398c5f189 100644 (file)
@@ -807,7 +807,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_init_aio(mm);
        mm_init_owner(mm, p);
        mmu_notifier_mm_init(mm);
-       clear_tlb_flush_pending(mm);
+       init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        mm->pmd_huge_pte = NULL;
 #endif
index 16dbe4c938953a70a49faf0a5264af8c19a9491f..f50b434756c18eb0c200ec6e3b4db16231062f24 100644 (file)
@@ -670,13 +670,14 @@ again:
                 * this reference was taken by ihold under the page lock
                 * pinning the inode in place so i_lock was unnecessary. The
                 * only way for this check to fail is if the inode was
-                * truncated in parallel so warn for now if this happens.
+                * truncated in parallel which is almost certainly an
+                * application bug. In such a case, just retry.
                 *
                 * We are not calling into get_futex_key_refs() in file-backed
                 * cases, therefore a successful atomic_inc return below will
                 * guarantee that get_futex_key() will still imply smp_mb(); (B).
                 */
-               if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+               if (!atomic_inc_not_zero(&inode->i_count)) {
                        rcu_read_unlock();
                        put_page(page);
 
index 222317721c5a09291c6b78fc839e722b2196b177..0972a8e09d082d99c7f197cbe6bd4fdb6475ba33 100644 (file)
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 {
        unsigned long size;
 
-       size = global_page_state(NR_SLAB_RECLAIMABLE)
+       size = global_node_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
index 7d315fdb9f13d9b17d8a2aa129c75790c7599bdb..cf7b129b0b2b08adcc1aae98f990c384761532dc 100644 (file)
@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
        if (in_task()) {
                unsigned int fail_nth = READ_ONCE(current->fail_nth);
 
-               if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
-                       goto fail;
+               if (fail_nth) {
+                       if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+                               goto fail;
 
-               return false;
+                       return false;
+               }
        }
 
        /* No need to check any other properties if the probability is 0 */
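[The fault-injection fix above restructures should_fail() so the probabilistic checks are skipped only while a fail_nth countdown is actually armed; previously the early "return false" was reached even with fail_nth == 0, disabling probability-based injection for all tasks. A small user-space model of the corrected logic, with the probability path stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int fail_nth;   /* stand-in for current->fail_nth */

    static bool should_fail_model(void)
    {
            if (fail_nth) {
                    if (--fail_nth == 0)
                            return true;    /* the Nth call fails */
                    return false;           /* countdown armed: suppress */
            }
            /* fail_nth == 0: fall through to the probability-based checks
             * (stubbed out here), which the old code unreachably skipped. */
            return false;
    }

    int main(void)
    {
            fail_nth = 3;
            for (int i = 1; i <= 4; i++)
                    printf("call %d -> %s\n", i,
                           should_fail_model() ? "FAIL" : "ok");
            return 0;
    }
]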
index 6c1d678bcf8b00ff7b2d2fc70747045e6c14327a..ff9148969b9233ba7502b992b026d31e100be497 100644 (file)
@@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_driver);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "driver:\tEMTPY\n");
+                               "driver:\tEMPTY\n");
 
        if (config->test_fs)
                len += snprintf(buf+len, PAGE_SIZE - len,
@@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_fs);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "fs:\tEMTPY\n");
+                               "fs:\tEMPTY\n");
 
        mutex_unlock(&test_dev->config_mutex);
 
@@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
                                                      strlen(test_str));
                break;
        case TEST_KMOD_FS_TYPE:
-               break;
                kfree_const(config->test_fs);
                config->test_driver = NULL;
                copied = config_copy_test_fs(config, test_str,
                                             strlen(test_str));
+               break;
        default:
                mutex_unlock(&test_dev->config_mutex);
                return -EINVAL;
@@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
                                            int (*test_sync)(struct kmod_test_device *test_dev))
 {
        int ret;
-       long new;
+       unsigned long new;
        unsigned int old_val;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
                                             unsigned int max)
 {
        int ret;
-       long new;
+       unsigned long new;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
        struct kmod_test_device *test_dev = NULL;
        int ret;
 
-       mutex_unlock(&reg_dev_mutex);
+       mutex_lock(&reg_dev_mutex);
 
        /* int should suffice for number of devices, test for wrap */
        if (unlikely(num_test_devs + 1) < 0) {
index 9075aa54e95517cdbb1094f04e72c36357401e52..b06d9fe23a28c14f71c3263daaa84965dadeee45 100644 (file)
@@ -24,7 +24,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
 {
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
-                               __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO);
+                                      __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;
 
index db1cd26d8752022b7f8b576cdff78f5412209d39..5715448ab0b53db5d8bd4b64d47706f7deaaf7a6 100644 (file)
@@ -124,9 +124,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
                "tlb_flush_pending %d\n"
-#endif
                "def_flags: %#lx(%pGv)\n",
 
                mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
@@ -158,9 +156,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
-               mm->tlb_flush_pending,
-#endif
+               atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
 }
index 86975dec0ba160feadfb8aa0d13b8f2be943638d..216114f6ef0b7f8c09378edd3615d6a39527ead0 100644 (file)
@@ -1495,6 +1495,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                goto clear_pmdnuma;
        }
 
+       /*
+        * The page_table_lock above provides a memory barrier
+        * with change_protection_range.
+        */
+       if (mm_tlb_flush_pending(vma->vm_mm))
+               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and access rights restored.
index a1a0ac0ad6f67ad479916fcbc43036973ddca824..31e207cb399bebd11371e46eb26f625a5b74487c 100644 (file)
@@ -4062,9 +4062,9 @@ out:
        return ret;
 out_release_unlock:
        spin_unlock(ptl);
-out_release_nounlock:
        if (vm_shared)
                unlock_page(page);
+out_release_nounlock:
        put_page(page);
        goto out;
 }
index 4dc92f138786988c4ef0f9d371ff8a48b2e6e905..db20f8436bc3c15bf05f86ccec5e7b1f80d807cc 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1038,7 +1038,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                goto out_unlock;
 
        if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
-           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
+           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
+                                               mm_tlb_flush_pending(mm)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
index f65beaad319be4c597f9a071771e5f376234d753..e158f7ac67300b10b8827fe6825667506095f550 100644 (file)
@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
        return true;
 }
 
-/* tlb_gather_mmu
- *     Called to initialize an (on-stack) mmu_gather structure for page-table
- *     tear-down from @mm. The @fullmm argument is used when @mm is without
- *     users and we're going to destroy the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
@@ -275,10 +271,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
        struct mmu_gather_batch *batch, *next;
 
+       if (force)
+               __tlb_adjust_range(tlb, start, end - start);
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -398,6 +398,34 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+/* tlb_gather_mmu
+ *     Called to initialize an (on-stack) mmu_gather structure for page-table
+ *     tear-down from @mm. The @fullmm argument is used when @mm is without
+ *     users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
+{
+       arch_tlb_gather_mmu(tlb, mm, start, end);
+       inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end)
+{
+       /*
+        * If parallel threads are doing PTE changes on the same range under a
+        * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
+        * flush by batching, a thread that observes pte_none or !pte_dirty
+        * can wrongly skip the flush while another thread still holds a stale
+        * TLB entry, so flush the TLB forcefully if we detect parallel PTE
+        * batching threads.
+       bool force = mm_tlb_flush_nested(tlb->mm);
+
+       arch_tlb_finish_mmu(tlb, start, end, force);
+       dec_tlb_flush_pending(tlb->mm);
+}
+
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
index 62767155187356d54d1fa7333ad402e76183ca0b..d68a41da6abb0743d6b09cc49c5c9524463715c3 100644 (file)
@@ -1937,12 +1937,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-       /*
-        * We are not sure a pending tlb flush here is for a huge page
-        * mapping or not. Hence use the tlb range variant
-        */
-       if (mm_tlb_flush_pending(mm))
-               flush_tlb_range(vma, mmun_start, mmun_end);
 
        /* Prepare a page as a migration target */
        __SetPageLocked(new_page);
index 4180ad8cc9c5e70c661efc8f30416af40e9c0066..bd0f409922cb2fc133f9fecba64a839380d4f937 100644 (file)
@@ -244,7 +244,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
-       set_tlb_flush_pending(mm);
+       inc_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
@@ -256,7 +256,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
-       clear_tlb_flush_pending(mm);
+       dec_tlb_flush_pending(mm);
 
        return pages;
 }
index fc32aa81f3593537cc2b11d5f63b5c5f517097a4..6d00f746c2fd96452661fde3f704289eed7f1f70 100644 (file)
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
         * Part of the reclaimable slab consists of items that are in use,
         * and cannot be freed. Cap this estimate at the low watermark.
         */
-       available += global_page_state(NR_SLAB_RECLAIMABLE) -
-                    min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+       available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+                    min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+                        wmark_low);
 
        if (available < 0)
                available = 0;
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                global_node_page_state(NR_FILE_DIRTY),
                global_node_page_state(NR_WRITEBACK),
                global_node_page_state(NR_UNSTABLE_NFS),
-               global_page_state(NR_SLAB_RECLAIMABLE),
-               global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_node_page_state(NR_SLAB_RECLAIMABLE),
+               global_node_page_state(NR_SLAB_UNRECLAIMABLE),
                global_node_page_state(NR_FILE_MAPPED),
                global_node_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
@@ -7668,7 +7669,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_info("%s: [%lx, %lx) PFNs busy\n",
+               pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
index c8993c63eb259b3a5302a058ce231d1290fc9b66..c1286d47aa1fad7fee7ea5bb865a2dc7efd672f2 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                .flags = PVMW_SYNC,
        };
        int *cleaned = arg;
+       bool invalidation_needed = false;
 
        while (page_vma_mapped_walk(&pvmw)) {
                int ret = 0;
-               address = pvmw.address;
                if (pvmw.pte) {
                        pte_t entry;
                        pte_t *pte = pvmw.pte;
@@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pte_dirty(*pte) && !pte_write(*pte))
                                continue;
 
-                       flush_cache_page(vma, address, pte_pfn(*pte));
-                       entry = ptep_clear_flush(vma, address, pte);
+                       flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
+                       entry = ptep_clear_flush(vma, pvmw.address, pte);
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
-                       set_pte_at(vma->vm_mm, address, pte, entry);
+                       set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
                        ret = 1;
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
                                continue;
 
-                       flush_cache_page(vma, address, page_to_pfn(page));
-                       entry = pmdp_huge_clear_flush(vma, address, pmd);
+                       flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+                       entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
                        entry = pmd_wrprotect(entry);
                        entry = pmd_mkclean(entry);
-                       set_pmd_at(vma->vm_mm, address, pmd, entry);
+                       set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                }
 
                if (ret) {
-                       mmu_notifier_invalidate_page(vma->vm_mm, address);
                        (*cleaned)++;
+                       invalidation_needed = true;
                }
        }
 
+       if (invalidation_needed) {
+               mmu_notifier_invalidate_range(vma->vm_mm, address,
+                               address + (1UL << compound_order(page)));
+       }
+
        return true;
 }
 
@@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        };
        pte_t pteval;
        struct page *subpage;
-       bool ret = true;
+       bool ret = true, invalidation_needed = false;
        enum ttu_flags flags = (enum ttu_flags)arg;
 
        /* munlock has nothing to gain from examining un-locked vmas */
@@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                VM_BUG_ON_PAGE(!pvmw.pte, page);
 
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-               address = pvmw.address;
-
 
                if (!(flags & TTU_IGNORE_ACCESS)) {
-                       if (ptep_clear_flush_young_notify(vma, address,
+                       if (ptep_clear_flush_young_notify(vma, pvmw.address,
                                                pvmw.pte)) {
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                }
 
                /* Nuke the page table entry. */
-               flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+               flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
                if (should_defer_flush(mm, flags)) {
                        /*
                         * We clear the PTE but do not flush so potentially
@@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * transition on a cached TLB entry is written through
                         * and traps if the PTE is unmapped.
                         */
-                       pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+                       pteval = ptep_get_and_clear(mm, pvmw.address,
+                                                   pvmw.pte);
 
                        set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
                } else {
-                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                       pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
                }
 
                /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (PageHuge(page)) {
                                int nr = 1 << compound_order(page);
                                hugetlb_count_sub(nr, mm);
-                               set_huge_swap_pte_at(mm, address,
+                               set_huge_swap_pte_at(mm, pvmw.address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
                        } else {
                                dec_mm_counter(mm, mm_counter(page));
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                        }
 
                } else if (pte_unused(pteval)) {
@@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else if (PageAnon(page)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1460,7 +1464,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 * If the page was redirtied, it cannot be
                                 * discarded. Remap the page to page table.
                                 */
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                SetPageSwapBacked(page);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1468,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        }
 
                        if (swap_duplicate(entry) < 0) {
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
                                break;
@@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else
                        dec_mm_counter(mm, mm_counter_file(page));
 discard:
                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
-               mmu_notifier_invalidate_page(mm, address);
+               invalidation_needed = true;
        }
+
+       if (invalidation_needed)
+               mmu_notifier_invalidate_range(mm, address,
+                               address + (1UL << compound_order(page)));
        return ret;
 }
 
index b0aa6075d164df9ae4766876cc823394abaebc6d..6540e598244412023db650412062604b704b58b3 100644 (file)
@@ -1022,7 +1022,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
-                               if (list_empty(&info->shrinklist)) {
+                               /*
+                                * The _careful variant defends against unlocked
+                                * access to ->shrinklist in shmem_unused_huge_shrink()
+                                */
+                               if (list_empty_careful(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
@@ -1817,7 +1821,11 @@ alloc_nohuge:            page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
                         * to shrink under memory pressure.
                         */
                        spin_lock(&sbinfo->shrinklist_lock);
-                       if (list_empty(&info->shrinklist)) {
+                       /*
+                        * The _careful variant defends against unlocked
+                        * access to ->shrinklist in shmem_unused_huge_shrink()
+                        */
+                       if (list_empty_careful(&info->shrinklist)) {
                                list_add_tail(&info->shrinklist,
                                                &sbinfo->shrinklist);
                                sbinfo->shrinklist_len++;
index 7b07ec852e01fa931b2b302e8df5cff9f17f62d6..9ecddf568fe30e5cf1fba6db8eda3b7abe96d379 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += global_page_state(NR_SLAB_RECLAIMABLE);
+               free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
index e1133bc634b5e8ed9a4639677e577a0d52e7c1d5..8a3ce79b1307b7f260ce2f64e96bdacfb9a322f0 100644 (file)
@@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
        return found;
 }
 
+/**
+ * batadv_tt_global_sync_flags - update TT sync flags
+ * @tt_global: the TT global entry to update sync flags in
+ *
+ * Updates the sync flag bits in the tt_global flag attribute with a logical
+ * OR of all sync flags from any of its TT orig entries.
+ */
+static void
+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
+{
+       struct batadv_tt_orig_list_entry *orig_entry;
+       const struct hlist_head *head;
+       u16 flags = BATADV_NO_FLAGS;
+
+       rcu_read_lock();
+       head = &tt_global->orig_list;
+       hlist_for_each_entry_rcu(orig_entry, head, list)
+               flags |= orig_entry->flags;
+       rcu_read_unlock();
+
+       flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
+       tt_global->common.flags = flags;
+}
+
+/**
+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * @tt_global: the TT global entry to add an orig entry in
+ * @orig_node: the originator to add an orig entry for
+ * @ttvn: translation table version number of this changeset
+ * @flags: TT sync flags
+ */
 static void
 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
-                               struct batadv_orig_node *orig_node, int ttvn)
+                               struct batadv_orig_node *orig_node, int ttvn,
+                               u8 flags)
 {
        struct batadv_tt_orig_list_entry *orig_entry;
 
@@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
                 * was added during a "temporary client detection"
                 */
                orig_entry->ttvn = ttvn;
-               goto out;
+               orig_entry->flags = flags;
+               goto sync_flags;
        }
 
        orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
@@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
        batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;
+       orig_entry->flags = flags;
        kref_init(&orig_entry->refcount);
 
        spin_lock_bh(&tt_global->list_lock);
@@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
        spin_unlock_bh(&tt_global->list_lock);
        atomic_inc(&tt_global->orig_list_count);
 
+sync_flags:
+       batadv_tt_global_sync_flags(tt_global);
 out:
        if (orig_entry)
                batadv_tt_orig_list_entry_put(orig_entry);
@@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                }
 
                /* the change can carry possible "attribute" flags like the
-                * TT_CLIENT_WIFI, therefore they have to be copied in the
+                * TT_CLIENT_TEMP, therefore they have to be copied in the
                 * client entry
                 */
-               common->flags |= flags;
+               common->flags |= flags & (~BATADV_TT_SYNC_MASK);
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
@@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
        }
 add_orig_entry:
        /* add the new orig_entry (if needed) or update it */
-       batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
+       batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
+                                       flags & BATADV_TT_SYNC_MASK);
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
@@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
                               struct batadv_tt_orig_list_entry *orig,
                               bool best)
 {
+       u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
        void *hdr;
        struct batadv_orig_node_vlan *vlan;
        u8 last_ttvn;
@@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
            nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
            nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
            nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
-           nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+           nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
                goto nla_put_failure;
 
        if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
@@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                unsigned short vid)
 {
        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+       struct batadv_tt_orig_list_entry *tt_orig;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_head *head;
@@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                        /* find out if this global entry is announced by this
                         * originator
                         */
-                       if (!batadv_tt_global_entry_has_orig(tt_global,
-                                                            orig_node))
+                       tt_orig = batadv_tt_global_orig_entry_find(tt_global,
+                                                                  orig_node);
+                       if (!tt_orig)
                                continue;
 
                        /* use network order to read the VID: this ensures that
@@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                        /* compute the CRC on flags that have to be kept in sync
                         * among nodes
                         */
-                       flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+                       flags = tt_orig->flags;
                        crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
 
                        crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+
+                       batadv_tt_orig_list_entry_put(tt_orig);
                }
                rcu_read_unlock();
        }
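[Note on the batman-adv change above: sync-relevant TT flags now live per originator entry, and the flags advertised for a global entry are recomputed as the entry's non-sync bits OR'd with the sync bits of every originator announcing it (batadv_tt_global_sync_flags() above). A standalone model of that recomputation; the mask value 0x00F0 is BATADV_TT_SYNC_MASK as defined in batman-adv's packet.h at the time, quoted here on the assumption it is unchanged:

    #include <stdio.h>

    /* Assumed value of BATADV_TT_SYNC_MASK from batman-adv's packet.h. */
    #define BATADV_TT_SYNC_MASK 0x00F0

    /* Model of batadv_tt_global_sync_flags(): non-sync bits come from the
     * common entry, sync bits are OR'd over all originator entries (which
     * already hold only sync bits). */
    static unsigned short recompute_flags(unsigned short common_flags,
                                          const unsigned short *orig_flags,
                                          int n)
    {
            unsigned short flags = 0;

            for (int i = 0; i < n; i++)
                    flags |= orig_flags[i];
            return (common_flags & ~BATADV_TT_SYNC_MASK) | flags;
    }

    int main(void)
    {
            /* Two originators announcing different sync bits. */
            unsigned short origs[] = { 0x0010, 0x0020 };

            printf("0x%04x\n", recompute_flags(0x0101, origs, 2));
            return 0;
    }
]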
index ea43a64492479809fe6bdf95b436792078f50e9f..a62795868794103d7e712ba91def5997dc3a5779 100644 (file)
@@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry {
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
  * @orig_node: pointer to orig node announcing this non-mesh client
  * @ttvn: translation table version number which added the non-mesh client
+ * @flags: per orig entry TT sync flags
  * @list: list node for batadv_tt_global_entry::orig_list
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry {
 struct batadv_tt_orig_list_entry {
        struct batadv_orig_node *orig_node;
        u8 ttvn;
+       u8 flags;
        struct hlist_node list;
        struct kref refcount;
        struct rcu_head rcu;
index 8515f8fe0460ae08e08e269a47524e0738714626..ce15a06d5558af0292cc739b42a7dc3c1d89428d 100644 (file)
@@ -2739,7 +2739,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
        if (tx_path)
                return skb->ip_summed != CHECKSUM_PARTIAL &&
-                      skb->ip_summed != CHECKSUM_NONE;
+                      skb->ip_summed != CHECKSUM_UNNECESSARY;
 
        return skb->ip_summed == CHECKSUM_NONE;
 }
index 76c2077c3f5b697bf8e0d4b030b70dde8fc70345..2e548eca34898f51316275c918bb1f0f4a63526e 100644 (file)
@@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net)
        net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
 #endif
 
+       /* Some IGMP sysctls, whose values are always used */
+       net->ipv4.sysctl_igmp_max_memberships = 20;
+       net->ipv4.sysctl_igmp_max_msf = 10;
+       /* IGMP reports for link-local multicast groups are enabled by default */
+       net->ipv4.sysctl_igmp_llm_reports = 1;
+       net->ipv4.sysctl_igmp_qrv = 2;
+
        return 0;
 }
 
index c4c6e1969ed0606ff9fb4ea46609f75b249e589b..2ae8f54cb32148f2499f78ecbf29259db36bd207 100644 (file)
@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
        int taglen;
 
        for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
-               if (optptr[0] == IPOPT_CIPSO)
+               switch (optptr[0]) {
+               case IPOPT_CIPSO:
                        return optptr;
-               taglen = optptr[1];
+               case IPOPT_END:
+                       return NULL;
+               case IPOPT_NOOP:
+                       taglen = 1;
+                       break;
+               default:
+                       taglen = optptr[1];
+               }
                optlen -= taglen;
                optptr += taglen;
        }
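[The cipso_v4_optptr() fix above stops trusting optptr[1] as a length byte for every option: IPOPT_END and IPOPT_NOOP are single-byte options with no length field, so the old loop could read a bogus taglen and spin or walk past the header. A standalone model of the corrected option walk; constants per RFC 791, simplified with no bounds checking beyond the loop condition:

    #include <stdio.h>

    #define IPOPT_END   0
    #define IPOPT_NOOP  1
    #define IPOPT_CIPSO 134

    /* Model of the fixed walk: return the offset of the CIPSO option in the
     * options area following the fixed IPv4 header, or -1. */
    static int find_cipso(const unsigned char *opts, int optlen)
    {
            int off = 0;

            while (optlen > 0) {
                    int taglen;

                    switch (opts[off]) {
                    case IPOPT_CIPSO:
                            return off;
                    case IPOPT_END:     /* end of option list */
                            return -1;
                    case IPOPT_NOOP:    /* 1-byte padding, no length field */
                            taglen = 1;
                            break;
                    default:
                            taglen = opts[off + 1];
                    }
                    optlen -= taglen;
                    off += taglen;
            }
            return -1;
    }

    int main(void)
    {
            unsigned char opts[] = { IPOPT_NOOP, IPOPT_NOOP,
                                     IPOPT_CIPSO, 6, 0, 0 };

            printf("CIPSO at offset %d\n",
                   find_cipso(opts, (int)sizeof(opts)));
            return 0;
    }
]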
index 8e0257d0120097770e37017439684a2345619f5e..1540db65241a6fd4d96b00546f13a3e3d3cd1815 100644 (file)
@@ -450,6 +450,7 @@ out_unlock:
 out:
        NAPI_GRO_CB(skb)->flush |= flush;
        skb_gro_remcsum_cleanup(skb, &grc);
+       skb->remcsum_offload = 0;
 
        return pp;
 }
index 28f14afd0dd3a392da3b84c5e791fffaf46ad254..498706b072fb70e1ffe6b5dba817816db5a4cfa7 100644 (file)
@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
                goto out_sock;
        }
 
-       /* Sysctl initialization */
-       net->ipv4.sysctl_igmp_max_memberships = 20;
-       net->ipv4.sysctl_igmp_max_msf = 10;
-       /* IGMP reports for link-local multicast groups are enabled by default */
-       net->ipv4.sysctl_igmp_llm_reports = 1;
-       net->ipv4.sysctl_igmp_qrv = 2;
        return 0;
 
 out_sock:
index 50c74cd890bc79ed6c85c958c5397d833e9aa74a..e153c40c2436109d4bca4a9caf34b90cbf000cd9 100644 (file)
@@ -965,11 +965,12 @@ static int __ip_append_data(struct sock *sk,
                csummode = CHECKSUM_PARTIAL;
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
@@ -1288,6 +1289,7 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                return -EINVAL;
 
        if ((size + skb->len > mtu) &&
+           (skb_queue_len(&sk->sk_write_queue) == 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                if (skb->ip_summed != CHECKSUM_PARTIAL)
index 2920e0cb09f8d3e743eb4f49c16060ba1af48ed4..53de1424c13cda5d1fec826b97cacf4f95adc99a 100644 (file)
@@ -107,6 +107,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_ORIG_SACK_ACKED   0x200 /* Never retransmitted data are (s)acked  */
 #define FLAG_SND_UNA_ADVANCED  0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK      0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER    0x1000 /* Set TLP or RTO timer */
 #define FLAG_SACK_RENEGING     0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT  0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK  0x8000 /* do not call tcp_send_challenge_ack()  */
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
                return;
 
        /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-       if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-           (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+       if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+           (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
                tp->snd_cwnd = tp->snd_ssthresh;
                tp->snd_cwnd_stamp = tcp_jiffies32;
        }
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
                /* Offset the time elapsed after installing regular RTO */
                if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
                    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
-                       struct sk_buff *skb = tcp_write_queue_head(sk);
-                       u64 rto_time_stamp = skb->skb_mstamp +
-                                            jiffies_to_usecs(rto);
-                       s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+                       s64 delta_us = tcp_rto_delta_us(sk);
                        /* delta_us may not be positive if the socket is locked
                         * when the retrans timer fires and is rescheduled.
                         */
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
        }
 }
 
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+       if (!tcp_schedule_loss_probe(sk))
+               tcp_rearm_rto(sk);
+}
+
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                        ca_rtt_us, sack->rate);
 
        if (flag & FLAG_ACKED) {
-               tcp_rearm_rto(sk);
+               flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
                if (unlikely(icsk->icsk_mtup.probe_size &&
                             !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
                        tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                 * after when the head was last (re)transmitted. Otherwise the
                 * timeout may continue to extend in loss recovery.
                 */
-               tcp_rearm_rto(sk);
+               flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
        }
 
        if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3580,9 +3585,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (after(ack, tp->snd_nxt))
                goto invalid_ack;
 
-       if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
-               tcp_rearm_rto(sk);
-
        if (after(ack, prior_snd_una)) {
                flag |= FLAG_SND_UNA_ADVANCED;
                icsk->icsk_retransmits = 0;
@@ -3647,18 +3649,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
                                    &sack_state);
 
+       if (tp->tlp_high_seq)
+               tcp_process_tlp_ack(sk, ack, flag);
+       /* If needed, reset TLP/RTO timer; RACK may later override this. */
+       if (flag & FLAG_SET_XMIT_TIMER)
+               tcp_set_xmit_timer(sk);
+
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
                tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
        }
-       if (tp->tlp_high_seq)
-               tcp_process_tlp_ack(sk, ack, flag);
 
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
                sk_dst_confirm(sk);
 
-       if (icsk->icsk_pending == ICSK_TIME_RETRANS)
-               tcp_schedule_loss_probe(sk);
        delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
        lost = tp->lost - lost;                 /* freshly marked lost */
        tcp_rate_gen(sk, delivered, lost, sack_state.rate);
index 2f1588bf73dad9b34aebee45ce738a7e9a4515ae..b7661a68d4984c485a4853441d21abe8da9e325a 100644 (file)
@@ -2377,24 +2377,15 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 timeout, tlp_time_stamp, rto_time_stamp;
        u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
+       u32 timeout, rto_delta_us;
 
-       /* No consecutive loss probes. */
-       if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
-               tcp_rearm_rto(sk);
-               return false;
-       }
        /* Don't do any loss probe on a Fast Open connection before 3WHS
         * finishes.
         */
        if (tp->fastopen_rsk)
                return false;
 
-       /* TLP is only scheduled when next timer event is RTO. */
-       if (icsk->icsk_pending != ICSK_TIME_RETRANS)
-               return false;
-
        /* Schedule a loss probe in 2*RTT for SACK capable connections
         * in Open state, that are either limited by cwnd or application.
         */
@@ -2417,14 +2408,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
                                (rtt + (rtt >> 1) + TCP_DELACK_MAX));
        timeout = max_t(u32, timeout, msecs_to_jiffies(10));
 
-       /* If RTO is shorter, just schedule TLP in its place. */
-       tlp_time_stamp = tcp_jiffies32 + timeout;
-       rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
-       if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-               s32 delta = rto_time_stamp - tcp_jiffies32;
-               if (delta > 0)
-                       timeout = delta;
-       }
+       /* If the RTO formula yields an earlier time, then use that time. */
+       rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+       if (rto_delta_us > 0)
+               timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
                                  TCP_RTO_MAX);
@@ -3449,6 +3436,10 @@ int tcp_connect(struct sock *sk)
        int err;
 
        tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+
+       if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+               return -EHOSTUNREACH; /* Routing failure or similar. */
+
        tcp_connect_init(sk);
 
        if (unlikely(tp->repair)) {
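
Note: tcp_schedule_loss_probe() above no longer keys off icsk_pending; the TLP-versus-RTO choice now goes through a tcp_rto_delta_us() helper that reports how far in the future the RTO for the oldest unacked skb would fire. A hedged sketch of that helper, assuming it is an inline alongside the other TCP helpers and that the 4.13-era field names (skb_mstamp, tcp_mstamp, icsk_rto) apply:

    static inline s64 tcp_rto_delta_us(const struct sock *sk)
    {
            const struct sk_buff *skb = tcp_write_queue_head(sk);
            u32 rto = inet_csk(sk)->icsk_rto;        /* in jiffies */
            u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);

            return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
    }

A positive result means the RTO still lies in the future, so the min_t() above clamps the TLP timeout to fire no later than the RTO would have. The separate tcp_connect() hunk makes connect() fail fast with -EHOSTUNREACH when the route cannot be rebuilt, before any connection state is initialized.
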
index c0feeeef962aa31401ee90f8bd015c2aae2ef932..e906014890b64ef6a2bfe022e17358bd9659d204 100644 (file)
@@ -652,7 +652,8 @@ static void tcp_keepalive_timer (unsigned long data)
                goto death;
        }
 
-       if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+       if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+           ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
                goto out;
 
        elapsed = keepalive_time_when(tp);
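
Note: the keepalive timer now also bails out in SYN_SENT, using the TCPF_* mask idiom: each TCPF_ constant is 1 shifted by the corresponding TCP_ state number (see include/net/tcp_states.h), so a single AND tests membership in a whole set of states. Illustration:

    /* TCPF_CLOSE == 1 << TCP_CLOSE, TCPF_SYN_SENT == 1 << TCP_SYN_SENT */
    if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))
            goto out;   /* equivalent to: sk_state == TCP_CLOSE ||
                         *                sk_state == TCP_SYN_SENT */
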
index e6276fa3750b909615668fddf84495369bd7d369..a7c804f73990a0610bc85c02fc2dd76858973c22 100644 (file)
@@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
        if (is_udplite)                                  /*     UDP-Lite      */
                csum = udplite_csum(skb);
 
-       else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+       else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 
                skb->ip_summed = CHECKSUM_NONE;
                goto send;
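
Note: the udp_send_skb() change makes the "UDP checksum disabled" shortcut apply only to non-GSO packets; a GSO skb must keep its CHECKSUM_PARTIAL state so segmentation can produce valid per-segment checksums. A sketch of the resulting branch order, with the neighbouring branches assumed from the surrounding 4.13-era function:

    if (is_udplite)
            csum = udplite_csum(skb);
    else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {
            skb->ip_summed = CHECKSUM_NONE;   /* SO_NO_CHECK honoured */
            goto send;
    } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
            udp4_hwcsum(skb, fl4->saddr, fl4->daddr);  /* offload path */
            goto send;
    } else
            csum = udp_csum(skb);
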
index 781250151d40ee4559f7b90d15dccad8ffaeafd0..0932c85b42af0bc868badd1771b5cb9353c969a9 100644 (file)
@@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;
 
-       skb->ip_summed = CHECKSUM_NONE;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
 
        /* If there is no outer header we can fake a checksum offload
         * due to the fact that we have already done the checksum in
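
Note: udp4_ufo_fragment() computes the full UDP checksum in software just before this point, so CHECKSUM_UNNECESSARY (checksum already known good) is the accurate state, whereas CHECKSUM_NONE claimed no valid checksum existed at all. A hedged sketch of the preceding lines this relies on, assumed from the surrounding function:

    /* checksum the whole datagram in software, then record that the
     * result is already valid:
     */
    csum = skb_checksum(skb, 0, skb->len, 0);
    uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
    if (uh->check == 0)
            uh->check = CSUM_MANGLED_0;
    skb->ip_summed = CHECKSUM_UNNECESSARY;
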
index 162efba0d0cd851848363588318cf6ade4a5a62c..2dfe50d8d609a7a623edacbe40e93022dfac685e 100644 (file)
@@ -1381,11 +1381,12 @@ emsgsize:
         */
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : headersize)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen, exthdrlen,
                                          transhdrlen, mtu, flags, fl6);
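
Note: the ip6_append_data() condition is reshuffled so the UFO path is taken either because the cork already holds a GSO skb, or because this is the first skb being built (skb_queue_len(queue) <= 1) and the payload exceeds the MTU on a UFO-capable UDP socket; the queue-length guard stops UFO and non-UFO skbs from being mixed mid-cork. The same test restated as a predicate (a sketch; the parameters mirror locals of the function above):

    static bool want_ufo(struct sock *sk, const struct sk_buff *skb,
                         const struct sk_buff_head *queue,
                         const struct rt6_info *rt,
                         size_t length, size_t headersize, unsigned int mtu)
    {
            if (skb && skb_is_gso(skb))        /* cork already went GSO */
                    return true;
            return (length + (skb ? skb->len : headersize)) > mtu &&
                   skb_queue_len(queue) <= 1 &&   /* first skb only */
                   sk->sk_protocol == IPPROTO_UDP &&
                   (rt->dst.dev->features & NETIF_F_UFO) &&
                   !dst_xfrm(&rt->dst) &&
                   sk->sk_type == SOCK_DGRAM &&
                   !udp_get_no_check6_tx(sk);
    }
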
index 4d30c96a819dee548ec34704a34b22774fac5da1..a640fbcba15dbf246e419d3e03da8eca0fa6901a 100644 (file)
@@ -2351,6 +2351,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
        if (on_link)
                nrt->rt6i_flags &= ~RTF_GATEWAY;
 
+       nrt->rt6i_protocol = RTPROT_REDIRECT;
        nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
        if (ip6_ins_rt(nrt))
@@ -2461,6 +2462,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
+               .fc_protocol = RTPROT_RA,
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = net,
@@ -2513,6 +2515,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
                                  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
+               .fc_protocol = RTPROT_RA,
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = dev_net(dev),
@@ -3424,14 +3427,6 @@ static int rt6_fill_node(struct net *net,
        rtm->rtm_flags = 0;
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
-       if (rt->rt6i_flags & RTF_DYNAMIC)
-               rtm->rtm_protocol = RTPROT_REDIRECT;
-       else if (rt->rt6i_flags & RTF_ADDRCONF) {
-               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
-                       rtm->rtm_protocol = RTPROT_RA;
-               else
-                       rtm->rtm_protocol = RTPROT_KERNEL;
-       }
 
        if (rt->rt6i_flags & RTF_CACHE)
                rtm->rtm_flags |= RTM_F_CLONED;
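
Note: these three route.c hunks move protocol attribution to route-creation time: redirects are tagged RTPROT_REDIRECT and RA-learned routes RTPROT_RA when the route is installed, so rt6_fill_node() can copy rt6i_protocol directly instead of reverse-engineering it from RTF_* flags at dump time (the deleted block above). Creation-time tagging in miniature, following the rt6_add_route_info() hunk (fc_table/fc_metric values assumed from the 4.13-era function):

    struct fib6_config cfg = {
            .fc_table    = RT6_TABLE_INFO,
            .fc_metric   = IP6_RT_PRIO_USER,
            .fc_protocol = RTPROT_RA,   /* origin recorded up front */
            /* ... */
    };

Userspace then sees the origin as "proto ra" or "proto redirect" in `ip -6 route`.
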
index a2267f80febbb6f31459097f27bd89d51d0f2b11..e7d378c032cb6ebe80323db987ca201e5ae2d845 100644 (file)
@@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
 
-               skb->ip_summed = CHECKSUM_NONE;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                /* If there is no outer header we can fake a checksum offload
                 * due to the fact that we have already done the checksum in
index 0615c2a950fab992134d0071707b5b336f6fb231..008a45ca31124ed5fa54d666fce61c7982b12a2f 100644 (file)
@@ -3700,14 +3700,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-                       return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
                if (val > INT_MAX)
                        return -EINVAL;
-               po->tp_reserve = val;
-               return 0;
+               lock_sock(sk);
+               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+                       ret = -EBUSY;
+               } else {
+                       po->tp_reserve = val;
+                       ret = 0;
+               }
+               release_sock(sk);
+               return ret;
        }
        case PACKET_LOSS:
        {
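
Note: the PACKET_RESERVE handler used to test for mapped rings and then store tp_reserve with no lock held, racing against concurrent ring setup (a classic check-then-act window). Re-checking and publishing under lock_sock() closes it, assuming ring creation runs under the same socket lock. The distilled shape:

    lock_sock(sk);
    if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
            ret = -EBUSY;            /* a ring already relies on tp_reserve */
    else {
            po->tp_reserve = val;    /* no ring can appear while locked */
            ret = 0;
    }
    release_sock(sk);
    return ret;
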
index e10624aa6959b596a2629a9f18bb25504428545f..9722bf839d9dec7fc7c7bed5cca0818389b245ba 100644 (file)
@@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-       if (rds_ib_ring_low(&ic->i_recv_ring))
+       if (rds_ib_ring_low(&ic->i_recv_ring)) {
                rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
+               rds_ib_stats_inc(s_ib_rx_refill_from_cq);
+       }
 }
 
 int rds_ib_recv_path(struct rds_conn_path *cp)
@@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
        if (rds_conn_up(conn)) {
                rds_ib_attempt_ack(ic);
                rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+               rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        }
 
        return ret;
index 36f0ced9e60c03297e195135ca5a8a53d1a3a27b..d516ba8178b8099f5e8e180f2e60e7a61de37811 100644 (file)
@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
 static unsigned int xt_net_id;
 static struct tc_action_ops act_xt_ops;
 
-static int ipt_init_target(struct xt_entry_target *t, char *table,
-                          unsigned int hook)
+static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+                          char *table, unsigned int hook)
 {
        struct xt_tgchk_param par;
        struct xt_target *target;
@@ -49,8 +49,9 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
                return PTR_ERR(target);
 
        t->u.kernel.target = target;
+       memset(&par, 0, sizeof(par));
+       par.net       = net;
        par.table     = table;
-       par.entryinfo = NULL;
        par.target    = target;
        par.targinfo  = t->data;
        par.hook_mask = hook;
@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
        [TCA_IPT_TARG]  = { .len = sizeof(struct xt_entry_target) },
 };
 
-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          const struct tc_action_ops *ops, int ovr, int bind)
 {
+       struct tc_action_net *tn = net_generic(net, id);
        struct nlattr *tb[TCA_IPT_MAX + 1];
        struct tcf_ipt *ipt;
        struct xt_entry_target *td, *t;
@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        if (unlikely(!t))
                goto err2;
 
-       err = ipt_init_target(t, tname, hook);
+       err = ipt_init_target(net, t, tname, hook);
        if (err < 0)
                goto err3;
 
@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a, int ovr,
                        int bind)
 {
-       struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-       return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
+       return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
+                             bind);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a, int ovr,
                       int bind)
 {
-       struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-       return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
+       return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
+                             bind);
 }
 
 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
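
Note: ipt_init_target() now zeroes the whole xt_tgchk_param before filling it and records the owning netns; the memset() subsumes the old lone `par.entryinfo = NULL` and guarantees that any field added to the struct later starts zeroed, while par.net serves targets whose checkentry needs the namespace. The same effect via a designated initializer (a sketch; the family assignment is assumed from the rest of the 4.13-era function):

    struct xt_tgchk_param par = {
            .net       = net,        /* new: checkentry may need the netns */
            .table     = table,
            .target    = target,
            .targinfo  = t->data,
            .hook_mask = hook,
            .family    = NFPROTO_IPV4,
            /* all other fields, entryinfo included, are zeroed */
    };
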
index aeef8011ac7d82d828289f4085efe3acaa8a3945..9b4dcb6a16b50eefc04167dfdd1e509546b71bf6 100644 (file)
@@ -1455,10 +1455,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        /* Initiate synch mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
-               if (!tipc_link_is_up(l)) {
-                       tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+               if (!tipc_link_is_up(l))
                        __tipc_node_link_up(n, bearer_id, xmitq);
-               }
                if (n->state == SELF_UP_PEER_UP) {
                        n->sync_point = syncpt;
                        tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
index 3bd5f4f302354cf8cecd78ae0ab13b9b3bf130ef..bc443201d3ef00ac2b197da0896a186891cff188 100755 (executable)
@@ -18,6 +18,7 @@ my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 use Cwd;
+use File::Find;
 
 my $cur_path = fastgetcwd() . '/';
 my $lk_path = "./";
@@ -58,6 +59,7 @@ my $from_filename = 0;
 my $pattern_depth = 0;
 my $version = 0;
 my $help = 0;
+my $find_maintainer_files = 0;
 
 my $vcs_used = 0;
 
@@ -249,6 +251,7 @@ if (!GetOptions(
                'sections!' => \$sections,
                'fe|file-emails!' => \$file_emails,
                'f|file' => \$from_filename,
+               'find-maintainer-files' => \$find_maintainer_files,
                'v|version' => \$version,
                'h|help|usage' => \$help,
                )) {
@@ -307,36 +310,74 @@ if (!top_of_kernel_tree($lk_path)) {
 
 my @typevalue = ();
 my %keyword_hash;
+my @mfiles = ();
 
-open (my $maint, '<', "${lk_path}MAINTAINERS")
-    or die "$P: Can't open MAINTAINERS: $!\n";
-while (<$maint>) {
-    my $line = $_;
-
-    if ($line =~ m/^([A-Z]):\s*(.*)/) {
-       my $type = $1;
-       my $value = $2;
-
-       ##Filename pattern matching
-       if ($type eq "F" || $type eq "X") {
-           $value =~ s@\.@\\\.@g;       ##Convert . to \.
-           $value =~ s/\*/\.\*/g;       ##Convert * to .*
-           $value =~ s/\?/\./g;         ##Convert ? to .
-           ##if pattern is a directory and it lacks a trailing slash, add one
-           if ((-d $value)) {
-               $value =~ s@([^/])$@$1/@;
+sub read_maintainer_file {
+    my ($file) = @_;
+
+    open (my $maint, '<', "$file")
+       or die "$P: Can't open MAINTAINERS file '$file': $!\n";
+    while (<$maint>) {
+       my $line = $_;
+
+       if ($line =~ m/^([A-Z]):\s*(.*)/) {
+           my $type = $1;
+           my $value = $2;
+
+           ##Filename pattern matching
+           if ($type eq "F" || $type eq "X") {
+               $value =~ s@\.@\\\.@g;       ##Convert . to \.
+               $value =~ s/\*/\.\*/g;       ##Convert * to .*
+               $value =~ s/\?/\./g;         ##Convert ? to .
+               ##if pattern is a directory and it lacks a trailing slash, add one
+               if ((-d $value)) {
+                   $value =~ s@([^/])$@$1/@;
+               }
+           } elsif ($type eq "K") {
+               $keyword_hash{@typevalue} = $value;
            }
-       } elsif ($type eq "K") {
-           $keyword_hash{@typevalue} = $value;
+           push(@typevalue, "$type:$value");
+       } elsif (!(/^\s*$/ || /^\s*\#/)) {
+           $line =~ s/\n$//g;
+           push(@typevalue, $line);
        }
-       push(@typevalue, "$type:$value");
-    } elsif (!/^(\s)*$/) {
-       $line =~ s/\n$//g;
-       push(@typevalue, $line);
     }
+    close($maint);
+}
+
+sub find_is_maintainer_file {
+    my ($file) = $_;
+    return if ($file !~ m@/MAINTAINERS$@);
+    $file = $File::Find::name;
+    return if (! -f $file);
+    push(@mfiles, $file);
 }
-close($maint);
 
+sub find_ignore_git {
+    return grep { $_ !~ /^\.git$/; } @_;
+}
+
+if (-d "${lk_path}MAINTAINERS") {
+    opendir(DIR, "${lk_path}MAINTAINERS") or die $!;
+    my @files = readdir(DIR);
+    closedir(DIR);
+    foreach my $file (@files) {
+       push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);
+    }
+}
+
+if ($find_maintainer_files) {
+    find( { wanted => \&find_is_maintainer_file,
+           preprocess => \&find_ignore_git,
+           no_chdir => 1,
+       }, "${lk_path}");
+} else {
+    push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";
+}
+
+foreach my $file (@mfiles) {
+    read_maintainer_file("$file");
+}
 
 #
 # Read mail address map
@@ -873,7 +914,7 @@ sub top_of_kernel_tree {
     if (   (-f "${lk_path}COPYING")
        && (-f "${lk_path}CREDITS")
        && (-f "${lk_path}Kbuild")
-       && (-f "${lk_path}MAINTAINERS")
+       && (-e "${lk_path}MAINTAINERS")
        && (-f "${lk_path}Makefile")
        && (-f "${lk_path}README")
        && (-d "${lk_path}Documentation")
index a0fe34349b24fccd9fb9b8834ea51d3e2d9e155a..e40b53db7f9fdc7c8e3f7094f8b862d66ef7651f 100644 (file)
@@ -2,9 +2,9 @@
 
 use strict;
 
-my %map;
+my $P = $0;
 
-# sort comparison function
+# sort comparison functions
 sub by_category($$) {
     my ($a, $b) = @_;
 
@@ -15,20 +15,33 @@ sub by_category($$) {
     $a =~ s/THE REST/ZZZZZZ/g;
     $b =~ s/THE REST/ZZZZZZ/g;
 
-    $a cmp $b;
+    return $a cmp $b;
 }
 
-sub alpha_output {
-    my $key;
-    my $sort_method = \&by_category;
-    my $sep = "";
-
-    foreach $key (sort $sort_method keys %map) {
-        if ($key ne " ") {
-            print $sep . $key . "\n";
-            $sep = "\n";
-        }
-        print $map{$key};
+sub by_pattern($$) {
+    my ($a, $b) = @_;
+    my $preferred_order = 'MRPLSWTQBCFXNK';
+
+    my $a1 = uc(substr($a, 0, 1));
+    my $b1 = uc(substr($b, 0, 1));
+
+    my $a_index = index($preferred_order, $a1);
+    my $b_index = index($preferred_order, $b1);
+
+    $a_index = 1000 if ($a_index == -1);
+    $b_index = 1000 if ($b_index == -1);
+
+    if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||
+       ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
+       return $a cmp $b;
+    }
+
+    if ($a_index < $b_index) {
+       return -1;
+    } elsif ($a_index == $b_index) {
+       return 0;
+    } else {
+       return 1;
     }
 }
 
@@ -39,39 +52,77 @@ sub trim {
     return $s;
 }
 
+sub alpha_output {
+    my ($hashref, $filename) = (@_);
+
+    open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n";
+    foreach my $key (sort by_category keys %$hashref) {
+       if ($key eq " ") {
+           chomp $$hashref{$key};
+           print $file $$hashref{$key};
+       } else {
+           print $file "\n" . $key . "\n";
+           foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
+               print $file ($pattern . "\n");
+           }
+       }
+    }
+    close($file);
+}
+
 sub file_input {
+    my ($hashref, $filename) = (@_);
+
     my $lastline = "";
     my $case = " ";
-    $map{$case} = "";
+    $$hashref{$case} = "";
+
+    open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n";
 
-    while (<>) {
+    while (<$file>) {
         my $line = $_;
 
         # Pattern line?
         if ($line =~ m/^([A-Z]):\s*(.*)/) {
             $line = $1 . ":\t" . trim($2) . "\n";
             if ($lastline eq "") {
-                $map{$case} = $map{$case} . $line;
+                $$hashref{$case} = $$hashref{$case} . $line;
                 next;
             }
             $case = trim($lastline);
-            exists $map{$case} and die "Header '$case' already exists";
-            $map{$case} = $line;
+            exists $$hashref{$case} and die "Header '$case' already exists";
+            $$hashref{$case} = $line;
             $lastline = "";
             next;
         }
 
         if ($case eq " ") {
-            $map{$case} = $map{$case} . $lastline;
+            $$hashref{$case} = $$hashref{$case} . $lastline;
             $lastline = $line;
             next;
         }
         trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'");
         $lastline = $line;
     }
-    $map{$case} = $map{$case} . $lastline;
+    $$hashref{$case} = $$hashref{$case} . $lastline;
+    close($file);
 }
 
-&file_input;
-&alpha_output;
+my %hash;
+my %new_hash;
+
+file_input(\%hash, "MAINTAINERS");
+
+foreach my $type (@ARGV) {
+    foreach my $key (keys %hash) {
+       if ($key =~ /$type/ || $hash{$key} =~ /$type/) {
+           $new_hash{$key} = $hash{$key};
+           delete $hash{$key};
+       }
+    }
+}
+
+alpha_output(\%hash, "MAINTAINERS.new");
+alpha_output(\%new_hash, "SECTION.new");
+
 exit(0);
index 7598361ef1f10898ea73d944ae0b0c02c4725d99..da2172ff9662d0e86ffd132b6041b30ea232eb87 100644 (file)
@@ -11,6 +11,8 @@
 #  define __NR_bpf 280
 # elif defined(__sparc__)
 #  define __NR_bpf 349
+# elif defined(__s390__)
+#  define __NR_bpf 351
 # else
 #  error __NR_bpf not defined. libbpf does not support your arch.
 # endif
index 256f571f2ab525700c121c94b6a8e866acd79b73..e5bbb090bf88549111f939dcd7c1f500e6f84a4f 100644 (file)
@@ -39,6 +39,8 @@
 #  define __NR_bpf 280
 # elif defined(__sparc__)
 #  define __NR_bpf 349
+# elif defined(__s390__)
+#  define __NR_bpf 351
 # else
 #  error __NR_bpf not defined. libbpf does not support your arch.
 # endif
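
Note: both hunks extend the per-architecture __NR_bpf fallback with s390's syscall number (351). The constant matters because bpf(2) has no glibc wrapper, so the tools issue the raw syscall; a sketch of the usual wrapper, assuming the libbpf-style signature:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
                              unsigned int size)
    {
            /* no libc wrapper exists, hence the __NR_bpf tables above */
            return syscall(__NR_bpf, cmd, attr, size);
    }
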
index 71729d47eb8552bca1350fc923b91ec189e2ef2f..7956302ecdf2ace692ba49927fe72c97ebf0cca3 100644 (file)
 
 int _version SEC("version") = 1;
 
+#if  __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define TEST_FIELD(TYPE, FIELD, MASK)                                  \
        {                                                               \
                TYPE tmp = *(volatile TYPE *)&skb->FIELD;               \
                if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))   \
                        return TC_ACT_SHOT;                             \
        }
+#else
+#define TEST_FIELD_OFFSET(a, b)        ((sizeof(a) - sizeof(b)) / sizeof(b))
+#define TEST_FIELD(TYPE, FIELD, MASK)                                  \
+       {                                                               \
+               TYPE tmp = *((volatile TYPE *)&skb->FIELD +             \
+                             TEST_FIELD_OFFSET(skb->FIELD, TYPE));     \
+               if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))   \
+                       return TC_ACT_SHOT;                             \
+       }
+#endif
 
 SEC("test1")
 int process(struct __sk_buff *skb)
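
Note: on a big-endian target a narrow load from the start of a __u32 field fetches the most significant bytes, so the new #else arm offsets the pointer to the low-order end; TEST_FIELD_OFFSET(field, TYPE) is just "number of TYPE-sized elements in the field, minus one". A standalone worked example (an illustration, not part of the selftest):

    #include <stdio.h>
    #include <stdint.h>

    #define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b))

    int main(void)
    {
            uint32_t field = 0x11223344;   /* bytes 11 22 33 44 on BE */
            uint8_t lo8;
            uint16_t lo16;

    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            lo8  = *(uint8_t *)&field;
            lo16 = *(uint16_t *)&field;
    #else
            lo8  = *((uint8_t *)&field + TEST_FIELD_OFFSET(field, uint8_t));
            lo16 = *((uint16_t *)&field + TEST_FIELD_OFFSET(field, uint16_t));
    #endif
            printf("%#x %#x\n", lo8, lo16);   /* 0x44 0x3344 either way */
            return 0;
    }
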
index addea82f76c943edb42ebbb6075abe849b5b7f32..d3ed7324105e4eeeb281c4c90922c27541b2b58f 100644 (file)
@@ -8,6 +8,7 @@
  * License as published by the Free Software Foundation.
  */
 
+#include <endian.h>
 #include <asm/types.h>
 #include <linux/types.h>
 #include <stdint.h>
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
                "check skb->hash byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
                "check skb->hash byte load not permitted 3",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
                "check skb->hash half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
                "check skb->hash half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period word load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
                "check skb->data half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
 #else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
                "check skb->tc_classid half load not permitted for lwt prog",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, tc_classid)),
 #else
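
Note: the test_verifier.c hunks switch from `#ifdef __LITTLE_ENDIAN` to a value comparison against __BYTE_ORDER, with <endian.h> newly included above. In userspace, glibc defines both __LITTLE_ENDIAN and __BIG_ENDIAN as constants on every architecture, so the #ifdef form is always true and the big-endian instruction variants could never be selected, e.g. on s390x. A minimal demonstration:

    #include <endian.h>
    #include <stdio.h>

    int main(void)
    {
    #ifdef __LITTLE_ENDIAN
            puts("__LITTLE_ENDIAN is defined - even on big-endian hosts");
    #endif
    #if __BYTE_ORDER == __LITTLE_ENDIAN
            puts("host is little-endian");
    #else
            puts("host is big-endian");
    #endif
            return 0;
    }
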