Merge branch 'for-linus' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Aug 2017 19:26:49 +0000 (12:26 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Aug 2017 19:26:49 +0000 (12:26 -0700)
Pull block fixes from Jens Axboe:
 "A set of fixes that should go into this series. This contains:

   - Fix from Bart for blk-mq requeue queue running, preventing a
     continued loop of run/restart.

   - Fix for a bio/blk-integrity issue, in two parts. One from
     Christoph, fixing where verification happens, and one from Milan,
     for a NULL profile.

   - NVMe pull request, most of the changes being for nvme-fc, but also
     a few trivial core/pci fixes"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvme: fix directive command numd calculation
  nvme: fix nvme reset command timeout handling
  nvme-pci: fix CMB sysfs file removal in reset path
  lpfc: support nvmet_fc defer_rcv callback
  nvmet_fc: add defer_req callback for deferment of cmd buffer return
  nvme: strip trailing 0-bytes in wwid_show
  block: Make blk_mq_delay_kick_requeue_list() rerun the queue at a quiet time
  bio-integrity: only verify integrity on the lowest stacked driver
  bio-integrity: Fix regression if profile verify_fn is NULL
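
For context on the requeue fix: a blk-mq driver parks a request and then
kicks the requeue list after a delay. A minimal driver-side sketch of that
pattern (the mydrv_defer() name and the 100 ms delay are illustrative, not
from the patch):

    #include <linux/blk-mq.h>

    static void mydrv_defer(struct request *rq)
    {
            /* Park the request on the queue's requeue list... */
            blk_mq_requeue_request(rq, false);
            /* ...and run the list again after 100 ms. The fix makes this
             * delayed rerun happen at a quiet time, so a driver that keeps
             * requeueing no longer triggers a continuous run/restart loop. */
            blk_mq_delay_kick_requeue_list(rq->q, 100);
    }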

118 files changed:
Documentation/fb/efifb.txt
Documentation/gpio/gpio-legacy.txt
MAINTAINERS
arch/arm/include/asm/tlb.h
arch/ia64/include/asm/tlb.h
arch/powerpc/configs/powernv_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/watchdog.c
arch/powerpc/platforms/powernv/idle.c
arch/s390/include/asm/tlb.h
arch/sh/include/asm/tlb.h
arch/sparc/include/asm/spitfire.h
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/head_64.S
arch/sparc/kernel/setup_64.c
arch/sparc/mm/init_64.c
arch/um/include/asm/tlb.h
drivers/block/sunvdc.c
drivers/block/zram/zram_drv.c
drivers/cpuidle/cpuidle-powernv.c
drivers/dma-buf/sync_file.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/stm/Kconfig
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/i2c-core-acpi.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core.h
drivers/i2c/muxes/Kconfig
drivers/iommu/arm-smmu.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/geneve.c
drivers/pci/pci.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
drivers/pinctrl/zte/pinctrl-zx.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/xhci-pci.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/omap2/omapfb/dss/core.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/proc/meminfo.c
fs/proc/task_mmu.c
fs/userfaultfd.c
include/asm-generic/tlb.h
include/linux/device.h
include/linux/i2c.h
include/linux/mm_types.h
include/linux/pci.h
include/linux/pinctrl/pinconf-generic.h
include/linux/sync_file.h
include/uapi/drm/msm_drm.h
kernel/fork.c
kernel/futex.c
kernel/power/snapshot.c
lib/fault-inject.c
lib/test_kmod.c
mm/balloon_compaction.c
mm/debug.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memory.c
mm/migrate.c
mm/mprotect.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/util.c
net/ipv4/af_inet.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/udp.c
net/ipv6/ip6_output.c
net/packet/af_packet.c
net/sched/act_ipt.c
net/tipc/node.c

diff --git a/Documentation/fb/efifb.txt b/Documentation/fb/efifb.txt
index a59916c29b3312cd4946a1d9a8da2331819e7845..1a85c1bdaf38a9ae7fb8b6555afc30abae661a20 100644
@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
        Macbook Pro 17", iMac 20" :
                video=efifb:i20
 
+Accepted options:
+
+nowc   Don't map the framebuffer write-combined. This can be used
+       to work around side effects and slowdowns on other CPU cores
+       when large amounts of console data are written.
+
 --
 Edgar Hucek <gimli@dark-green.com>
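
With the documentation added above, the write-combined mapping can be
disabled from the kernel command line, e.g. (usage sketch based on the
option described in this hunk):

    video=efifb:nowc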
diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt
index b34fd94f70898a7f65c2a0313349588411eb8e81..5eacc147ea870c80bb06c38d43bd5b662c171194 100644
@@ -459,7 +459,7 @@ pin controller?
 
 This is done by registering "ranges" of pins, which are essentially
 cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst
 
 While the pin allocation is totally managed by the pinctrl subsystem,
 gpio (under gpiolib) is still maintained by gpio drivers. It may happen
diff --git a/MAINTAINERS b/MAINTAINERS
index 3c419022ed93750f5a2783cbd599f6b316234fd6..6f7721d1634c2eb7247538f2cb4d85fa1be1a458 100644
@@ -10383,7 +10383,7 @@ L:      linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/
-F:     Documentation/pinctrl.txt
+F:     Documentation/driver-api/pinctl.rst
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
 
@@ -14004,6 +14004,7 @@ F:      drivers/block/virtio_blk.c
 F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
+F:     mm/balloon_compaction.c
 
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3f2eb76243e3c5f9d387959acae740ce871e5afa..d5562f9ce60079139d360e5d6afac59469051454 100644
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->range_start = start;
+               tlb->range_end = end;
+       }
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
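
Every arch tlb.h hunk in this merge follows the same pattern: the old
tlb_gather_mmu()/tlb_finish_mmu() bodies become arch_tlb_gather_mmu()/
arch_tlb_finish_mmu() hooks behind new generic wrappers. Roughly, the
mm-side wrappers (a sketch of the matching mm/memory.c change, which is not
shown in this diff) look like:

    void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
    {
            arch_tlb_gather_mmu(tlb, mm, start, end);
            inc_tlb_flush_pending(mm);
    }

    void tlb_finish_mmu(struct mmu_gather *tlb,
                        unsigned long start, unsigned long end)
    {
            /* If another thread is unmapping concurrently, force a flush
             * even when this gather batched nothing, so a racing thread
             * cannot observe stale TLB entries. */
            bool force = mm_tlb_flush_nested(tlb->mm);

            arch_tlb_finish_mmu(tlb, start, end, force);
            dec_tlb_flush_pending(tlb->mm);
    }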
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index fced197b96264e01b20743e90706ed20cf30b242..cbe5ac3699bf0f9dbdfd726c112f6fc6bd1271f0 100644
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force)
+               tlb->need_flush = 1;
        /*
         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
         * tlb->end_addr.
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 0695ce047d565199e4501333fa41ece48cdf9e45..34fc9bbfca9e68d6372e1d34b79ebf95d978e685 100644
@@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5175028c56ce74e3e50a2b30eabccf7b87ed8f0e..c5246d29f3859965316bd4d48e4e816283439bf0 100644
@@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1a61aa20dfbac9d5072ae83ef90640b8be380bd3..fd5d98a0b95c7b1ae5fda56892c2ecd43ea29f3a 100644
@@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 49d8422767b4de686ec0ee64fbf69ac415f05003..e925c1c99c71cab982967e7f7df6325e3135506f 100644
@@ -223,17 +223,27 @@ system_call_exit:
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    .Lsyscall_exit_work
 
-       /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
-       li      r7,MSR_FP
+       andi.   r0,r8,MSR_FP
+       beq 2f
 #ifdef CONFIG_ALTIVEC
-       oris    r7,r7,MSR_VEC@h
+       andis.  r0,r8,MSR_VEC@h
+       bne     3f
 #endif
-       and     r0,r8,r7
-       cmpd    r0,r7
-       bne     .Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2:     addi    r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+       li      r10,MSR_RI
+       mtmsrd  r10,1           /* Restore RI */
+#endif
+       bl      restore_math
+#ifdef CONFIG_PPC_BOOK3S
+       li      r11,0
+       mtmsrd  r11,1
+#endif
+       ld      r8,_MSR(r1)
+       ld      r3,RESULT(r1)
+       li      r11,-MAX_ERRNO
 
-       cmpld   r3,r11
+3:     cmpld   r3,r11
        ld      r5,_CCR(r1)
        bge-    .Lsyscall_error
 .Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        std     r5,_CCR(r1)
        b       .Lsyscall_error_cont
 
-.Lsyscall_restore_math:
-       /*
-        * Some initial tests from restore_math to avoid the heavyweight
-        * C code entry and MSR manipulations.
-        */
-       LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
-       and.    r0,r0,r8
-       bne     1f
-
-       ld      r7,PACACURRENT(r13)
-       lbz     r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
-       lbz     r6,THREAD+THREAD_LOAD_VEC(r7)
-       add     r0,r0,r6
-#endif
-       cmpdi   r0,0
-       beq     .Lsyscall_restore_math_cont
-
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-       li      r10,MSR_RI
-       mtmsrd  r10,1           /* Restore RI */
-#endif
-       bl      restore_math
-#ifdef CONFIG_PPC_BOOK3S
-       li      r11,0
-       mtmsrd  r11,1
-#endif
-       /* Restore volatiles, reload MSR from updated one */
-       ld      r8,_MSR(r1)
-       ld      r3,RESULT(r1)
-       li      r11,-MAX_ERRNO
-       b       .Lsyscall_restore_math_cont
-
 /* Traced system call support */
 .Lsyscall_dotrace:
        bl      save_nvgprs
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9f3e2c932dccc1c3a1158fc174a8cf57e63dd75d..ec480966f9bf55f17184537f64e7d10c40c723c0 100644
@@ -511,10 +511,6 @@ void restore_math(struct pt_regs *regs)
 {
        unsigned long msr;
 
-       /*
-        * Syscall exit makes a similar initial check before branching
-        * to restore_math. Keep them in synch.
-        */
        if (!msr_tm_active(regs->msr) &&
                !current->thread.load_fp && !loadvec(current->thread))
                return;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index cf0e1245b8cc1c78948a4004be2d20c5b5ac0b78..8d3320562c70f3ef7308645fb7b805fc14794e42 100644
@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
        hard_irq_disable();
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
                raw_local_irq_restore(*flags);
-               cpu_relax();
+               spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 static void nmi_ipi_lock(void)
 {
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-               cpu_relax();
+               spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
        nmi_ipi_lock_start(&flags);
        while (nmi_ipi_busy_count) {
                nmi_ipi_unlock_end(&flags);
-               cpu_relax();
+               spin_until_cond(nmi_ipi_busy_count == 0);
                nmi_ipi_lock_start(&flags);
        }
 
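
spin_until_cond() used above is the generic polling helper from
include/linux/processor.h; its definition is roughly (sketch):

    #define spin_until_cond(cond)                                   \
    do {                                                            \
            if (unlikely(!(cond))) {                                \
                    spin_begin();                                   \
                    do {                                            \
                            spin_cpu_relax();                       \
                    } while (!(cond));                              \
                    spin_end();                                     \
            }                                                       \
    } while (0)

On powerpc, spin_begin()/spin_cpu_relax() lower the SMT thread priority
while polling, which is why it is preferred here over a bare cpu_relax()
loop.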
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index b67f8b03a32d0f12ce29eeb4ac3be0a97384fe72..34721a257a770c450baac0288f8006c94ff1975b 100644
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
         * This may be called from low level interrupt handlers at some
         * point in future.
         */
-       local_irq_save(*flags);
-       while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
-               cpu_relax();
+       raw_local_irq_save(*flags);
+       hard_irq_disable(); /* Make it soft-NMI safe */
+       while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+               raw_local_irq_restore(*flags);
+               spin_until_cond(!test_bit(0, &__wd_smp_lock));
+               raw_local_irq_save(*flags);
+               hard_irq_disable();
+       }
 }
 
 static inline void wd_smp_unlock(unsigned long *flags)
 {
        clear_bit_unlock(0, &__wd_smp_lock);
-       local_irq_restore(*flags);
+       raw_local_irq_restore(*flags);
 }
 
 static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
                nmi_panic(regs, "Hard LOCKUP");
 }
 
-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
 {
-       cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
-       cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+       cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+       cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
        if (cpumask_empty(&wd_smp_cpus_pending)) {
                wd_smp_last_reset_tb = tb;
                cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
                                &wd_smp_cpus_stuck);
        }
 }
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+       set_cpumask_stuck(cpumask_of(cpu), tb);
+}
 
 static void watchdog_smp_panic(int cpu, u64 tb)
 {
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
        }
        smp_flush_nmi_ipi(1000000);
 
-       /* Take the stuck CPU out of the watch group */
-       for_each_cpu(c, &wd_smp_cpus_pending)
-               set_cpu_stuck(c, tb);
+       /* Take the stuck CPUs out of the watch group */
+       set_cpumask_stuck(&wd_smp_cpus_pending, tb);
 
-out:
        wd_smp_unlock(&flags);
 
        printk_safe_flush();
@@ -152,6 +159,11 @@ out:
 
        if (hardlockup_panic)
                nmi_panic(NULL, "Hard LOCKUP");
+
+       return;
+
+out:
+       wd_smp_unlock(&flags);
 }
 
 static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
 
 void arch_touch_nmi_watchdog(void)
 {
+       unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
        int cpu = smp_processor_id();
 
-       watchdog_timer_interrupt(cpu);
+       if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+               watchdog_timer_interrupt(cpu);
 }
 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
 
@@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
 
 static int start_wd_on_cpu(unsigned int cpu)
 {
+       unsigned long flags;
+
        if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
                WARN_ON(1);
                return 0;
@@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
        if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
                return 0;
 
+       wd_smp_lock(&flags);
        cpumask_set_cpu(cpu, &wd_cpus_enabled);
        if (cpumask_weight(&wd_cpus_enabled) == 1) {
                cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
                wd_smp_last_reset_tb = get_tb();
        }
-       smp_wmb();
+       wd_smp_unlock(&flags);
+
        start_watchdog_timer_on(cpu);
 
        return 0;
@@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
 
 static int stop_wd_on_cpu(unsigned int cpu)
 {
+       unsigned long flags;
+
        if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                return 0; /* Can happen in CPU unplug case */
 
        stop_watchdog_timer_on(cpu);
 
+       wd_smp_lock(&flags);
        cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+       wd_smp_unlock(&flags);
+
        wd_smp_clear_cpu_pending(cpu, get_tb());
 
        return 0;
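
The arch_touch_nmi_watchdog() change rate-limits the heartbeat: the per-CPU
timer work now runs only if at least one full timer period has elapsed, with
the period converted from milliseconds to timebase ticks as
tb_ticks_per_usec * wd_timer_period_ms * 1000 (ms -> us -> ticks). For
example, with a hypothetical 512 MHz timebase (tb_ticks_per_usec = 512) and
a 100 ms period, ticks = 512 * 100 * 1000 = 51,200,000, so code that touches
the watchdog in a tight loop no longer calls watchdog_timer_interrupt() on
every iteration.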
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 2abee070373fb3a8b757b8d3cb269e5d0b89dff6..a553aeea7af683812ba2f5a80d65e97cda163919 100644
@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
  */
 static u64 pnv_deepest_stop_psscr_val;
 static u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_flag;
 static bool deepest_stop_found;
 
 static int pnv_save_sprs_for_deep_states(void)
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
 
        update_subcore_sibling_mask();
 
-       if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
-               pnv_save_sprs_for_deep_states();
+       if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+               int rc = pnv_save_sprs_for_deep_states();
+
+               if (likely(!rc))
+                       return;
+
+               /*
+                * The stop-api is unable to restore hypervisor
+                * resources on wakeup from platform idle states which
+                * lose full context. So disable such states.
+                */
+               supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+               pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+               pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+               if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+                   (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+                       /*
+                        * Use the default stop state for CPU-Hotplug
+                        * if available.
+                        */
+                       if (default_stop_found) {
+                               pnv_deepest_stop_psscr_val =
+                                       pnv_default_stop_val;
+                               pnv_deepest_stop_psscr_mask =
+                                       pnv_default_stop_mask;
+                               pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+                                       pnv_deepest_stop_psscr_val);
+                       } else { /* Fallback to snooze loop for CPU-Hotplug */
+                               deepest_stop_found = false;
+                               pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+                       }
+               }
+       }
 }
 
 u32 pnv_get_supported_cpuidle_states(void)
@@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
                                                pnv_deepest_stop_psscr_val;
                srr1 = power9_idle_stop(psscr);
 
-       } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+       } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
+                  (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
                srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
        } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
                   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
                        max_residency_ns = residency_ns[i];
                        pnv_deepest_stop_psscr_val = psscr_val[i];
                        pnv_deepest_stop_psscr_mask = psscr_mask[i];
+                       pnv_deepest_stop_flag = flags[i];
                        deepest_stop_found = true;
                }
 
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7317b3108a88859a91523c45f1e52c08cb22fdc4..2eb8ff0d6fca443543c32ac80ff690b4b67be1ef 100644
@@ -47,10 +47,9 @@ struct mmu_table_batch {
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
-                                 struct mm_struct *mm,
-                                 unsigned long start,
-                                 unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
-                                 unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+       }
+
        tlb_flush_mmu(tlb);
 }
 
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 46e0d635e36f711aff9a88c45955905d7fbf3cc2..51a8bc967e75f1e3c96a70783e9da439310edbcb 100644
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
-       if (tlb->fullmm)
+       if (tlb->fullmm || force)
                flush_tlb_mm(tlb->mm);
 
        /* keep the page table cache within bounds */
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 1d8321c827a8821bb4e9f4989eb883cd761370db..1b1286d0506910c0f9a92ab6af14e272dd008d61 100644
 #define SUN4V_CHIP_NIAGARA5    0x05
 #define SUN4V_CHIP_SPARC_M6    0x06
 #define SUN4V_CHIP_SPARC_M7    0x07
+#define SUN4V_CHIP_SPARC_M8    0x08
 #define SUN4V_CHIP_SPARC64X    0x8a
 #define SUN4V_CHIP_SPARC_SN    0x8b
 #define SUN4V_CHIP_UNKNOWN     0xff
 
+/*
+ * The following CPU_ID_xxx constants are used
+ * to identify the CPU type in the setup phase
+ * (see head_64.S)
+ */
+#define CPU_ID_NIAGARA1                ('1')
+#define CPU_ID_NIAGARA2                ('2')
+#define CPU_ID_NIAGARA3                ('3')
+#define CPU_ID_NIAGARA4                ('4')
+#define CPU_ID_NIAGARA5                ('5')
+#define CPU_ID_M6              ('6')
+#define CPU_ID_M7              ('7')
+#define CPU_ID_M8              ('8')
+#define CPU_ID_SONOMA1         ('N')
+
 #ifndef __ASSEMBLY__
 
 enum ultra_tlb_layout {
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 493e023a468a919c61d77451e43e0a4a2e414bbe..ef4f18f7a67402ed8baceb2ea05ee7f6368cc404 100644
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "sparc-m7";
                break;
 
+       case SUN4V_CHIP_SPARC_M8:
+               sparc_cpu_type = "SPARC-M8";
+               sparc_fpu_type = "SPARC-M8 integrated FPU";
+               sparc_pmu_type = "sparc-m8";
+               break;
+
        case SUN4V_CHIP_SPARC_SN:
                sparc_cpu_type = "SPARC-SN";
                sparc_fpu_type = "SPARC-SN integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 45c820e1cba5d949ff936f15392ca3c0c8578a34..90d550bbfeefe484f1560940f111235f26332d7a 100644
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA5:
        case SUN4V_CHIP_SPARC_M6:
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
        case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 41a4073286671eff51f275bfca4ae6d9d01db74d..78e0211753d28f14f955af865704248b1e5daf24 100644
@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
         nop
 
 70:    ldub    [%g1 + 7], %g2
-       cmp     %g2, '3'
+       cmp     %g2, CPU_ID_NIAGARA3
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA3, %g4
-       cmp     %g2, '4'
+       cmp     %g2, CPU_ID_NIAGARA4
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA4, %g4
-       cmp     %g2, '5'
+       cmp     %g2, CPU_ID_NIAGARA5
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA5, %g4
-       cmp     %g2, '6'
+       cmp     %g2, CPU_ID_M6
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M6, %g4
-       cmp     %g2, '7'
+       cmp     %g2, CPU_ID_M7
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M7, %g4
-       cmp     %g2, 'N'
+       cmp     %g2, CPU_ID_M8
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_SPARC_M8, %g4
+       cmp     %g2, CPU_ID_SONOMA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_SN, %g4
        ba,pt   %xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
 91:    sethi   %hi(prom_cpu_compatible), %g1
        or      %g1, %lo(prom_cpu_compatible), %g1
        ldub    [%g1 + 17], %g2
-       cmp     %g2, '1'
+       cmp     %g2, CPU_ID_NIAGARA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA1, %g4
-       cmp     %g2, '2'
+       cmp     %g2, CPU_ID_NIAGARA2
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA2, %g4
        
@@ -600,6 +603,9 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_M7
+       be,pt   %xcc, niagara4_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_SPARC_M8
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_SN
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 4d9c3e13c15056b5d60e7ccd266b36cfe29d2c00..150ee7d4b059a69e174dff7c7d16ff906f73e1ed 100644
@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
-       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
-           sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
+
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
+       case SUN4V_CHIP_SPARC_SN:
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
+               break;
+       default:
+               break;
+       }
 
        sun4v_hvapi_init();
 }
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index fed73f14aa49befee59b93b0fcab02f65f7e10d2..afa0099f374852e0cf093088d942512008a45a68 100644
@@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
                        break;
                case SUN4V_CHIP_SPARC_M7:
                case SUN4V_CHIP_SPARC_SN:
-               default:
                        /* M7 and later support 52-bit virtual addresses.  */
                        sparc64_va_hole_top =    0xfff8000000000000UL;
                        sparc64_va_hole_bottom = 0x0008000000000000UL;
                        max_phys_bits = 49;
                        break;
+               case SUN4V_CHIP_SPARC_M8:
+               default:
+                       /* M8 and later support 54-bit virtual addresses.
+                        * However, restricting M8 and above VA bits to 53
+                        * as 4-level page table cannot support more than
+                        * 53 VA bits.
+                        */
+                       sparc64_va_hole_top =    0xfff0000000000000UL;
+                       sparc64_va_hole_bottom = 0x0010000000000000UL;
+                       max_phys_bits = 51;
+                       break;
                }
        }
 
@@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                pagecv_flag = 0x00;
                break;
@@ -2313,6 +2324,7 @@ void __init paging_init(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                page_cache4v_flag = _PAGE_CP_4V;
                break;
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 600a2e9bfee2feea2a6dbc8b91d2a5a872d9d8d3..344d95619d0334659e6f4a9f3a5bff70ae95f67c 100644
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+               tlb->need_flush = 1;
+       }
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b16ead1da5871abcef5b2233733f281158596a8..ad9749463d4fa9a382afa7f24587bbbe3a2efcc9 100644
@@ -875,6 +875,56 @@ static void print_version(void)
                printk(KERN_INFO "%s", version);
 }
 
+struct vdc_check_port_data {
+       int     dev_no;
+       char    *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+       struct vio_dev *vdev = to_vio_dev(dev);
+       struct vdc_check_port_data *port_data;
+
+       port_data = (struct vdc_check_port_data *)arg;
+
+       if ((vdev->dev_no == port_data->dev_no) &&
+           (!(strcmp((char *)&vdev->type, port_data->type))) &&
+               dev_get_drvdata(dev)) {
+               /* This device has already been configured
+                * by vdc_port_probe()
+                */
+               return 1;
+       } else {
+               return 0;
+       }
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+       struct vdc_check_port_data port_data;
+       struct device *dev;
+
+       port_data.dev_no = vdev->dev_no;
+       port_data.type = (char *)&vdev->type;
+
+       dev = device_find_child(vdev->dev.parent, &port_data,
+                               vdc_device_probed);
+
+       if (dev)
+               return true;
+
+       return false;
+}
+
 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
        struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                goto err_out_release_mdesc;
        }
 
+       /* Check if this device is part of an mpgroup */
+       if (vdc_port_mpgroup_check(vdev)) {
+               printk(KERN_WARNING
+                       "VIO: Ignoring extra vdisk port %s",
+                       dev_name(&vdev->dev));
+               goto err_out_release_mdesc;
+       }
+
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        err = -ENOMEM;
        if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        if (err)
                goto err_out_free_tx_ring;
 
+       /* Note that the device driver_data is used to determine
+        * whether the port has been probed.
+        */
        dev_set_drvdata(&vdev->dev, port);
 
        mdesc_release(hp);
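
device_find_child(), as used by vdc_port_mpgroup_check() above, returns the
matched child with a reference held. The general calling pattern is
(sketch):

    struct device *dev;

    dev = device_find_child(parent, &match_data, match_fn);
    if (dev) {
            /* A matching child exists (here: an already-probed vdc-port
             * in the same mpgroup). */
            put_device(dev);        /* drop the reference taken on match */
    }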
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 856d5dc02451d44b59695127994017877cd02b38..3b1b6340ba13a2977ffd0a13424ce95322f67f0e 100644
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
        struct zram *zram = dev_to_zram(dev);
-       char compressor[CRYPTO_MAX_ALG_NAME];
+       char compressor[ARRAY_SIZE(zram->compressor)];
        size_t sz;
 
        strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                return -EBUSY;
        }
 
-       strlcpy(zram->compressor, compressor, sizeof(compressor));
+       strcpy(zram->compressor, compressor);
        up_write(&zram->init_lock);
        return len;
 }
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193e60be4107a8be107285e55c65877..42896a67aeae38325cbda2acb7ca655c1b43915c 100644
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
        return -1;
 }
 
+extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
        struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
        const char *names[CPUIDLE_STATE_MAX];
        u32 has_stop_states = 0;
        int i, rc;
+       u32 supported_flags = pnv_get_supported_cpuidle_states();
+
 
        /* Currently we have snooze statically defined */
 
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
        for (i = 0; i < dt_idle_states; i++) {
                unsigned int exit_latency, target_residency;
                bool stops_timebase = false;
+
+               /*
+                * Skip the platform idle state whose flag isn't in
+                * the supported_cpuidle_states flag mask.
+                */
+               if ((flags[i] & supported_flags) != flags[i])
+                       continue;
                /*
                 * If an idle state has exit latency beyond
                 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d7e219d2669daf01c935bd18c0836cb88a34e075..66fb40d0ebdbbec521499cd58cf2e1d55c195878 100644
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
        struct sync_file *sync_file = file->private_data;
 
-       if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+       if (test_bit(POLL_ENABLED, &sync_file->flags))
                dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
        dma_fence_put(sync_file->fence);
        kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &sync_file->wq, wait);
 
-       if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+       if (list_empty(&sync_file->cb.node) &&
+           !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
                if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
                                           fence_check_cb_func) < 0)
                        wake_up_all(&sync_file->wq);
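
The sync_file fix moves the POLL_ENABLED bit from the shared fence to the
sync_file itself. With the bit on the fence, two files wrapping the same
fence interfered with each other (illustrative sketch; get_some_fence() is
a hypothetical helper):

    struct dma_fence *fence = get_some_fence();
    struct sync_file *a = sync_file_create(fence);
    struct sync_file *b = sync_file_create(fence);

    /* Polling 'a' used to set POLL_ENABLED in fence->flags, so a later
     * poll on 'b' found the bit already set and never registered its own
     * callback. Keeping the bit in sync_file->flags makes the poll state
     * per-file, and sync_file_release() now tests the right flags word. */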
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 5c26488e7a2d7a0320ddf321375b8ff4c200185f..0529e500c5341ed5e0d93bb658cd42d9288b13ad 100644
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        /* port@2 is the output port */
        ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-       if (ret)
+       if (ret && ret != -ENODEV)
                return ret;
 
        /* Shut down GPIO is optional */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2059a0981cc2f24b8c9032447ba9e..6463fc2c736fd4db5881a259b21848328b7f6cea 100644
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
                if (ret)
                        return ret;
 
-               if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-                       DRM_ERROR("relocation %u outside object", i);
+               if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+                       DRM_ERROR("relocation %u outside object\n", i);
                        return -EINVAL;
                }
 
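
The etnaviv change is an off-by-one fix: a relocation entry is valid as long
as reloc_offset + sizeof(*ptr) <= size of the object, so the last valid
offset is exactly size - sizeof(*ptr). The old ">=" comparison rejected that
final slot; ">" accepts it while still catching anything past the end.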
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d48fd7c918f880df0b3a27da5e8fa4f09c011b04..73217c281c9a87e51ac2a3d8ddf235b227ba2a1d 100644
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
        struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
        struct drm_gem_object *obj;
        struct drm_framebuffer *fb;
        int i;
        int ret;
 
-       for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+       for (i = 0; i < info->num_planes; i++) {
+               unsigned int height = (i == 0) ? mode_cmd->height :
+                                    DIV_ROUND_UP(mode_cmd->height, info->vsub);
+               unsigned long size = height * mode_cmd->pitches[i] +
+                                    mode_cmd->offsets[i];
+
                obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
                if (!obj) {
                        DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                }
 
                exynos_gem[i] = to_exynos_gem(obj);
+
+               if (size > exynos_gem[i]->size) {
+                       i++;
+                       ret = -EINVAL;
+                       goto err;
+               }
        }
 
        fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
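
The new per-plane size check accounts for chroma subsampling: for plane
i > 0, only DIV_ROUND_UP(height, vsub) rows are needed. For example, a
1920x1080 NV12 (4:2:0, vsub = 2) framebuffer with a 1920-byte pitch needs
1920 * 1080 bytes for plane 0 but only 1920 * 540 (plus its offset) for
plane 1, and a GEM object smaller than that is now rejected with -EINVAL.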
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242480e6fbf8eb4a8d97c6307e9390d..1648887d3f55248cf055524a2f0e341062f0cd8d 100644
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
        [RCS] = RCS_AS_CONTEXT_SWITCH,
        [BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_execlist *execlist =
-               &vgpu->execlist[workload->ring_id];
+       int ring_id = workload->ring_id;
+       struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
        struct intel_vgpu_workload *next_workload;
-       struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+       struct list_head *next = workload_q_head(vgpu, ring_id)->next;
        bool lite_restore = false;
        int ret;
 
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
        release_shadow_batch_buffer(workload);
        release_shadow_wa_ctx(&workload->wa_ctx);
 
-       if (workload->status || vgpu->resetting)
+       if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+               /* If workload->status is not successful, the HW GPU hit a
+                * GPU hang or something went wrong with i915/GVT, and GVT
+                * won't inject a context switch interrupt to the guest. To
+                * the guest, this error is effectively a vGPU hang, so we
+                * should emulate one. If there are pending workloads that
+                * the guest has already submitted, clean them up the way
+                * the HW GPU would.
+                *
+                * If we are in the middle of an engine reset, the pending
+                * workloads won't be submitted to the HW GPU and will be
+                * cleaned up later during the reset, so doing the workload
+                * cleanup here has no impact.
+                **/
+               clean_workloads(vgpu, ENGINE_MASK(ring_id));
                goto out;
+       }
 
-       if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+       if (!list_empty(workload_q_head(vgpu, ring_id))) {
                struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
                next_workload = container_of(next,
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5dbbe7b626895806e6008047bbd6a..a26c1705430eb2134d002b68ddcb26d272684bd9 100644
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        struct intel_gvt_device_info *info = &gvt->device_info;
        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
        struct intel_gvt_mmio_info *e;
+       struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+       int num = gvt->mmio.num_mmio_block;
        struct gvt_firmware_header *h;
        void *firmware;
        void *p;
        unsigned long size, crc32_start;
-       int i;
+       int i, j;
        int ret;
 
        size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
                *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+       for (i = 0; i < num; i++, block++) {
+               for (j = 0; j < block->size; j += 4)
+                       *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+                               I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+                                                       block->offset) + j));
+       }
+
        memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
        crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f6c13fef32e1611b539db7b8f46c3..2964a4d01a66da5d2fb06d256ed35fa83fa96a38 100644
@@ -149,7 +149,7 @@ struct intel_vgpu {
        bool active;
        bool pv_notified;
        bool failsafe;
-       bool resetting;
+       unsigned int resetting_eng;
        void *sched_data;
        struct vgpu_sched_ctl sched_ctl;
 
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
        unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+       unsigned int device;
+       i915_reg_t   offset;
+       unsigned int size;
+       gvt_mmio_func read;
+       gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN      (1 << 6)
 
+       struct gvt_mmio_block *mmio_block;
+       unsigned int num_mmio_block;
+
        DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
        unsigned int num_tracked_mmio;
 };
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6984e06bb81cb91601a76b67d5f2a..feed9921b3b3eb05e6e8dce5e1b510f4d6fc9479 100644
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-       unsigned int device;
-       i915_reg_t   offset;
-       unsigned int size;
-       gvt_mmio_func read;
-       gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-       {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-       {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-       {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-               pvinfo_mmio_read, pvinfo_mmio_write},
-       {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-       {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-       {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
                                              unsigned int offset)
 {
        unsigned long device = intel_gvt_get_device_type(gvt);
-       struct gvt_mmio_block *block = gvt_mmio_blocks;
+       struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+       int num = gvt->mmio.num_mmio_block;
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+       for (i = 0; i < num; i++, block++) {
                if (!(device & block->device))
                        continue;
                if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
        gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+       {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+       {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+       {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+               pvinfo_mmio_read, pvinfo_mmio_write},
+       {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+       {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+       {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                        goto err;
        }
 
+       gvt->mmio.mmio_block = mmio_blocks;
+       gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
        gvt_dbg_mmio("traced %u virtual mmio registers\n",
                     gvt->mmio.num_tracked_mmio);
        return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
        gvt_mmio_func func;
        int ret;
 
-       if (WARN_ON(bytes > 4))
+       if (WARN_ON(bytes > 8))
                return -EINVAL;
 
        /*
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88b393ce77670f9100bc2d3b246014..22e08eb2d0b7c66faf01741656fb33d1535925f3 100644
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
                i915_gem_request_put(fetch_and_zero(&workload->req));
 
-               if (!workload->status && !vgpu->resetting) {
+               if (!workload->status && !(vgpu->resetting_eng &
+                                          ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);
 
                        for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea06b8de36d90284132659eb80f72c6..3deadcbd5a245c039169f1a10c6c91cc791d3a66 100644
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+       unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
        gvt_dbg_core("------------------------------------------\n");
        gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
                     vgpu->id, dmlr, engine_mask);
-       vgpu->resetting = true;
+
+       vgpu->resetting_eng = resetting_eng;
 
        intel_vgpu_stop_schedule(vgpu);
        /*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                mutex_lock(&gvt->lock);
        }
 
-       intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+       intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
        /* full GPU reset or device model level reset */
        if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                }
        }
 
-       vgpu->resetting = false;
+       vgpu->resetting_eng = 0;
        gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
        gvt_dbg_core("------------------------------------------\n");
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add112a66a19fb186a2b28de773caadf8..77fb3980813143d2d9e3432c0ebb994a4bcad032 100644
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
                return true;
 
        case MUTEX_TRYLOCK_FAILED:
+               *unlock = false;
+               preempt_disable();
                do {
                        cpu_relax();
                        if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-       case MUTEX_TRYLOCK_SUCCESS:
                                *unlock = true;
-                               return true;
+                               break;
                        }
                } while (!need_resched());
+               preempt_enable();
+               return *unlock;
 
-               return false;
+       case MUTEX_TRYLOCK_SUCCESS:
+               *unlock = true;
+               return true;
        }
 
        BUG();
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cfaee680ed06c5bde67db6fc89d0fa..f33d90226704108e71ee5662e01977e32b627fcb 100644
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
        u32 *cs;
        int i;
 
-       cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+       cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+       *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
        *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
        *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330bfc57f75a992c60468cb9d88e81c..17c4ae7e4e7c51e85de97cb8c803b280115de8cf 100644
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
                }
 
                /* Program the max register to clamp values > 1.0. */
+               i = lut_size - 1;
                I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
                           drm_color_lut_extract(lut[i].red, 16));
                I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869e7e55dedc8ce09f53938466bee70..593349be8b9dfce328d20ea3ca5d1b22e41da320 100644
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
        if (i915.invert_brightness > 0 ||
            dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-               return panel->backlight.max - val;
+               return panel->backlight.max - val + panel->backlight.min;
        }
 
        return val;
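
Worked example for the brightness-inversion fix: with backlight.min = 10 and
backlight.max = 100, the old "max - val" mapped val = 100 to 0, below the
valid minimum. The new "max - val + min" maps 10 -> 100 and 100 -> 10, so
inverted values stay inside [min, max].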
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e046cc29e87459cc36e988e498ea8..99d39b2aefa675941d42c86b3c9b5a4d2cda937b 100644
@@ -5,7 +5,7 @@ config DRM_MSM
        depends on ARCH_QCOM || (ARM && COMPILE_TEST)
        depends on OF && COMMON_CLK
        depends on MMU
-       select QCOM_MDT_LOADER
+       select QCOM_MDT_LOADER if ARCH_QCOM
        select REGULATOR
        select DRM_KMS_HELPER
        select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc1995a032493838d99e8e31dff9e9..f9eae03aa1dcaef072974d60216fb6b09ef81e66 100644
@@ -15,7 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
 static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 {
        const struct firmware *fw;
+       struct device_node *np;
+       struct resource r;
        phys_addr_t mem_phys;
        ssize_t mem_size;
        void *mem_region = NULL;
        int ret;
 
+       if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+               return -EINVAL;
+
+       np = of_get_child_by_name(dev->of_node, "zap-shader");
+       if (!np)
+               return -ENODEV;
+
+       np = of_parse_phandle(np, "memory-region", 0);
+       if (!np)
+               return -EINVAL;
+
+       ret = of_address_to_resource(np, 0, &r);
+       if (ret)
+               return ret;
+
+       mem_phys = r.start;
+       mem_size = resource_size(&r);
+
        /* Request the MDT file for the firmware */
        ret = request_firmware(&fw, fwname, dev);
        if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
        }
 
        /* Allocate memory for the firmware image */
-       mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+       mem_region = memremap(mem_phys, mem_size,  MEMREMAP_WC);
        if (!mem_region) {
                ret = -ENOMEM;
                goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
                DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 
 out:
+       if (mem_region)
+               memunmap(mem_region);
+
        release_firmware(fw);
 
        return ret;
 }
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-       return -ENODEV;
-}
-#endif
 
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        gpu->funcs->flush(gpu);
 }
 
-struct a5xx_hwcg {
+static const struct {
        u32 offset;
        u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
 };
 
-static const struct {
-       int (*test)(struct adreno_gpu *gpu);
-       const struct a5xx_hwcg *regs;
-       unsigned int count;
-} a5xx_hwcg_regs[] = {
-       { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-               const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
        unsigned int i;
 
-       for (i = 0; i < count; i++)
-               gpu_write(gpu, regs[i].offset, regs[i].value);
+       for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+               gpu_write(gpu, a5xx_hwcg[i].offset,
+                       state ? a5xx_hwcg[i].value : 0);
 
-       gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-       gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-               if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-                       _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-                               a5xx_hwcg_regs[i].count);
-                       return;
-               }
-       }
+       gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+       gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
 }
 
 static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
        return ret;
 }
 
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-       struct device_node *node;
-       int ret;
-
-       if (dev->parent)
-               return 0;
-
-       /* Find the sub-node for the zap shader */
-       node = of_get_child_by_name(parent->of_node, "zap-shader");
-       if (!node) {
-               DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-               return -ENODEV;
-       }
-
-       dev->parent = parent;
-       dev->of_node = node;
-       dev_set_name(dev, "adreno_zap_shader");
-
-       ret = device_register(dev);
-       if (ret) {
-               DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-               goto out;
-       }
-
-       ret = of_reserved_mem_device_init(dev);
-       if (ret) {
-               DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-               device_unregister(dev);
-       }
-
-out:
-       if (ret)
-               dev->parent = NULL;
-
-       return ret;
-}
-
 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 {
        static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
                return -ENODEV;
        }
 
-       ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
-       if (!ret)
-               ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-                       adreno_gpu->info->zapfw);
+       ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
 
        loaded = !ret;
 
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
        /* Enable HWCG */
-       a5xx_enable_hwcg(gpu);
+       a5xx_set_hwcg(gpu, true);
 
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
        DBG("%s", gpu->name);
 
-       if (a5xx_gpu->zap_dev.parent)
-               device_unregister(&a5xx_gpu->zap_dev);
-
        if (a5xx_gpu->pm4_bo) {
                if (a5xx_gpu->pm4_iova)
                        msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
        0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-       0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-       0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-       0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-       0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-       0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-       0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-       0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-       0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-       0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-       0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-       0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-       0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-       0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-       0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-       0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-       0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-       0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-       0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-       0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-       0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-       0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-       0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-       0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-       0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-       ~0
+       0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+       0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+       0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+       0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+       0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+       0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+       0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+       0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+       0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+       0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+       0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+       0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+       0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+       0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+       0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+       0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+       0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+       0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+       0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+       0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+       0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+       0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+       0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+       0xB9A0, 0xB9BF, ~0
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
        seq_printf(m, "status:   %08x\n",
                        gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+       /*
+        * Temporarily disable hardware clock gating before going into
+        * adreno_show to avoid issues while reading the registers
+        */
+       a5xx_set_hwcg(gpu, false);
        adreno_show(gpu, m);
+       a5xx_set_hwcg(gpu, true);
 }
 #endif
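
The zap-shader rework above stops allocating a DMA buffer for the firmware and instead streams it into a fixed reserved-memory region named by a "memory-region" phandle in the device tree, mapped with memremap(). A minimal sketch of that lookup-and-map pattern, error handling trimmed:

    /* Assumes a DT node carrying a "memory-region" phandle, as the
     * zap-shader node above does. MEMREMAP_WC gives a write-combined
     * mapping, appropriate for streaming an image the GPU will read.
     */
    #include <linux/io.h>
    #include <linux/of_address.h>

    static void *map_reserved_region(struct device_node *np, size_t *size)
    {
            struct resource r;

            if (of_address_to_resource(np, 0, &r))
                    return NULL;

            *size = resource_size(&r);
            return memremap(r.start, *size, MEMREMAP_WC); /* memunmap() to undo */
    }
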
 
index 6638bc85645dbad4adf3689bd7d9bae9441173c2..1137092241d593c34e4607e3c723acfb74861972 100644 (file)
@@ -36,8 +36,6 @@ struct a5xx_gpu {
        uint32_t gpmu_dwords;
 
        uint32_t lm_leakage;
-
-       struct device zap_dev;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
 }
 
 bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
 #endif /* __A5XX_GPU_H__ */
index f1ab2703674a2f5d4f533828bb6c8b49df24f571..7414c6bbd582e9597e502305885f0dec909859be 100644 (file)
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
                *value = adreno_gpu->base.fast_rate;
                return 0;
        case MSM_PARAM_TIMESTAMP:
-               if (adreno_gpu->funcs->get_timestamp)
-                       return adreno_gpu->funcs->get_timestamp(gpu, value);
+               if (adreno_gpu->funcs->get_timestamp) {
+                       int ret;
+
+                       pm_runtime_get_sync(&gpu->pdev->dev);
+                       ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+                       pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+                       return ret;
+               }
                return -EINVAL;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
index 9e9c5696bc03547b813ecf2ae56c535265e64bcd..c7b612c3d7717a02d8d64be21dea68f183163917 100644 (file)
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
        struct msm_dsi_phy_clk_request *clk_req)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       int ret;
+
+       ret = dsi_calc_clk_rate(msm_host);
+       if (ret) {
+               pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+               return;
+       }
 
        clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
        clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
                                        struct drm_display_mode *mode)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-       int ret;
 
        if (msm_host->mode) {
                drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
                return -ENOMEM;
        }
 
-       ret = dsi_calc_clk_rate(msm_host);
-       if (ret) {
-               pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-               return ret;
-       }
-
        return 0;
 }
 
index cb5415d6c04b7ab6e1e80503d26b32891a934dee..735a87a699fafafb99b179752e0e7f3c19491389 100644 (file)
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
        unsigned long flags;
-       enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-       enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+       enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+       enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
        int i, plane_cnt = 0;
        bool bg_alpha_enabled = false;
        u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        if (!handle) {
                DBG("Cursor off");
                cursor_enable = false;
+               mdp5_enable(mdp5_kms);
                goto set_cursor;
        }
 
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
        get_roi(crtc, &roi_w, &roi_h);
 
+       mdp5_enable(mdp5_kms);
+
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
        crtc_flush(crtc, flush_mask);
 
 end:
+       mdp5_disable(mdp5_kms);
        if (old_bo) {
                drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
                /* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
        get_roi(crtc, &roi_w, &roi_h);
 
+       mdp5_enable(mdp5_kms);
+
        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
        crtc_flush(crtc, flush_mask);
 
+       mdp5_disable(mdp5_kms);
+
        return 0;
 }
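
The cursor hunks above apply one discipline: every path that writes the LM_CURSOR registers first takes an mdp5_enable() reference, and the shared exit label drops it exactly once, which is why the early "Cursor off" branch also calls mdp5_enable() before jumping ahead. A sketch of the balanced bracketing across goto-based exits, types hypothetical:

    static int cursor_update(struct hw *kms, bool off)
    {
            enable_hw(kms);              /* mdp5_enable() in the diff */
            if (off)
                    goto commit;         /* still holds the reference */
            write_cursor_regs(kms);
    commit:
            flush_hw(kms);
            disable_hw(kms);             /* mdp5_disable(), exactly once */
            return 0;
    }
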
 
index 97f3294fbfc6f9d26f453dac36d5dbb3cb00e93a..70bef51245af89d5bbb292f4495f2fd79dcc9e33 100644 (file)
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
        struct mdp5_interface *intf = mdp5_encoder->intf;
 
        if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-               mdp5_cmd_encoder_disable(encoder);
+               mdp5_cmd_encoder_enable(encoder);
        else
                mdp5_vid_encoder_enable(encoder);
 }
index 5d13fa5381ee37705a0c282bf023b4782fc19268..1c603aef3c59cdff286ce38e84c3e6a4745dd0c2 100644 (file)
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
                const char *name, bool mandatory)
 {
        struct device *dev = &pdev->dev;
-       struct clk *clk = devm_clk_get(dev, name);
+       struct clk *clk = msm_clk_get(pdev, name);
        if (IS_ERR(clk) && mandatory) {
                dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
                return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
        }
 
        /* mandatory clocks: */
-       ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
        if (ret)
                goto fail;
 
        /* optional clocks: */
-       get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+       get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
        /* we need to set a default rate before enabling.  Set a safe
         * rate first, then figure out hw revision, and then set a
index fe3a4de1a4331ff86f0b4f0cc85a48b208bca3b5..61f39c86dd09e53a5860ce880b0f7955e5008c05 100644 (file)
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
        struct mdp5_hw_pipe *right_hwpipe;
        const struct mdp_format *format;
        uint32_t nplanes, config = 0;
-       struct phase_step step = { 0 };
-       struct pixel_ext pe = { 0 };
+       struct phase_step step = { { 0 } };
+       struct pixel_ext pe = { { 0 } };
        uint32_t hdecm = 0, vdecm = 0;
        uint32_t pix_format;
        unsigned int rotation;
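
Both the `{ SSPP_NONE }` hunks in mdp5_crtc.c and the `{ 0 }` hunks here cure the same compiler warning: a single-brace initializer for an aggregate whose first member is itself an array triggers -Wmissing-braces, even though the resulting zero-initialization is identical. A standalone illustration:

    enum mdp5_pipe { SSPP_NONE };   /* stand-in for the real enum */

    /* Both lines zero every element; only the first warns under
     * -Wmissing-braces because the inner array's initializer is
     * not brace-enclosed. */
    enum mdp5_pipe warns[4][2] = { SSPP_NONE };
    enum mdp5_pipe clean[4][2] = { { SSPP_NONE } };
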
index 65f35544c1ec8859018c2afb713fa5120fc43272..a0c60e738db8d7be5e841311832e82f0b45bd7fa 100644 (file)
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
                struct page **pages;
 
                vma = add_vma(obj, aspace);
-               if (IS_ERR(vma))
-                       return PTR_ERR(vma);
+               if (IS_ERR(vma)) {
+                       ret = PTR_ERR(vma);
+                       goto unlock;
+               }
 
                pages = get_pages(obj);
                if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
 fail:
        del_vma(vma);
-
+unlock:
        mutex_unlock(&msm_obj->lock);
        return ret;
 }
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
        if (use_vram) {
                struct msm_gem_vma *vma;
                struct page **pages;
+               struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+               mutex_lock(&msm_obj->lock);
 
                vma = add_vma(obj, NULL);
+               mutex_unlock(&msm_obj->lock);
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto fail;
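
The first hunk repairs an error path that returned with msm_obj->lock still held; once the mutex is taken, every failure must funnel through a label that releases it. The idiom in isolation, step functions hypothetical:

    static int locked_op(struct mutex *lock)
    {
            int ret;

            mutex_lock(lock);

            ret = first_step();
            if (ret)
                    goto unlock;    /* was: return ret -- leaking the lock */

            ret = second_step();
    unlock:
            mutex_unlock(lock);
            return ret;
    }
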
index 6bfca74701410050b20d1a136ae5cdc4454b1306..8a75c0bd8a78b1481e30fdab63f2d14bfc64536d 100644 (file)
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
                struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 {
        struct msm_gem_submit *submit;
-       uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-               (nr_cmds * sizeof(submit->cmd[0]));
+       uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+               ((u64)nr_cmds * sizeof(submit->cmd[0]));
 
        if (sz > SIZE_MAX)
                return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+       if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
                ret = submit_fence_sync(submit);
                if (ret)
                        goto out;
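
nr_bos and nr_cmds are untrusted 32-bit counts from userspace; without the casts, each multiplication is performed in 32 bits and can wrap before being widened for the `sz > SIZE_MAX` check, so a huge count could slip a small size through to the allocator. Casting one operand forces 64-bit arithmetic. A self-contained demonstration:

    #include <stdint.h>

    static uint64_t table_size(uint32_t nr)
    {
            uint64_t wrapped = nr * 16;           /* 32-bit multiply, may wrap */
            uint64_t exact = (uint64_t)nr * 16;   /* 64-bit multiply */

            /* e.g. nr = 0x20000000: wrapped == 0, exact == 0x200000000 */
            return exact - wrapped;               /* nonzero iff it wrapped */
    }
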
index c36321bc87148864db09bd0af4fc38a39cb182f9..d34e331554f3903eaded86cf12fdd4a4ef24507a 100644 (file)
@@ -42,7 +42,7 @@ void
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-       if (!vma->iova)
+       if (!aspace || !vma->iova)
                return;
 
        if (aspace->mmu) {
index c7c84d34d97e20308b926077e7ed4ce6e8d77281..88582af8bd89745b7c78332cf415dd0bc9f24e23 100644 (file)
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
        /* Create output path objects for each VBIOS display path. */
        i = -1;
        while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+               if (ver < 0x40) /* No support for chipsets prior to NV50. */
+                       break;
                if (dcbE.type == DCB_OUTPUT_UNUSED)
                        continue;
                if (dcbE.type == DCB_OUTPUT_EOL)
index 5d450332c2fd79fd8c1052aca84d7d15c5db3ef8..2900f1410d959bc9f4002a6f0c38a3aae5a9c59a 100644 (file)
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
-       int ret;
+       int ret, i;
 
        ret = pm_runtime_get_sync(vop->dev);
        if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
        }
 
        memcpy(vop->regs, vop->regsbak, vop->len);
+       /*
+        * We need to make sure that all windows are disabled before we
+        * enable the crtc. Otherwise we might try to scan from a destroyed
+        * buffer later.
+        */
+       for (i = 0; i < vop->data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+               const struct vop_win_data *win = vop_win->data;
+
+               spin_lock(&vop->reg_lock);
+               VOP_WIN_SET(vop, win, enable, 0);
+               spin_unlock(&vop->reg_lock);
+       }
+
        vop_cfg_done(vop);
 
        /*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
-       int i;
 
        WARN_ON(vop->event);
 
        rockchip_drm_psr_deactivate(&vop->crtc);
 
-       /*
-        * We need to make sure that all windows are disabled before we
-        * disable that crtc. Otherwise we might try to scan from a destroyed
-        * buffer later.
-        */
-       for (i = 0; i < vop->data->win_size; i++) {
-               struct vop_win *vop_win = &vop->win[i];
-               const struct vop_win_data *win = vop_win->data;
-
-               spin_lock(&vop->reg_lock);
-               VOP_WIN_SET(vop, win, enable, 0);
-               spin_unlock(&vop->reg_lock);
-       }
-
-       vop_cfg_done(vop);
-
        drm_crtc_vblank_off(crtc);
 
        /*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
         * Src.x1 can be odd after clipping, but a YUV plane's start
         * point must be aligned to 2 pixels.
         */
-       if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+       if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+               DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        spin_lock(&vop->reg_lock);
 
        VOP_WIN_SET(vop, win, format, format);
-       VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+       VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
        if (is_yuv_support(fb->format->format)) {
                int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
                dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-               VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+               VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
                VOP_WIN_SET(vop, win, uv_mst, dma_addr);
        }
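
yrgb_vir and uv_vir are programmed in 4-byte units; shifting the byte pitch right by 2 truncates, so a pitch that is not a multiple of 4 would leave the tail of every scanline unprogrammed. DIV_ROUND_UP rounds to the next whole unit:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* hypothetical pitch of 2730 bytes (not 4-byte aligned):
     *   2730 >> 2             == 682  -- truncates, last bytes lost
     *   DIV_ROUND_UP(2730, 4) == 683  -- covers the whole line
     * for an aligned pitch the two agree, e.g. 2732 -> 683 either way.
     */
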
 
index 9979fd0c22821d7efa3d7054468e0914619e0692..27eefbfcf3d05f3ad03e3b9f6fe2ae56f68e7684 100644 (file)
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
        act_height = (src_h + vskiplines - 1) / vskiplines;
 
+       if (act_height == dst_h)
+               return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
        return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
 
index 2c4817fb08902427df09223b7ab4046241fc6c77..8fe5b184b4e8a945d68c201a3a29be531594e2d2 100644 (file)
@@ -7,7 +7,6 @@ config DRM_STM
        select DRM_PANEL
        select VIDEOMODE_HELPERS
        select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-       default y
 
        help
          Enable support for the on-chip display controller on
index 1006b230b236f1c977d1c1a7d9bf32a268b7e263..65fa29591d21641fd1bd4e4484d8daeef56f9bdb 100644 (file)
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
 
 config I2C_VERSATILE
        tristate "ARM Versatile/Realview I2C bus support"
-       depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+       depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
        select I2C_ALGOBIT
        help
          Say yes if you want to support the I2C serial bus on ARMs Versatile
index 2ea6d0d25a01a33069bce293ab6858947a0cb01f..143a8fd582b4aeb905ea25b416261a5c1f44a6e9 100644 (file)
@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        }
 
        acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+       /* Some broken DSDTs use 1 MiHz (2^20 = 1048576 Hz) instead of 1 MHz */
+       if (acpi_speed == 1048576)
+               acpi_speed = 1000000;
        /*
         * Find bus speed from the "clock-frequency" device property, ACPI
         * or by using fast mode if neither is set.
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        if (dev->clk_freq != 100000 && dev->clk_freq != 400000
            && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
                dev_err(&pdev->dev,
-                       "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+                       "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+                       dev->clk_freq);
                ret = -EINVAL;
                goto exit_reset;
        }
index 4842ec3a5451ed479446fc13352405aca45697d2..a9126b3cda61bc95f6a9d1282821ab7552484534 100644 (file)
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
                dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }
 
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client)
+{
+       if (!(client && matches))
+               return NULL;
+
+       return acpi_match_device(matches, &client->dev);
+}
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
                                           void *data, void **return_value)
 {
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
 
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
 {
        struct i2c_adapter *adapter = i2c_verify_adapter(dev);
 
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
        return ACPI_HANDLE(dev) == (acpi_handle)data;
 }
 
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
 {
        return ACPI_COMPANION(dev) == data;
 }
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
        struct device *dev;
 
        dev = bus_find_device(&i2c_bus_type, NULL, handle,
-                             i2c_acpi_match_adapter);
+                             i2c_acpi_find_match_adapter);
        return dev ? i2c_verify_adapter(dev) : NULL;
 }
 
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
        struct device *dev;
 
-       dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+       dev = bus_find_device(&i2c_bus_type, NULL, adev,
+                             i2c_acpi_find_match_device);
        return dev ? i2c_verify_client(dev) : NULL;
 }
 
index c89dac7fd2e7b793217119f2ccee849cf75ebcfe..12822a4b8f8f09b5c080f7338a89e0ea00cbb4f2 100644 (file)
@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev)
         * Tree match table entry is supplied for the probing device.
         */
        if (!driver->id_table &&
+           !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
            !i2c_of_match_device(dev->driver->of_match_table, client))
                return -ENODEV;
 
index 3b63f5e5b89cbda662a580c387bfa2d23e2ebcef..3d3d9bf02101bddf06fc6597f107cc3ac8e3beb8 100644 (file)
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
 int i2c_check_7bit_addr_validity_strict(unsigned short addr);
 
 #ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client);
 void i2c_acpi_register_devices(struct i2c_adapter *adap);
 #else /* CONFIG_ACPI */
 static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+                     struct i2c_client *client)
+{
+       return NULL;
+}
 #endif /* CONFIG_ACPI */
 extern struct notifier_block i2c_acpi_notifier;
 
index 2c64d0e0740f0db0c4427af9d6602bc8aaa0ea24..17121329bb793a615e8969a15327e3f07035cdbb 100644 (file)
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
          different sets of pins at run-time.
 
          This driver can also be built as a module. If so, the module will be
-         called pinctrl-i2cmux.
+         called i2c-mux-pinctrl.
 
 config I2C_MUX_REG
        tristate "Register-based I2C multiplexer"
index b97188acc4f1006185a5f8cb4fb3ee0e5cf735cf..2d80fa8a0634aba34b366609d8bcc50f432bb31c 100644 (file)
@@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev)
 
        if (using_legacy_binding) {
                ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+               /*
+                * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master()
+                * will allocate/initialise a new one. Thus we need to update fwspec for
+                * later use.
+                */
+               fwspec = dev->iommu_fwspec;
                if (ret)
                        goto out_free;
        } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
index e5938c791330c9be1203c2698bc511e4f98aaea4..f1bbfd389367ff4530137be199c4063c65f97f5c 100644 (file)
@@ -2170,7 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
                 * from being accepted.
                 */
                card = md->queue.card;
+               spin_lock_irq(md->queue.queue->queue_lock);
                queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+               spin_unlock_irq(md->queue.queue->queue_lock);
                blk_set_queue_dying(md->queue.queue);
                mmc_cleanup_queue(&md->queue);
                if (md->disk->flags & GENHD_FL_UP) {
index 4ffea14b7eb645d92a91d62907d64c97cf8a9998..2bae69e39544452dc323a9a3bd15ae9b3e7de1b9 100644 (file)
@@ -1289,7 +1289,7 @@ out_err:
 static int mmc_select_hs400es(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
-       int err = 0;
+       int err = -EINVAL;
        u8 val;
 
        if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
index 04ff3c97a535143933acec93caa8483a43f633c6..2ab4788d021f0512082c6bd67d1edaa57ba61379 100644 (file)
@@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
        mmc->max_seg_size = mmc->max_req_size;
 
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
-                    MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+                    MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
        mmc->caps |= mmc_pdata(host)->caps;
        if (mmc->caps & MMC_CAP_8_BIT_DATA)
index 5333601f855f88529c04e003eae5e3d19aa59f6d..dc3052751bc13ed2248c218de01849d865dbe952 100644 (file)
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                        p = (char *)&dev->stats;
                else
                        p = (char *)priv;
+
+               if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
+                       continue;
+
                p += s->stat_offset;
                data[j] = *(unsigned long *)p;
                j++;
index de8156c6b2925741534a45a6c3a28a3afe9d1ad6..2bbda71818adb022853964dd6d51a14c26f7cd19 100644 (file)
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
        if (data[IFLA_GENEVE_ID]) {
                __u32 vni =  nla_get_u32(data[IFLA_GENEVE_ID]);
 
-               if (vni >= GENEVE_VID_MASK)
+               if (vni >= GENEVE_N_VID)
                        return -ERANGE;
        }
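
A Geneve VNI is a 24-bit field, so every value from 0 through 2^24 - 1 is legal. Assuming the driver's usual definitions, the old comparison against the mask rejected the single highest valid VNI:

    #define GENEVE_N_VID    (1u << 24)          /* 16777216 possible VNIs */
    #define GENEVE_VID_MASK (GENEVE_N_VID - 1)  /* 0x00FFFFFF */

    /* vni == 0xFFFFFF is valid, but "vni >= GENEVE_VID_MASK" returned
     * -ERANGE for it; "vni >= GENEVE_N_VID" only rejects values that
     * do not fit in 24 bits. */
    static int vni_in_range(u32 vni)
    {
            return vni < GENEVE_N_VID;
    }
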
 
index af0cc3456dc1b48b1325c06c5edd2ca8cc22a640..b4b7eab2940024024c46ead23d6b1c415fa146f7 100644 (file)
@@ -4259,6 +4259,41 @@ int pci_reset_function(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
+/**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device.  This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset.  It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+       int rc;
+
+       rc = pci_probe_reset_function(dev);
+       if (rc)
+               return rc;
+
+       pci_dev_save_and_disable(dev);
+
+       rc = __pci_reset_function_locked(dev);
+
+       pci_dev_restore(dev);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
 /**
  * pci_try_reset_function - quiesce and reset a PCI device function
  * @dev: PCI device to reset
index 20f1b44939944614ff270c757fc7152f901e9f09..04e929fd0ffee494cc744cf495e5acd9e437ea6b 100644 (file)
@@ -1547,6 +1547,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
                },
        },
+       {
+               .ident = "HP Chromebook 11 G5 (Setzer)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+               },
+       },
        {
                .ident = "Acer Chromebook R11 (Cyan)",
                .matches = {
index 4d4ef42a39b5faaa1969d20a5aeeedffef90074c..86c4b3fab7b0ea8f0abfdf36e5a2b035e024ec66 100644 (file)
@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
 
 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
 static const unsigned int mrfld_pwm0_pins[] = { 144 };
 static const unsigned int mrfld_pwm1_pins[] = { 145 };
 static const unsigned int mrfld_pwm2_pins[] = { 132 };
index f024e25787fc603c3469ea75de53452743d6af16..0c6d7812d6fd981b95f9d526cb9d6645e4b7855d 100644 (file)
@@ -37,7 +37,7 @@
 #define IRQ_STATUS     0x10
 #define IRQ_WKUP       0x18
 
-#define NB_FUNCS 2
+#define NB_FUNCS 3
 #define GPIO_PER_REG   32
 
 /**
@@ -126,6 +126,16 @@ struct armada_37xx_pinctrl {
                .funcs = {_func1, "gpio"}       \
        }
 
+#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \
+       {                                       \
+               .name = _name,                  \
+               .start_pin = _start,            \
+               .npins = _nr,                   \
+               .reg_mask = _mask,              \
+               .val = {_v1, _v2, _v3}, \
+               .funcs = {_f1, _f2, "gpio"}     \
+       }
+
 #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \
                      _f1, _f2)                         \
        {                                               \
@@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
        PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
        PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
        PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
-       PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
+       PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
        PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
        PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
        PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
        PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
-       PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"),
+       PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
+                      "mii", "mii_err"),
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_nb = {
@@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_sb = {
-       .nr_pins = 29,
+       .nr_pins = 30,
        .name = "GPIO2",
        .groups = armada_37xx_sb_groups,
        .ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
@@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
 {
        int f;
 
-       for (f = 0; f < NB_FUNCS; f++)
+       for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
                if (!strcmp(grp->funcs[f], func))
                        return f;
 
@@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
                for (j = 0; j < grp->extra_npins; j++)
                        grp->pins[i+j] = grp->extra_pin + j;
 
-               for (f = 0; f < NB_FUNCS; f++) {
+               for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) {
                        int ret;
                        /* check for unique functions and count groups */
                        ret = armada_37xx_add_function(info->funcs, &funcsize,
@@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
                        struct armada_37xx_pin_group *gp = &info->groups[g];
                        int f;
 
-                       for (f = 0; f < NB_FUNCS; f++) {
+                       for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
                                if (strcmp(gp->funcs[f], name) == 0) {
                                        *groups = gp->name;
                                        groups++;
index 159580c04b14b138c5ec78b6768db2224d63fb58..47a392bc73c821203abe1c7cb1e966db9a40a3ba 100644 (file)
@@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
                  SUNXI_FUNCTION_VARIANT(0x3, "emac",   /* ETXD1 */
                                         PINCTRL_SUN7I_A20),
                  SUNXI_FUNCTION(0x4, "keypad"),        /* IN6 */
+                 SUNXI_FUNCTION(0x5, "sim"),           /* DET */
                  SUNXI_FUNCTION_IRQ(0x6, 16),          /* EINT16 */
                  SUNXI_FUNCTION(0x7, "csi1")),         /* D16 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
index a433a306a2d06ae11dd2a8c61830e0c76c4e7862..c75e094b2d90779f92570a534fd1c8d53e6a9e97 100644 (file)
@@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183};
 static const int usb1_muxvals[] = {0, 0};
 static const unsigned usb2_pins[] = {184, 185};
 static const int usb2_muxvals[] = {0, 0};
-static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_pins[] = {187, 188};
 static const int usb3_muxvals[] = {0, 0};
 static const unsigned port_range0_pins[] = {
        300, 301, 302, 303, 304, 305, 306, 307,         /* PORT0x */
index 787e3967bd5c5741aeb7a2cb96c18e38901092ed..f828ee340a98238052448d15a5f7604c286926dd 100644 (file)
@@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
        struct zx_pinctrl_soc_info *info = zpctl->info;
        const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
        struct zx_pin_data *data = pindesc->drv_data;
-       struct zx_mux_desc *mux = data->muxes;
-       u32 mask = (1 << data->width) - 1;
-       u32 offset = data->offset;
-       u32 bitpos = data->bitpos;
+       struct zx_mux_desc *mux;
+       u32 mask, offset, bitpos;
        struct function_desc *func;
        unsigned long flags;
        u32 val, mval;
@@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
        if (!data)
                return -EINVAL;
 
+       mux = data->muxes;
+       mask = (1 << data->width) - 1;
+       offset = data->offset;
+       bitpos = data->bitpos;
+
        func = pinmux_generic_get_function(pctldev, func_selector);
        if (!func)
                return -EINVAL;
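
This hunk fixes a dereference that ran before the NULL check could: the local initializers read data->muxes, data->width and friends at the top of the function, lines before `if (!data)`. Distilled into a minimal form:

    #include <linux/errno.h>

    struct pin_data { int width; };     /* stand-in for zx_pin_data */

    static int broken(struct pin_data *data)
    {
            int width = data->width;    /* dereferenced unconditionally */

            if (!data)                  /* too late if data was NULL */
                    return -EINVAL;
            return width;
    }

    static int fixed(struct pin_data *data)
    {
            int width;

            if (!data)
                    return -EINVAL;
            width = data->width;        /* read only after the check */
            return width;
    }
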
index c8989c62a2621b88cf8b9d0c3001a37a31d5e151..858fefd67ebed72dbe59cc4e07806be1b8895577 100644 (file)
@@ -1150,3 +1150,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
                        PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+       /*
+        * Our dear uPD72020{1,2} friend only partially resets when
+        * asked to via the XHCI interface, and may end up doing DMA
+        * at the wrong addresses, as it keeps the top 32 bits of some
+        * addresses from its previous programming under obscure
+        * circumstances.
+        * Give it a good whack at probe time. Unfortunately, this
+        * needs to happen before we've had a chance to discover any
+        * quirk, or the system will be in a rather bad state.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+           (pdev->device == 0x0014 || pdev->device == 0x0015))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
index 6559944801987728a1db6ba31f09db51b92362e3..5582cbafecd4c1a3ddc5443d6cc9182a9b9bc89f 100644 (file)
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
 #else
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
index 5b0fa553c8bc940e88a6db731cf6dfeb0c9fb971..8071c8fdd15e741b008af64075cda3c87072bfb4 100644 (file)
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        driver = (struct hc_driver *)id->driver_data;
 
+       /* For some HW implementations, an XHCI reset is just not enough... */
+       if (usb_xhci_needs_pci_reset(dev)) {
+               dev_info(&dev->dev, "Resetting\n");
+               if (pci_reset_function_locked(dev))
+                       dev_warn(&dev->dev, "Reset failed");
+       }
+
        /* Prevent runtime suspending between USB-2 and USB-3 initialization */
        pm_runtime_get_noresume(&dev->dev);
 
index ff01bed7112f1566ca13b3e17330851bf02fec05..1e784adb89b17534ce31751b546ef20801b2427f 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/efi.h>
 
 static bool request_mem_succeeded = false;
+static bool nowc = false;
 
 static struct fb_var_screeninfo efifb_defined = {
        .activate               = FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
                                screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
                        else if (!strncmp(this_opt, "width:", 6))
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+                       else if (!strcmp(this_opt, "nowc"))
+                               nowc = true;
                }
        }
 
@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+       if (nowc)
+               info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+       else
+               info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
                pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
                        efifb_fix.smem_len, efifb_fix.smem_start);
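
ioremap_wc() maps the framebuffer write-combined, which batches CPU stores for speed; on machines where write-combined access to the EFI framebuffer misbehaves, the new option falls back to a plain uncached ioremap() mapping. Assuming the standard efifb option syntax, it is selected from the kernel command line:

    video=efifb:nowc
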
index c166e0725be5dab13e9a685a93ea7ea9c23a3351..ba82f97fb42b2d10fdbebd227fcb7e5eb19dcbdc 100644 (file)
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
        imxfb_disable_controller(fbi);
 
        unregister_framebuffer(info);
-
+       fb_dealloc_cmap(&info->cmap);
        pdata = dev_get_platdata(&pdev->dev);
        if (pdata && pdata->exit)
                pdata->exit(fbi->pdev);
-
-       fb_dealloc_cmap(&info->cmap);
-       kfree(info->pseudo_palette);
-       framebuffer_release(info);
-
        dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
                    fbi->map_dma);
-
        iounmap(fbi->regs);
        release_mem_region(res->start, resource_size(res));
+       kfree(info->pseudo_palette);
+       framebuffer_release(info);
 
        return 0;
 }
index eecf695c16f41b6996520e47b8fa7e1b71efbabc..09e5bb013d28071c69b63b1968ef6eca44252f1e 100644 (file)
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {
 
 static int __init omap_dss_probe(struct platform_device *pdev)
 {
-       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        int r;
 
        core.pdev = pdev;
index 3ee4fdc3da9ec359ad847afa36354240329a2da6..ab60051be6e533eb167a72e590494f0a46e3a488 100644 (file)
@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
 {
        struct fuse_file *ff;
 
-       ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+       ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (unlikely(!ff))
                return NULL;
 
@@ -609,7 +609,7 @@ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
        struct fuse_io_priv *io = req->io;
        ssize_t pos = -1;
 
-       fuse_release_user_pages(req, !io->write);
+       fuse_release_user_pages(req, io->should_dirty);
 
        if (io->write) {
                if (req->misc.write.in.size != req->misc.write.out.size)
@@ -1316,7 +1316,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                       loff_t *ppos, int flags)
 {
        int write = flags & FUSE_DIO_WRITE;
-       bool should_dirty = !write && iter_is_iovec(iter);
        int cuse = flags & FUSE_DIO_CUSE;
        struct file *file = io->file;
        struct inode *inode = file->f_mapping->host;
@@ -1346,6 +1345,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        inode_unlock(inode);
        }
 
+       io->should_dirty = !write && iter_is_iovec(iter);
        while (count) {
                size_t nres;
                fl_owner_t owner = current->files;
@@ -1360,7 +1360,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
                if (!io->async)
-                       fuse_release_user_pages(req, should_dirty);
+                       fuse_release_user_pages(req, io->should_dirty);
                if (req->out.h.error) {
                        err = req->out.h.error;
                        break;
@@ -1669,6 +1669,7 @@ err_nofile:
 err_free:
        fuse_request_free(req);
 err:
+       mapping_set_error(page->mapping, error);
        end_page_writeback(page);
        return error;
 }
index 1bd7ffdad593977013c1ddd233b2a91093471471..bd4d2a3e1ec1b8cc0af29bcb8c26f13d82708804 100644 (file)
@@ -249,6 +249,7 @@ struct fuse_io_priv {
        size_t size;
        __u64 offset;
        bool write;
+       bool should_dirty;
        int err;
        struct kiocb *iocb;
        struct file *file;
index 8a428498d6b21f08c8c26ef184ff9f4332b5cdd0..509a61668d902b84f6756e2ed1bcb22a6d7020a5 100644 (file)
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_FILE_MAPPED));
        show_val_kb(m, "Shmem:          ", i.sharedram);
        show_val_kb(m, "Slab:           ",
-                   global_page_state(NR_SLAB_RECLAIMABLE) +
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE) +
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 
        show_val_kb(m, "SReclaimable:   ",
-                   global_page_state(NR_SLAB_RECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE));
        show_val_kb(m, "SUnreclaim:     ",
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
        seq_printf(m, "KernelStack:    %8lu kB\n",
                   global_page_state(NR_KERNEL_STACK_KB));
        show_val_kb(m, "PageTables:     ",
index b836fd61ed878a38d25d5ffe44bb86e30066955c..fe8f3265e8779ac18a5694ef600c024f9e88f281 100644 (file)
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
+#include <linux/uaccess.h>
 
 #include <asm/elf.h>
-#include <linux/uaccess.h>
+#include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
 
@@ -1008,6 +1009,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
+       struct mmu_gather tlb;
        int itype;
        int rv;
 
@@ -1054,6 +1056,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                }
 
                down_read(&mm->mmap_sem);
+               tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1075,7 +1078,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
-               flush_tlb_mm(mm);
+               tlb_finish_mmu(&tlb, 0, -1);
                up_read(&mm->mmap_sem);
 out_mm:
                mmput(mm);
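
clear_refs_write previously issued a bare flush_tlb_mm() after the walk; wrapping the walk in tlb_gather_mmu()/tlb_finish_mmu() instead ties the flush into the mmu_gather machinery, so the pending-flush accounting reworked in mm_types.h further down sees the whole operation. The bracket in outline, walk helper hypothetical:

    static void clear_refs_over_mm(struct mm_struct *mm)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm, 0, -1);   /* 0..-1: whole address space */
            walk_and_clear_bits(mm);           /* page-table walk */
            tlb_finish_mmu(&tlb, 0, -1);       /* flushes TLB, ends bookkeeping */
    }
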
index 06ea26b8c996f3cc7a9d6fd177260f89394fb325..b0d5897bc4e6d0e019c79f65b6d41df1d3b0d050 100644 (file)
@@ -1600,7 +1600,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                                   uffdio_copy.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
                return -EFAULT;
@@ -1647,7 +1647,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                                     uffdio_zeropage.range.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
                return -EFAULT;
index 8afa4335e5b2bfd0c42c00e1b1506d4e1f7377ac..faddde44de8c902e6884e64eeb8b22bd0d11b75a 100644 (file)
@@ -112,10 +112,11 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+       struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
-                                                       unsigned long end);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                        unsigned long start, unsigned long end, bool force);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                   int page_size);
 
index 723cd54b94da84f95cd18934d14198b6d21040dd..beabdbc0842059b36a2c6b22b882e04ddc968f75 100644 (file)
@@ -843,7 +843,7 @@ struct dev_links_info {
  *             hibernation, system resume and during runtime PM transitions
  *             along with subsystem-level and driver-level callbacks.
  * @pins:      For device pin management.
- *             See Documentation/pinctrl.txt for details.
+ *             See Documentation/driver-api/pinctl.rst for details.
  * @msi_list:  Hosts MSI descriptors
  * @msi_domain: The generic MSI domain this device is using.
  * @numa_node: NUMA node this device is close to.
index 00ca5b86a753f8023cad87ce02cbe90748255383..d501d3956f13f041864dc25f0d7e8724ea2b5210 100644 (file)
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
 #define I2C_CLASS_HWMON                (1<<0)  /* lm_sensors, ... */
 #define I2C_CLASS_DDC          (1<<3)  /* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD          (1<<7)  /* Memory modules */
-#define I2C_CLASS_DEPRECATED   (1<<8)  /* Warn users that adapter will stop using classes */
+/* Warn users that the adapter doesn't support classes anymore */
+#define I2C_CLASS_DEPRECATED   (1<<8)
 
 /* Internal numbers to terminate lists */
 #define I2C_CLIENT_END         0xfffeU
index 7f384bb62d8ec6bc7eafa25828b0716be63c7ccb..3cadee0a350889f748e7b1a999b449ae003e9c3f 100644 (file)
@@ -487,14 +487,12 @@ struct mm_struct {
        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
        /*
         * An operation with batched TLB flushing is going on. Anything that
         * can move process memory needs to flush the TLB when moving a
         * PROT_NONE or PROT_NUMA mapped page.
         */
-       bool tlb_flush_pending;
-#endif
+       atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
        /* See flush_tlb_batched_pending() */
        bool tlb_flush_batched;
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
        return mm->cpu_vm_mask_var;
 }
 
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+                               unsigned long start, unsigned long end);
+
 /*
  * Memory barriers to keep this state in sync are graciously provided by
  * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers ensure that tlb_flush_pending updates, which happen while the
+ * lock is not taken, and the PTE updates, which happen while the lock is
+ * taken, are properly serialized with respect to each other.
  */
 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 {
-       barrier();
-       return mm->tlb_flush_pending;
+       return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if there are two above TLB batching threads in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+       return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+       atomic_set(&mm->tlb_flush_pending, 0);
 }
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
 {
-       mm->tlb_flush_pending = true;
+       atomic_inc(&mm->tlb_flush_pending);
 
        /*
-        * Guarantee that the tlb_flush_pending store does not leak into the
+        * Guarantee that the tlb_flush_pending increase does not leak into the
         * critical section updating the page tables
         */
        smp_mb__before_spinlock();
 }
+
 /* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
-       barrier();
-       mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
-       return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 {
+       /*
+        * Guarantee that the tlb_flush_pending decrease does not leak into the
+        * critical section, since we must order the PTE change and changes to
+        * the pending TLB flush indication. We could have relied on TLB flush
+        * as a memory barrier, but this behavior is not clearly documented.
+        */
+       smp_mb__before_atomic();
+       atomic_dec(&mm->tlb_flush_pending);
 }
-#endif
 
 struct vm_fault;
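
Turning tlb_flush_pending into an atomic counter lets several flushers overlap: each takes a reference before its first PTE change and drops it after its flush, readers under the PTE lock treat "pending" as count > 0, and mm_tlb_flush_nested() (count > 1) detects a second batcher running concurrently. The intended usage shape, with hypothetical PTE and flush helpers:

    static void change_prot_range(struct mm_struct *mm)
    {
            inc_tlb_flush_pending(mm);   /* before the first PTE update */
            update_ptes(mm);             /* PTE rewrites under the PTE lock */
            flush_tlb_stub(mm);          /* the actual TLB flush */
            dec_tlb_flush_pending(mm);   /* only after the flush is done */
    }
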
 
index 4869e66dd659a6bc8fe4ad90df2ed9d3ff98ccac..a75c136738529db410baf870f3baafc6e178a5a0 100644 (file)
@@ -1067,6 +1067,7 @@ void pcie_flr(struct pci_dev *dev);
 int __pci_reset_function(struct pci_dev *dev);
 int __pci_reset_function_locked(struct pci_dev *dev);
 int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
 int pci_try_reset_function(struct pci_dev *dev);
 int pci_probe_reset_slot(struct pci_slot *slot);
 int pci_reset_slot(struct pci_slot *slot);
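
A hedged sketch of the new locked variant's calling convention; unlike
pci_reset_function(), it assumes the caller already holds the device lock
(the surrounding driver code here is hypothetical):

	device_lock(&pdev->dev);
	err = pci_reset_function_locked(pdev);	/* no internal device locking */
	device_unlock(&pdev->dev);
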
index 231d3075815adfa63d462a236e66f214c3216117..e91d1b6a260d5996583a4365d4020524ad57700a 100644 (file)
@@ -81,8 +81,8 @@
  *     it.
  * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
  *     value on the line. Use argument 1 to indicate high level, argument 0 to
- *     indicate low level. (Please see Documentation/pinctrl.txt, section
- *     "GPIO mode pitfalls" for a discussion around this parameter.)
+ *     indicate low level. (Please see Documentation/driver-api/pinctl.rst,
+ *     section "GPIO mode pitfalls" for a discussion around this parameter.)
  * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
  *     supplies, the argument to this parameter (on a custom format) tells
  *     the driver which alternative power source to use.
index 5726107963b2d9930d61c3cf5c486abfe244c18a..0ad87c434ae6a344984837e8dd053fe7705d21f1 100644 (file)
@@ -43,12 +43,13 @@ struct sync_file {
 #endif
 
        wait_queue_head_t       wq;
+       unsigned long           flags;
 
        struct dma_fence        *fence;
        struct dma_fence_cb cb;
 };
 
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
 
 struct sync_file *sync_file_create(struct dma_fence *fence);
 struct dma_fence *sync_file_get_fence(int fd);
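
With POLL_ENABLED now a bit in the sync_file's own flags word rather than one
of the fence's DMA_FENCE_FLAG_USER_BITS, the poll path can claim it without
colliding with other users of the fence's flag bits. A minimal sketch of that
use (mirroring, not quoting, the poll code; the callback name is assumed):

	if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags))
		dma_fence_add_callback(sync_file->fence, &sync_file->cb,
				       fence_check_cb_func);
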
index 26c54f6d595d4070c7708ef22daf7533468404a2..ad4eb2863e70ee195f9abc68e6b8c3c3020f27bc 100644 (file)
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
        __u32 size;           /* in, cmdstream size */
        __u32 pad;
        __u32 nr_relocs;      /* in, number of submit_reloc's */
-       __u64 __user relocs;  /* in, ptr to array of submit_reloc's */
+       __u64 relocs;         /* in, ptr to array of submit_reloc's */
 };
 
 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
        __u32 fence;          /* out */
        __u32 nr_bos;         /* in, number of submit_bo's */
        __u32 nr_cmds;        /* in, number of submit_cmd's */
-       __u64 __user bos;     /* in, ptr to array of submit_bo's */
-       __u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+       __u64 bos;            /* in, ptr to array of submit_bo's */
+       __u64 cmds;           /* in, ptr to array of submit_cmd's */
        __s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 };
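
Dropping __user from a plain __u64 is correct because these fields carry user
pointers by value; userspace casts the pointer in, and the kernel side can
recover it with u64_to_user_ptr(). A hypothetical userspace sketch (array
names are placeholders):

	struct drm_msm_gem_submit req = {0};

	req.nr_bos  = nr_bos;
	req.bos     = (__u64)(uintptr_t)bos_array;	/* cast via uintptr_t for 32-bit safety */
	req.nr_cmds = nr_cmds;
	req.cmds    = (__u64)(uintptr_t)cmds_array;
	drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
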
 
index 17921b0390b4f91113bcf8c9ccac5c1225751460..e075b7780421dee1d8243b9dc178248398c5f189 100644 (file)
@@ -807,7 +807,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_init_aio(mm);
        mm_init_owner(mm, p);
        mmu_notifier_mm_init(mm);
-       clear_tlb_flush_pending(mm);
+       init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        mm->pmd_huge_pte = NULL;
 #endif
index 16dbe4c938953a70a49faf0a5264af8c19a9491f..f50b434756c18eb0c200ec6e3b4db16231062f24 100644 (file)
@@ -670,13 +670,14 @@ again:
                 * this reference was taken by ihold under the page lock
                 * pinning the inode in place so i_lock was unnecessary. The
                 * only way for this check to fail is if the inode was
-                * truncated in parallel so warn for now if this happens.
+                * truncated in parallel, which is almost certainly an
+                * application bug. In such a case, just retry.
                 *
                 * We are not calling into get_futex_key_refs() in file-backed
                 * cases, therefore a successful atomic_inc return below will
                 * guarantee that get_futex_key() will still imply smp_mb(); (B).
                 */
-               if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+               if (!atomic_inc_not_zero(&inode->i_count)) {
                        rcu_read_unlock();
                        put_page(page);
 
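The dropped WARN_ON_ONCE reflects that a zero i_count here is a legal race
with inode teardown, handled by retrying. The generic shape of this
optimistic RCU revalidation, with placeholder names:

	rcu_read_lock();
	obj = lookup(key);				/* placeholder lookup */
	if (obj && !atomic_inc_not_zero(&obj->refcount)) {
		rcu_read_unlock();			/* lost to the final put */
		goto again;				/* redo the lookup */
	}
	rcu_read_unlock();
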
index 222317721c5a09291c6b78fc839e722b2196b177..0972a8e09d082d99c7f197cbe6bd4fdb6475ba33 100644 (file)
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 {
        unsigned long size;
 
-       size = global_page_state(NR_SLAB_RECLAIMABLE)
+       size = global_node_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
index 7d315fdb9f13d9b17d8a2aa129c75790c7599bdb..cf7b129b0b2b08adcc1aae98f990c384761532dc 100644 (file)
@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
        if (in_task()) {
                unsigned int fail_nth = READ_ONCE(current->fail_nth);
 
-               if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
-                       goto fail;
+               if (fail_nth) {
+                       if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+                               goto fail;
 
-               return false;
+                       return false;
+               }
        }
 
        /* No need to check any other properties if the probability is 0 */
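
After this change an armed fail_nth counter fully decides the outcome instead
of falling through to the probabilistic checks. A hedged userspace sketch of
arming it through the proc file this attribute is exposed as:

	#include <fcntl.h>
	#include <unistd.h>

	int fd = open("/proc/self/fail-nth", O_WRONLY);
	if (fd >= 0) {
		write(fd, "5", 1);	/* fail at the 5th fault-injection site hit */
		close(fd);
	}
	/* ... then run the syscall under test ... */
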
index 6c1d678bcf8b00ff7b2d2fc70747045e6c14327a..ff9148969b9233ba7502b992b026d31e100be497 100644 (file)
@@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_driver);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "driver:\tEMTPY\n");
+                               "driver:\tEMPTY\n");
 
        if (config->test_fs)
                len += snprintf(buf+len, PAGE_SIZE - len,
@@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_fs);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "fs:\tEMTPY\n");
+                               "fs:\tEMPTY\n");
 
        mutex_unlock(&test_dev->config_mutex);
 
@@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
                                                      strlen(test_str));
                break;
        case TEST_KMOD_FS_TYPE:
-               break;
                kfree_const(config->test_fs);
                config->test_driver = NULL;
                copied = config_copy_test_fs(config, test_str,
                                             strlen(test_str));
+               break;
        default:
                mutex_unlock(&test_dev->config_mutex);
                return -EINVAL;
@@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
                                            int (*test_sync)(struct kmod_test_device *test_dev))
 {
        int ret;
-       long new;
+       unsigned long new;
        unsigned int old_val;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
                                             unsigned int max)
 {
        int ret;
-       long new;
+       unsigned long new;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
        struct kmod_test_device *test_dev = NULL;
        int ret;
 
-       mutex_unlock(&reg_dev_mutex);
+       mutex_lock(&reg_dev_mutex);
 
        /* int should suffice for number of devices, test for wrap */
        if (unlikely(num_test_devs + 1 < 0)) {
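
Three separate fixes land in this file: an early break that made its case
body unreachable, signed parsing of an unsigned value, and a mutex_unlock()
where a mutex_lock() was meant. The unsigned parse-and-bound idiom in
isolation (buf and max are hypothetical):

	unsigned long new;
	int ret = kstrtoul(buf, 10, &new);	/* rejects "-1" instead of sign-wrapping */

	if (ret)
		return ret;
	if (new > max)				/* hypothetical upper bound */
		return -EINVAL;
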
index 9075aa54e95517cdbb1094f04e72c36357401e52..b06d9fe23a28c14f71c3263daaa84965dadeee45 100644 (file)
@@ -24,7 +24,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
 {
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
-                               __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO);
+                                      __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;
 
index db1cd26d8752022b7f8b576cdff78f5412209d39..5715448ab0b53db5d8bd4b64d47706f7deaaf7a6 100644 (file)
@@ -124,9 +124,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
                "tlb_flush_pending %d\n"
-#endif
                "def_flags: %#lx(%pGv)\n",
 
                mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
@@ -158,9 +156,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
-               mm->tlb_flush_pending,
-#endif
+               atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
 }
index 86975dec0ba160feadfb8aa0d13b8f2be943638d..216114f6ef0b7f8c09378edd3615d6a39527ead0 100644 (file)
@@ -1495,6 +1495,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                goto clear_pmdnuma;
        }
 
+       /*
+        * The page_table_lock above provides a memory barrier that
+        * pairs with the one in change_protection_range().
+        */
+       if (mm_tlb_flush_pending(vma->vm_mm))
+               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and access rights restored.
index a1a0ac0ad6f67ad479916fcbc43036973ddca824..31e207cb399bebd11371e46eb26f625a5b74487c 100644 (file)
@@ -4062,9 +4062,9 @@ out:
        return ret;
 out_release_unlock:
        spin_unlock(ptl);
-out_release_nounlock:
        if (vm_shared)
                unlock_page(page);
+out_release_nounlock:
        put_page(page);
        goto out;
 }
index 4dc92f138786988c4ef0f9d371ff8a48b2e6e905..db20f8436bc3c15bf05f86ccec5e7b1f80d807cc 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1038,7 +1038,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                goto out_unlock;
 
        if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
-           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
+           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
+                                               mm_tlb_flush_pending(mm)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
index f65beaad319be4c597f9a071771e5f376234d753..e158f7ac67300b10b8827fe6825667506095f550 100644 (file)
@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
        return true;
 }
 
-/* tlb_gather_mmu
- *     Called to initialize an (on-stack) mmu_gather structure for page-table
- *     tear-down from @mm. The @fullmm argument is used when @mm is without
- *     users and we're going to destroy the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
@@ -275,10 +271,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
        struct mmu_gather_batch *batch, *next;
 
+       if (force)
+               __tlb_adjust_range(tlb, start, end - start);
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -398,6 +398,34 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+/* tlb_gather_mmu
+ *     Called to initialize an (on-stack) mmu_gather structure for page-table
+ *     tear-down from @mm. The @fullmm argument is used when @mm is without
+ *     users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
+{
+       arch_tlb_gather_mmu(tlb, mm, start, end);
+       inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end)
+{
+       /*
+        * If parallel threads are doing PTE changes on the same range under a
+        * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB flush
+        * by batching, a thread with a stale TLB entry can fail to flush it
+        * after observing pte_none or !pte_dirty, for example. So flush the
+        * TLB forcefully if we detect parallel PTE batching threads.
+        */
+       bool force = mm_tlb_flush_nested(tlb->mm);
+
+       arch_tlb_finish_mmu(tlb, start, end, force);
+       dec_tlb_flush_pending(tlb->mm);
+}
+
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
index 62767155187356d54d1fa7333ad402e76183ca0b..d68a41da6abb0743d6b09cc49c5c9524463715c3 100644 (file)
@@ -1937,12 +1937,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-       /*
-        * We are not sure a pending tlb flush here is for a huge page
-        * mapping or not. Hence use the tlb range variant
-        */
-       if (mm_tlb_flush_pending(mm))
-               flush_tlb_range(vma, mmun_start, mmun_end);
 
        /* Prepare a page as a migration target */
        __SetPageLocked(new_page);
index 4180ad8cc9c5e70c661efc8f30416af40e9c0066..bd0f409922cb2fc133f9fecba64a839380d4f937 100644 (file)
@@ -244,7 +244,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
-       set_tlb_flush_pending(mm);
+       inc_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
@@ -256,7 +256,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
-       clear_tlb_flush_pending(mm);
+       dec_tlb_flush_pending(mm);
 
        return pages;
 }
index fc32aa81f3593537cc2b11d5f63b5c5f517097a4..6d00f746c2fd96452661fde3f704289eed7f1f70 100644 (file)
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
         * Part of the reclaimable slab consists of items that are in use,
         * and cannot be freed. Cap this estimate at the low watermark.
         */
-       available += global_page_state(NR_SLAB_RECLAIMABLE) -
-                    min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+       available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+                    min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+                        wmark_low);
 
        if (available < 0)
                available = 0;
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                global_node_page_state(NR_FILE_DIRTY),
                global_node_page_state(NR_WRITEBACK),
                global_node_page_state(NR_UNSTABLE_NFS),
-               global_page_state(NR_SLAB_RECLAIMABLE),
-               global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_node_page_state(NR_SLAB_RECLAIMABLE),
+               global_node_page_state(NR_SLAB_UNRECLAIMABLE),
                global_node_page_state(NR_FILE_MAPPED),
                global_node_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
@@ -7668,7 +7669,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_info("%s: [%lx, %lx) PFNs busy\n",
+               pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
index c8993c63eb259b3a5302a058ce231d1290fc9b66..c1286d47aa1fad7fee7ea5bb865a2dc7efd672f2 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                .flags = PVMW_SYNC,
        };
        int *cleaned = arg;
+       bool invalidation_needed = false;
 
        while (page_vma_mapped_walk(&pvmw)) {
                int ret = 0;
-               address = pvmw.address;
                if (pvmw.pte) {
                        pte_t entry;
                        pte_t *pte = pvmw.pte;
@@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pte_dirty(*pte) && !pte_write(*pte))
                                continue;
 
-                       flush_cache_page(vma, address, pte_pfn(*pte));
-                       entry = ptep_clear_flush(vma, address, pte);
+                       flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
+                       entry = ptep_clear_flush(vma, pvmw.address, pte);
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
-                       set_pte_at(vma->vm_mm, address, pte, entry);
+                       set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
                        ret = 1;
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
                                continue;
 
-                       flush_cache_page(vma, address, page_to_pfn(page));
-                       entry = pmdp_huge_clear_flush(vma, address, pmd);
+                       flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+                       entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
                        entry = pmd_wrprotect(entry);
                        entry = pmd_mkclean(entry);
-                       set_pmd_at(vma->vm_mm, address, pmd, entry);
+                       set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                }
 
                if (ret) {
-                       mmu_notifier_invalidate_page(vma->vm_mm, address);
                        (*cleaned)++;
+                       invalidation_needed = true;
                }
        }
 
+       if (invalidation_needed) {
+               mmu_notifier_invalidate_range(vma->vm_mm, address,
+                               address + (PAGE_SIZE << compound_order(page)));
+       }
+
        return true;
 }
 
@@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        };
        pte_t pteval;
        struct page *subpage;
-       bool ret = true;
+       bool ret = true, invalidation_needed = false;
        enum ttu_flags flags = (enum ttu_flags)arg;
 
        /* munlock has nothing to gain from examining un-locked vmas */
@@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                VM_BUG_ON_PAGE(!pvmw.pte, page);
 
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-               address = pvmw.address;
-
 
                if (!(flags & TTU_IGNORE_ACCESS)) {
-                       if (ptep_clear_flush_young_notify(vma, address,
+                       if (ptep_clear_flush_young_notify(vma, pvmw.address,
                                                pvmw.pte)) {
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                }
 
                /* Nuke the page table entry. */
-               flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+               flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
                if (should_defer_flush(mm, flags)) {
                        /*
                         * We clear the PTE but do not flush so potentially
@@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * transition on a cached TLB entry is written through
                         * and traps if the PTE is unmapped.
                         */
-                       pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+                       pteval = ptep_get_and_clear(mm, pvmw.address,
+                                                   pvmw.pte);
 
                        set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
                } else {
-                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                       pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
                }
 
                /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (PageHuge(page)) {
                                int nr = 1 << compound_order(page);
                                hugetlb_count_sub(nr, mm);
-                               set_huge_swap_pte_at(mm, address,
+                               set_huge_swap_pte_at(mm, pvmw.address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
                        } else {
                                dec_mm_counter(mm, mm_counter(page));
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                        }
 
                } else if (pte_unused(pteval)) {
@@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else if (PageAnon(page)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1460,7 +1464,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 * If the page was redirtied, it cannot be
                                 * discarded. Remap the page to page table.
                                 */
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                SetPageSwapBacked(page);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1468,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        }
 
                        if (swap_duplicate(entry) < 0) {
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
                                break;
@@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else
                        dec_mm_counter(mm, mm_counter_file(page));
 discard:
                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
-               mmu_notifier_invalidate_page(mm, address);
+               invalidation_needed = true;
        }
+
+       if (invalidation_needed)
+               mmu_notifier_invalidate_range(mm, address,
+                               address + (PAGE_SIZE << compound_order(page)));
        return ret;
 }
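
In both rmap walkers the per-PTE mmu_notifier_invalidate_page(), which had to
run under the page-table lock, is deferred into a single ranged call once the
walk finishes. The range end must be in bytes, so for a possibly-compound
page the length works out as in this sketch:

	unsigned long nr_bytes = PAGE_SIZE << compound_order(page);

	mmu_notifier_invalidate_range(mm, address, address + nr_bytes);
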
 
index b0aa6075d164df9ae4766876cc823394abaebc6d..6540e598244412023db650412062604b704b58b3 100644 (file)
@@ -1022,7 +1022,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
-                               if (list_empty(&info->shrinklist)) {
+                               /*
+                                * use the _careful variant to defend against
+                                * unlocked access to ->shrinklist in
+                                * shmem_unused_huge_shrink()
+                                */
+                               if (list_empty_careful(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
@@ -1817,7 +1821,11 @@ alloc_nohuge:            page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
                         * to shrink under memory pressure.
                         */
                        spin_lock(&sbinfo->shrinklist_lock);
-                       if (list_empty(&info->shrinklist)) {
+                       /*
+                        * use the _careful variant to defend against unlocked
+                        * access to ->shrinklist in shmem_unused_huge_shrink()
+                        */
+                       if (list_empty_careful(&info->shrinklist)) {
                                list_add_tail(&info->shrinklist,
                                                &sbinfo->shrinklist);
                                sbinfo->shrinklist_len++;
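
list_empty_careful() re-checks both next and prev, which makes it safe
against a concurrent list_del_init() done without this lock. The idiom in
miniature (node, head and head_lock are placeholders):

	spin_lock(&head_lock);
	if (list_empty_careful(&node->link))	/* tolerates a racing list_del_init() */
		list_add_tail(&node->link, &head);
	spin_unlock(&head_lock);
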
index 7b07ec852e01fa931b2b302e8df5cff9f17f62d6..9ecddf568fe30e5cf1fba6db8eda3b7abe96d379 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += global_page_state(NR_SLAB_RECLAIMABLE);
+               free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
index 76c2077c3f5b697bf8e0d4b030b70dde8fc70345..2e548eca34898f51316275c918bb1f0f4a63526e 100644 (file)
@@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net)
        net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
 #endif
 
+       /* Some IGMP sysctls whose values are always used */
+       net->ipv4.sysctl_igmp_max_memberships = 20;
+       net->ipv4.sysctl_igmp_max_msf = 10;
+       /* IGMP reports for link-local multicast groups are enabled by default */
+       net->ipv4.sysctl_igmp_llm_reports = 1;
+       net->ipv4.sysctl_igmp_qrv = 2;
+
        return 0;
 }
 
index 28f14afd0dd3a392da3b84c5e791fffaf46ad254..498706b072fb70e1ffe6b5dba817816db5a4cfa7 100644 (file)
@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
                goto out_sock;
        }
 
-       /* Sysctl initialization */
-       net->ipv4.sysctl_igmp_max_memberships = 20;
-       net->ipv4.sysctl_igmp_max_msf = 10;
-       /* IGMP reports for link-local multicast groups are enabled by default */
-       net->ipv4.sysctl_igmp_llm_reports = 1;
-       net->ipv4.sysctl_igmp_qrv = 2;
        return 0;
 
 out_sock:
index 50c74cd890bc79ed6c85c958c5397d833e9aa74a..e153c40c2436109d4bca4a9caf34b90cbf000cd9 100644 (file)
@@ -965,11 +965,12 @@ static int __ip_append_data(struct sock *sk,
                csummode = CHECKSUM_PARTIAL;
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
@@ -1288,6 +1289,7 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                return -EINVAL;
 
        if ((size + skb->len > mtu) &&
+           (skb_queue_len(&sk->sk_write_queue) == 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                if (skb->ip_summed != CHECKSUM_PARTIAL)
index e6276fa3750b909615668fddf84495369bd7d369..a7c804f73990a0610bc85c02fc2dd76858973c22 100644 (file)
@@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
        if (is_udplite)                                  /*     UDP-Lite      */
                csum = udplite_csum(skb);
 
-       else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+       else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 
                skb->ip_summed = CHECKSUM_NONE;
                goto send;
index 162efba0d0cd851848363588318cf6ade4a5a62c..2dfe50d8d609a7a623edacbe40e93022dfac685e 100644 (file)
@@ -1381,11 +1381,12 @@ emsgsize:
         */
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : headersize)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen, exthdrlen,
                                          transhdrlen, mtu, flags, fl6);
index 0615c2a950fab992134d0071707b5b336f6fb231..008a45ca31124ed5fa54d666fce61c7982b12a2f 100644 (file)
@@ -3700,14 +3700,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-                       return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
                if (val > INT_MAX)
                        return -EINVAL;
-               po->tp_reserve = val;
-               return 0;
+               lock_sock(sk);
+               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+                       ret = -EBUSY;
+               } else {
+                       po->tp_reserve = val;
+                       ret = 0;
+               }
+               release_sock(sk);
+               return ret;
        }
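
The window being closed: tp_reserve could change between the pg_vec check and
its later use, since packet_set_ring() runs under lock_sock() but this setter
did not. The pairing in miniature (rings_mapped() is a hypothetical helper
standing in for the pg_vec checks):

	lock_sock(sk);
	if (rings_mapped(po))
		ret = -EBUSY;
	else
		po->tp_reserve = val;	/* now stable w.r.t. packet_set_ring() */
	release_sock(sk);
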
        case PACKET_LOSS:
        {
index 94ba5cfab86000f70d9939db2baaa733a5338754..d516ba8178b8099f5e8e180f2e60e7a61de37811 100644 (file)
@@ -49,9 +49,9 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
                return PTR_ERR(target);
 
        t->u.kernel.target = target;
+       memset(&par, 0, sizeof(par));
        par.net       = net;
        par.table     = table;
-       par.entryinfo = NULL;
        par.target    = target;
        par.targinfo  = t->data;
        par.hook_mask = hook;
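
Zeroing the whole xt_tgchk_param up front means any member this function does
not assign, today or after the struct grows, reads as zero instead of stack
garbage. The shape in miniature:

	struct xt_tgchk_param par;

	memset(&par, 0, sizeof(par));	/* e.g. entryinfo now reads as NULL */
	par.net    = net;
	par.target = target;
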
index aeef8011ac7d82d828289f4085efe3acaa8a3945..9b4dcb6a16b50eefc04167dfdd1e509546b71bf6 100644 (file)
@@ -1455,10 +1455,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        /* Initiate synch mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
-               if (!tipc_link_is_up(l)) {
-                       tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+               if (!tipc_link_is_up(l))
                        __tipc_node_link_up(n, bearer_id, xmitq);
-               }
                if (n->state == SELF_UP_PEER_UP) {
                        n->sync_point = syncpt;
                        tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);