Merge branch 'linus' into sched/core, to pick up fixes
author     Ingo Molnar <mingo@kernel.org>
           Sat, 24 Jun 2017 06:57:20 +0000 (08:57 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Sat, 24 Jun 2017 06:57:20 +0000 (08:57 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
119 files changed:
Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
Documentation/devicetree/bindings/mfd/stm32-timers.txt
Documentation/devicetree/bindings/net/dsa/b53.txt
Documentation/devicetree/bindings/net/smsc911x.txt
arch/mips/kvm/tlb.c
arch/powerpc/include/asm/kprobes.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/trace/ftrace_64_mprofile.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_interrupts.S
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/perf/perf_regs.c
arch/powerpc/platforms/powernv/npu-dma.c
arch/s390/kvm/gaccess.c
arch/x86/include/asm/kvm_emulate.h
arch/x86/kvm/emulate.c
arch/x86/kvm/x86.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
drivers/acpi/scan.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/char/random.c
drivers/gpio/gpio-mvebu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/usbhid/hid-quirks.c
drivers/i2c/busses/i2c-imx.c
drivers/md/dm-integrity.c
drivers/md/dm-io.c
drivers/md/dm-raid1.c
drivers/mfd/arizona-core.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/sfc/ef10_sriov.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_main.c
fs/autofs4/dev-ioctl.c
fs/cifs/file.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/xattr.c
fs/dax.c
fs/exec.c
fs/ocfs2/dlmglue.c
fs/ocfs2/xattr.c
fs/ufs/balloc.c
fs/ufs/inode.c
fs/ufs/super.c
fs/ufs/ufs_fs.h
fs/xfs/xfs_aops.c
include/acpi/acpi_bus.h
include/linux/blkdev.h
include/linux/slub_def.h
include/net/wext.h
kernel/livepatch/patch.c
kernel/livepatch/transition.c
lib/cmdline.c
mm/khugepaged.c
mm/mmap.c
mm/slub.c
mm/vmalloc.c
net/8021q/vlan.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/fib_rules.c
net/core/rtnetlink.c
net/decnet/dn_route.c
net/ipv4/igmp.c
net/ipv4/ip_tunnel.c
net/ipv6/addrconf.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_tunnel.c
net/rxrpc/key.c
net/sctp/endpointola.c
net/sctp/sctp_diag.c
net/sctp/socket.c
net/wireless/wext-core.c
sound/core/pcm_lib.c
sound/firewire/amdtp-stream.c
sound/firewire/amdtp-stream.h
sound/pci/hda/hda_intel.c

diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 42c3bb2d53e88b651a7d39efe00e278c24d9c9b3..01e331a5f3e7491fba25188b5a12e91722057e42 100644
@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.
 
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"
 
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
                };
 
                gpio1: gpio@18140 {
-                       compatible = "marvell,armada-370-xp-gpio";
+                       compatible = "marvell,armada-370-gpio";
                        reg = <0x18140 0x40>, <0x181c8 0x08>;
                        reg-names = "gpio", "pwm";
                        ngpios = <17>;
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
index bbd083f5600a786b23a0ec821f384e8e8a6a3553..1db6e0057a638e09a5346956a70620c31276fdf6 100644
@@ -31,7 +31,7 @@ Example:
                compatible = "st,stm32-timers";
                reg = <0x40010000 0x400>;
                clocks = <&rcc 0 160>;
-               clock-names = "clk_int";
+               clock-names = "int";
 
                pwm {
                        compatible = "st,stm32-pwm";
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index d6c6e41648d4f17814ea43775e8591a40a4c0786..8ec2ca21adeb6ca840a7fd5d660a57748919af42 100644
@@ -34,7 +34,7 @@ Required properties:
       "brcm,bcm6328-switch"
       "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
 
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 
 Examples:
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index 16c3a9501f5d689431451c52b91c86b8ae1395b8..acfafc8e143c4c8599510eb7d2cbcb35af7e6f9f 100644
@@ -27,6 +27,7 @@ Optional properties:
   of the device. On many systems this is wired high so the device goes
   out of reset at power-on, but if it is under program control, this
   optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
 
 Examples:
 
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7c6336dd2638ce9c12c4ff8be566ff6acb856137..7cd92166a0b9a9bf3c14fe1df33442442ac668ac 100644
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                          bool user, bool kernel)
 {
-       int idx_user, idx_kernel;
+       /*
+        * Initialize idx_user and idx_kernel to workaround bogus
+        * maybe-initialized warning when using GCC 6.
+        */
+       int idx_user = 0, idx_kernel = 0;
        unsigned long flags, old_entryhi;
 
        local_irq_save(flags);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea36f8c005dab8e80dfdfcfd26a7058..8814a7249cebe29852dd8b4588a61052decac88c 100644
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                           struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ae418b85c17c4bce805227a82350d322259cf06e..b886795060fd2dba727c54d8c7b5e2b47a888f8d 100644
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
        .balign IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-       andis.  r0,r4,0xa410            /* weird error? */
+       andis.  r0,r4,0xa450            /* weird error? */
        bne-    handle_page_fault       /* if not, try to insert a HPTE */
-       andis.  r0,r4,DSISR_DABRMATCH@h
-       bne-    handle_dabr_fault
        CURRENT_THREAD_INFO(r11, r1)
        lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
        andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 
        /* Error */
        blt-    13f
+
+       /* Reload DSISR into r4 for the DABR check below */
+       ld      r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:    ld      r4,_DAR(r1)
+11:    andis.  r0,r4,DSISR_DABRMATCH@h
+       bne-    handle_dabr_fault
+       ld      r4,_DAR(r1)
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index fc4343514bed8b0f05a88e64caf0285c44ee8ea0..01addfb0ed0a42216d64c7692c3fea4694c528fb 100644
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+       struct kprobe *p = kprobe_running();
+       return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
        return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+       /*
+        * jprobes use jprobe_return() which skips the normal return
+        * path of the function, and this messes up the accounting of the
+        * function graph tracer.
+        *
+        * Pause function graph tracing while performing the jprobe function.
+        */
+       pause_graph_tracing();
+
        return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
         * saved regs...
         */
        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+       /* It's OK to start function graph tracing again */
+       unpause_graph_tracing();
        preempt_enable_no_resched();
        return 1;
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a8c1f99e96072530cb1f2d9ed702dffd78665720..4640f6d64f8b406a636d60c4ea2263658dde5be6 100644
@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif
 
+/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+       ti->task = NULL;
+       ti->cpu = cpu;
+       ti->preempt_count = 0;
+       ti->local_flags = 0;
+       ti->flags = 0;
+       klp_init_thread_info(ti);
+}
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
         * Since we use these as temporary stacks during secondary CPU
         * bringup, we need to get at them in real mode. This means they
         * must also be within the RMO region.
+        *
+        * The IRQ stacks allocated elsewhere in this file are zeroed and
+        * initialized in kernel/irq.c. These are initialized here in order
+        * to have emergency stacks available as early as possible.
         */
        limit = min(safe_stack_limit(), ppc64_rma_size);
 
        for_each_possible_cpu(i) {
                struct thread_info *ti;
                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-               klp_init_thread_info(ti);
+               memset(ti, 0, THREAD_SIZE);
+               emerg_stack_init_thread_info(ti, i);
                paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for NMI exception handling. */
                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-               klp_init_thread_info(ti);
+               memset(ti, 0, THREAD_SIZE);
+               emerg_stack_init_thread_info(ti, i);
                paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
                /* emergency stack for machine check exception handling. */
                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-               klp_init_thread_info(ti);
+               memset(ti, 0, THREAD_SIZE);
+               emerg_stack_init_thread_info(ti, i);
                paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
        }
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a99f5d578bdfd1408b16d4e7fe33e95e4a9..c98e90b4ea7b1f15a2dd7157300376e370774cf2 100644
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
        stdu    r1,-SWITCH_FRAME_SIZE(r1)
 
        /* Save all gprs to pt_regs */
-       SAVE_8GPRS(0,r1)
-       SAVE_8GPRS(8,r1)
-       SAVE_8GPRS(16,r1)
-       SAVE_8GPRS(24,r1)
+       SAVE_GPR(0, r1)
+       SAVE_10GPRS(2, r1)
+       SAVE_10GPRS(12, r1)
+       SAVE_10GPRS(22, r1)
+
+       /* Save previous stack pointer (r1) */
+       addi    r8, r1, SWITCH_FRAME_SIZE
+       std     r8, GPR1(r1)
 
        /* Load special regs for save below */
        mfmsr   r8
@@ -95,18 +99,44 @@ ftrace_call:
        bl      ftrace_stub
        nop
 
-       /* Load ctr with the possibly modified NIP */
-       ld      r3, _NIP(r1)
-       mtctr   r3
+       /* Load the possibly modified NIP */
+       ld      r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-       cmpd    r14,r3          /* has NIP been altered? */
+       cmpd    r14, r15        /* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+       /* NIP has not been altered, skip over further checks */
+       beq     1f
+
+       /* Check if there is an active kprobe on us */
+       subi    r3, r14, 4
+       bl      is_current_kprobe_addr
+       nop
+
+       /*
+        * If r3 == 1, then this is a kprobe/jprobe.
+        * else, this is a livepatched function.
+        *
+        * The conditional branch for livepatch_handler below will use the
+        * result of this comparison. For kprobe/jprobe, we just need to branch to
+        * the new NIP, not call livepatch_handler. The branch below is bne, so we
+        * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+        * CR0[EQ] = (r3 == 1).
+        */
+       cmpdi   r3, 1
+1:
 #endif
 
+       /* Load CTR with the possibly modified NIP */
+       mtctr   r15
+
        /* Restore gprs */
-       REST_8GPRS(0,r1)
-       REST_8GPRS(8,r1)
-       REST_8GPRS(16,r1)
-       REST_8GPRS(24,r1)
+       REST_GPR(0,r1)
+       REST_10GPRS(2,r1)
+       REST_10GPRS(12,r1)
+       REST_10GPRS(22,r1)
 
        /* Restore possibly modified LR */
        ld      r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
        addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /* Based on the cmpd above, if the NIP was altered handle livepatch */
+        /*
+        * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+        * not on a kprobe/jprobe, then handle livepatch.
+        */
        bne-    livepatch_handler
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4fd57d9a557f8278a9f9a8c228f2758a1e8..8d1a365b8edc45fa9f655b77789a0d8602b462fd 100644
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
                break;
        case KVM_REG_PPC_TB_OFFSET:
+               /*
+                * POWER9 DD1 has an erratum where writing TBU40 causes
+                * the timebase to lose ticks.  So we don't let the
+                * timebase offset be changed on P9 DD1.  (It is
+                * initialized to zero.)
+                */
+               if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+                       break;
                /* round up to multiple of 2^24 */
                vcpu->arch.vcore->tb_offset =
                        ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
        int r;
        int srcu_idx;
+       unsigned long ebb_regs[3] = {}; /* shut up GCC */
+       unsigned long user_tar = 0;
+       unsigned int user_vrsave;
 
        if (!vcpu->arch.sane) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }
 
+       /*
+        * Don't allow entry with a suspended transaction, because
+        * the guest entry/exit code will lose it.
+        * If the guest has TM enabled, save away their TM-related SPRs
+        * (they will get restored by the TM unavailable interrupt).
+        */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+           (current->thread.regs->msr & MSR_TM)) {
+               if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+                       run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+                       run->fail_entry.hardware_entry_failure_reason = 0;
+                       return -EINVAL;
+               }
+               current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+               current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+               current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+               current->thread.regs->msr &= ~MSR_TM;
+       }
+#endif
+
        kvmppc_core_prepare_to_enter(vcpu);
 
        /* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
        flush_all_to_thread(current);
 
+       /* Save userspace EBB and other register values */
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               ebb_regs[0] = mfspr(SPRN_EBBHR);
+               ebb_regs[1] = mfspr(SPRN_EBBRR);
+               ebb_regs[2] = mfspr(SPRN_BESCR);
+               user_tar = mfspr(SPRN_TAR);
+       }
+       user_vrsave = mfspr(SPRN_VRSAVE);
+
        vcpu->arch.wqp = &vcpu->arch.vcore->wq;
        vcpu->arch.pgdir = current->mm->pgd;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                }
        } while (is_kvmppc_resume_guest(r));
 
+       /* Restore userspace EBB and other register values */
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               mtspr(SPRN_EBBHR, ebb_regs[0]);
+               mtspr(SPRN_EBBRR, ebb_regs[1]);
+               mtspr(SPRN_BESCR, ebb_regs[2]);
+               mtspr(SPRN_TAR, user_tar);
+               mtspr(SPRN_FSCR, current->thread.fscr);
+       }
+       mtspr(SPRN_VRSAVE, user_vrsave);
+
  out:
        vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
        atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a28970b3c53d821088dae8eec5e2282dca4..404deb512844424d07bba8ead5bd77e70aee82af 100644
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
         * Put whatever is in the decrementer into the
         * hypervisor decrementer.
         */
+BEGIN_FTR_SECTION
+       ld      r5, HSTATE_KVM_VCORE(r13)
+       ld      r6, VCORE_KVM(r5)
+       ld      r9, KVM_HOST_LPCR(r6)
+       andis.  r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        mfspr   r8,SPRN_DEC
        mftb    r7
-       mtspr   SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+       /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+       bne     32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        extsw   r8,r8
+32:    mtspr   SPRN_HDEC,r8
        add     r8,r8,r7
        std     r8,HSTATE_DECEXP(r13)
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bdb3f76ceb6b9ff0e25e5b3b56c3be48dd6f65cc..4888dd494604f101a194a51ff168c44d85c4354d 100644
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)                       \
+BEGIN_FTR_SECTION;                             \
+       extsw   reg, reg;                       \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE   1
 #define NAPPING_NOVCPU 2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS                    144
+#define STACK_SLOT_TRAP                (SFS-4)
+#define STACK_SLOT_TID         (SFS-16)
+#define STACK_SLOT_PSSCR       (SFS-24)
+#define STACK_SLOT_PID         (SFS-32)
+#define STACK_SLOT_IAMR                (SFS-40)
+#define STACK_SLOT_CIABR       (SFS-48)
+#define STACK_SLOT_DAWR                (SFS-56)
+#define STACK_SLOT_DAWRX       (SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
        /* We handle this much like a ceded vcpu */
        /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+       /* HDEC may be larger than DEC for arch >= v3.00, but since the */
+       /* HDEC value came from DEC in the first place, it will fit */
        mfspr   r3, SPRN_HDEC
        mtspr   SPRN_DEC, r3
        /*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
        /* See if our timeslice has expired (HDEC is negative) */
        mfspr   r0, SPRN_HDEC
+       EXTEND_HDEC(r0)
        li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-       cmpwi   r0, 0
+       cmpdi   r0, 0
        blt     kvm_novcpu_exit
 
        /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
        bl      kvmhv_accumulate_time
 #endif
 13:    mr      r3, r12
-       stw     r12, 112-4(r1)
+       stw     r12, STACK_SLOT_TRAP(r1)
        bl      kvmhv_commence_exit
        nop
-       lwz     r12, 112-4(r1)
+       lwz     r12, STACK_SLOT_TRAP(r1)
        b       kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
        lbz     r4, HSTATE_PTID(r13)
        cmpwi   r4, 0
        bne     63f
-       lis     r6, 0x7fff
-       ori     r6, r6, 0xffff
+       LOAD_REG_ADDR(r6, decrementer_max)
+       ld      r6, 0(r6)
        mtspr   SPRN_HDEC, r6
        /* and set per-LPAR registers, if doing dynamic micro-threading */
        ld      r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID         (112-16)
-#define STACK_SLOT_PSSCR       (112-24)
-#define STACK_SLOT_PID         (112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
         */
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
-       stdu    r1, -112(r1)
+       stdu    r1, -SFS(r1)
 
        /* Save R1 in the PACA */
        std     r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
        mfspr   r5, SPRN_TIDR
        mfspr   r6, SPRN_PSSCR
        mfspr   r7, SPRN_PID
+       mfspr   r8, SPRN_IAMR
        std     r5, STACK_SLOT_TID(r1)
        std     r6, STACK_SLOT_PSSCR(r1)
        std     r7, STACK_SLOT_PID(r1)
+       std     r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+       mfspr   r5, SPRN_CIABR
+       mfspr   r6, SPRN_DAWR
+       mfspr   r7, SPRN_DAWRX
+       std     r5, STACK_SLOT_CIABR(r1)
+       std     r6, STACK_SLOT_DAWR(r1)
+       std     r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
        /* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
        /* Check if HDEC expires soon */
        mfspr   r3, SPRN_HDEC
-       cmpwi   r3, 512         /* 1 microsecond */
+       EXTEND_HDEC(r3)
+       cmpdi   r3, 512         /* 1 microsecond */
        blt     hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
         * set by the guest could disrupt the host.
         */
        li      r0, 0
-       mtspr   SPRN_IAMR, r0
-       mtspr   SPRN_CIABR, r0
-       mtspr   SPRN_DAWRX, r0
+       mtspr   SPRN_PSPB, r0
        mtspr   SPRN_WORT, r0
 BEGIN_FTR_SECTION
+       mtspr   SPRN_IAMR, r0
        mtspr   SPRN_TCSCR, r0
        /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
        li      r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
        std     r6,VCPU_UAMOR(r9)
        li      r6,0
        mtspr   SPRN_AMR,r6
+       mtspr   SPRN_UAMOR, r6
 
        /* Switch DSCR back to host value */
        mfspr   r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        ptesync
 
        /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+       ld      r5, STACK_SLOT_CIABR(r1)
+       ld      r6, STACK_SLOT_DAWR(r1)
+       ld      r7, STACK_SLOT_DAWRX(r1)
+       mtspr   SPRN_CIABR, r5
+       mtspr   SPRN_DAWR, r6
+       mtspr   SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
        ld      r5, STACK_SLOT_TID(r1)
        ld      r6, STACK_SLOT_PSSCR(r1)
        ld      r7, STACK_SLOT_PID(r1)
+       ld      r8, STACK_SLOT_IAMR(r1)
        mtspr   SPRN_TIDR, r5
        mtspr   SPRN_PSSCR, r6
        mtspr   SPRN_PID, r7
+       mtspr   SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
        PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
        li      r0, KVM_GUEST_MODE_NONE
        stb     r0, HSTATE_IN_GUEST(r13)
 
-       ld      r0, 112+PPC_LR_STKOFF(r1)
-       addi    r1, r1, 112
+       ld      r0, SFS+PPC_LR_STKOFF(r1)
+       addi    r1, r1, SFS
        mtlr    r0
        blr
 
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
        mfspr   r3, SPRN_DEC
        mfspr   r4, SPRN_HDEC
        mftb    r5
-       cmpw    r3, r4
+       extsw   r3, r3
+       EXTEND_HDEC(r4)
+       cmpd    r3, r4
        ble     67f
        mtspr   SPRN_DEC, r4
 67:
        /* save expiry time of guest decrementer */
-       extsw   r3, r3
        add     r3, r3, r5
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r5, HSTATE_KVM_VCORE(r13)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fde57702e2a210608dc2e1800ae574465e0..09ceea6175ba9dc1d99b8b56eadae1367138b166 100644
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
                        struct pt_regs *regs_user_copy)
 {
        regs_user->regs = task_pt_regs(current);
-       regs_user->abi  = perf_reg_abi(current);
+       regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+                        PERF_SAMPLE_REGS_ABI_NONE;
 }
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index e6f444b462079c3c4f4bea059337b92e700488f5..b5d960d6db3d0b18d33273b31ac67e03caebd02b 100644
@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
        return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
        unsigned long launch;
 
@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);
 
+       /* No flush */
+       launch |= !flush << PPC_BITLSHIFT(39);
+
        /* Invalidating the entire process doesn't use a va */
        return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-                       unsigned long pid)
+                       unsigned long pid, bool flush)
 {
        unsigned long launch;
 
@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);
 
+       /* No flush */
+       launch |= !flush << PPC_BITLSHIFT(39);
+
        return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+       struct npu *npu;
+       int reg;
+};
+
+static void mmio_invalidate_wait(
+       struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+       struct npu *npu;
+       int i, reg;
+
+       /* Wait for all invalidations to complete */
+       for (i = 0; i <= max_npu2_index; i++) {
+               if (mmio_atsd_reg[i].reg < 0)
+                       continue;
+
+               /* Wait for completion */
+               npu = mmio_atsd_reg[i].npu;
+               reg = mmio_atsd_reg[i].reg;
+               while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+                       cpu_relax();
+
+               put_mmio_atsd_reg(npu, reg);
+
+               /*
+                * The GPU requires two flush ATSDs to ensure all entries have
+                * been flushed. We use PID 0 as it will never be used for a
+                * process on the GPU.
+                */
+               if (flush)
+                       mmio_invalidate_pid(npu, 0, true);
+       }
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-                       unsigned long address)
+                       unsigned long address, bool flush)
 {
-       int i, j, reg;
+       int i, j;
        struct npu *npu;
        struct pnv_phb *nphb;
        struct pci_dev *npdev;
-       struct {
-               struct npu *npu;
-               int reg;
-       } mmio_atsd_reg[NV_MAX_NPUS];
+       struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
        unsigned long pid = npu_context->mm->context.id;
 
        /*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
                        if (va)
                                mmio_atsd_reg[i].reg =
-                                       mmio_invalidate_va(npu, address, pid);
+                                       mmio_invalidate_va(npu, address, pid,
+                                                       flush);
                        else
                                mmio_atsd_reg[i].reg =
-                                       mmio_invalidate_pid(npu, pid);
+                                       mmio_invalidate_pid(npu, pid, flush);
 
                        /*
                         * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
         */
        flush_tlb_mm(npu_context->mm);
 
-       /* Wait for all invalidations to complete */
-       for (i = 0; i <= max_npu2_index; i++) {
-               if (mmio_atsd_reg[i].reg < 0)
-                       continue;
-
-               /* Wait for completion */
-               npu = mmio_atsd_reg[i].npu;
-               reg = mmio_atsd_reg[i].reg;
-               while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-                       cpu_relax();
-               put_mmio_atsd_reg(npu, reg);
-       }
+       mmio_invalidate_wait(mmio_atsd_reg, flush);
+       if (flush)
+               /* Wait for the flush to complete */
+               mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
         * There should be no more translation requests for this PID, but we
         * need to ensure any entries for it are removed from the TLB.
         */
-       mmio_invalidate(npu_context, 0, 0);
+       mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
        struct npu_context *npu_context = mn_to_npu_context(mn);
 
-       mmio_invalidate(npu_context, 1, address);
+       mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
        struct npu_context *npu_context = mn_to_npu_context(mn);
 
-       mmio_invalidate(npu_context, 1, address);
+       mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
        struct npu_context *npu_context = mn_to_npu_context(mn);
        unsigned long address;
 
-       for (address = start; address <= end; address += PAGE_SIZE)
-               mmio_invalidate(npu_context, 1, address);
+       for (address = start; address < end; address += PAGE_SIZE)
+               mmio_invalidate(npu_context, 1, address, false);
+
+       /* Do the flush only on the final address == end */
+       mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                /* No nvlink associated with this GPU device */
                return ERR_PTR(-ENODEV);
 
-       if (!mm) {
-               /* kernel thread contexts are not supported */
+       if (!mm || mm->context.id == 0) {
+               /*
+                * Kernel thread contexts are not supported and context id 0 is
+                * reserved on the GPU.
+                */
                return ERR_PTR(-EINVAL);
        }
 
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d94cc3286c5e1dabcfae5e563f991326a0..3b297fa3aa67c59be7fdb2fd2953f431adfbc1d4 100644
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
        ptr = asce.origin * 4096;
        if (asce.r) {
                *fake = 1;
+               ptr = 0;
                asce.dt = ASCE_TYPE_REGION1;
        }
        switch (asce.dt) {
        case ASCE_TYPE_REGION1:
-               if (vaddr.rfx01 > asce.tl && !asce.r)
+               if (vaddr.rfx01 > asce.tl && !*fake)
                        return PGM_REGION_FIRST_TRANS;
                break;
        case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
                union region1_table_entry rfte;
 
                if (*fake) {
-                       /* offset in 16EB guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+                       ptr += (unsigned long) vaddr.rfx << 53;
                        rfte.val = ptr;
                        goto shadow_r2t;
                }
@@ -1036,8 +1036,7 @@ shadow_r2t:
                union region2_table_entry rste;
 
                if (*fake) {
-                       /* offset in 8PB guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+                       ptr += (unsigned long) vaddr.rsx << 42;
                        rste.val = ptr;
                        goto shadow_r3t;
                }
@@ -1064,8 +1063,7 @@ shadow_r3t:
                union region3_table_entry rtte;
 
                if (*fake) {
-                       /* offset in 4TB guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+                       ptr += (unsigned long) vaddr.rtx << 31;
                        rtte.val = ptr;
                        goto shadow_sgt;
                }
@@ -1101,8 +1099,7 @@ shadow_sgt:
                union segment_table_entry ste;
 
                if (*fake) {
-                       /* offset in 2G guest memory block */
-                       ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+                       ptr += (unsigned long) vaddr.sx << 20;
                        ste.val = ptr;
                        goto shadow_pgt;
                }
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 05596261577937d65afcad75574bab676a7effe9..722d0e56886342a3a9f65d7f419d0e240a9cc6ab 100644
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 
        bool perm_ok; /* do not check permissions if true */
        bool ud;        /* inject an #UD if host doesn't support insn */
+       bool tf;        /* TF value before instruction (after for syscall/sysret) */
 
        bool have_exception;
        struct x86_exception exception;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0816ab2e8adcae2b45f83c95c51e8b95a245b07e..80890dee66cebf370a3815e28f7bd7c34025b0d4 100644
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
                ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
        }
 
+       ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
        return X86EMUL_CONTINUE;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 87d3cb901935f2b251857f54ed53ca73567f10ef..0e846f0cb83bb214811d0a12d2f700cc96a455f9 100644
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
        ctxt->eflags = kvm_get_rflags(vcpu);
+       ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
        ctxt->eip = kvm_rip_read(vcpu);
        ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
                     (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
        return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
        struct kvm_run *kvm_run = vcpu->run;
 
-       /*
-        * rflags is the old, "raw" value of the flags.  The new value has
-        * not been saved yet.
-        *
-        * This is correct even for TF set by the guest, because "the
-        * processor will not generate this exception after the instruction
-        * that sets the TF flag".
-        */
-       if (unlikely(rflags & X86_EFLAGS_TF)) {
-               if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-                       kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-                                                 DR6_RTM;
-                       kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-                       kvm_run->debug.arch.exception = DB_VECTOR;
-                       kvm_run->exit_reason = KVM_EXIT_DEBUG;
-                       *r = EMULATE_USER_EXIT;
-               } else {
-                       /*
-                        * "Certain debug exceptions may clear bit 0-3.  The
-                        * remaining contents of the DR6 register are never
-                        * cleared by the processor".
-                        */
-                       vcpu->arch.dr6 &= ~15;
-                       vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-                       kvm_queue_exception(vcpu, DB_VECTOR);
-               }
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+               kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+               kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+               kvm_run->debug.arch.exception = DB_VECTOR;
+               kvm_run->exit_reason = KVM_EXIT_DEBUG;
+               *r = EMULATE_USER_EXIT;
+       } else {
+               /*
+                * "Certain debug exceptions may clear bit 0-3.  The
+                * remaining contents of the DR6 register are never
+                * cleared by the processor".
+                */
+               vcpu->arch.dr6 &= ~15;
+               vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+               kvm_queue_exception(vcpu, DB_VECTOR);
        }
 }
 
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
        int r = EMULATE_DONE;
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
-       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+       /*
+        * rflags is the old, "raw" value of the flags.  The new value has
+        * not been saved yet.
+        *
+        * This is correct even for TF set by the guest, because "the
+        * processor will not generate this exception after the instruction
+        * that sets the TF flag".
+        */
+       if (unlikely(rflags & X86_EFLAGS_TF))
+               kvm_vcpu_do_singlestep(vcpu, &r);
        return r == EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ restart:
                toggle_interruptibility(vcpu, ctxt->interruptibility);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                kvm_rip_write(vcpu, ctxt->eip);
-               if (r == EMULATE_DONE)
-                       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+               if (r == EMULATE_DONE &&
+                   (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+                       kvm_vcpu_do_singlestep(vcpu, &r);
                if (!ctxt->have_exception ||
                    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
                        __kvm_set_rflags(vcpu, ctxt->eflags);
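
The emulate.c and x86.c hunks above latch EFLAGS.TF into ctxt->tf before an instruction is emulated (em_syscall re-samples it afterwards, since SYSCALL itself rewrites the flags), so the single-step trap decision no longer depends on what the emulated instruction did to EFLAGS. A minimal user-space sketch of that latch-before-execute pattern, with hypothetical names standing in for the emulator context:

	#include <stdbool.h>
	#include <stdio.h>

	#define FLAG_TF 0x100u          /* EFLAGS.TF */

	struct emu_ctxt {
	        unsigned int eflags;
	        bool tf;                /* TF as sampled before the instruction */
	};

	/* An emulated instruction may rewrite eflags; this one clears TF. */
	static void insn_clearing_tf(struct emu_ctxt *c)
	{
	        c->eflags &= ~FLAG_TF;
	}

	static void emulate_one(struct emu_ctxt *c, void (*insn)(struct emu_ctxt *))
	{
	        c->tf = (c->eflags & FLAG_TF) != 0;     /* latch before execution */
	        insn(c);                                /* may rewrite eflags */
	        if (c->tf)                              /* decide on the latched value */
	                printf("inject single-step #DB\n");
	}

	int main(void)
	{
	        struct emu_ctxt c = { .eflags = FLAG_TF, .tf = false };

	        /* The trap still fires although the instruction cleared TF,
	         * because the decision uses the pre-instruction value. */
	        emulate_one(&c, insn_clearing_tf);
	        return 0;
	}

This is only an illustration of the sampling order, not the KVM API; the actual patch additionally gates the call on guest_debug's KVM_GUESTDBG_SINGLESTEP as shown in the hunk.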
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1f5b692526ae1a7199ee9bbaef305c4b0a42e696..0ded5e846335667406d58ce08e8439360baeb312 100644
@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
                __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_inc(&q->shared_hctx_restart);
+       } else
+               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return false;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_dec(&q->shared_hctx_restart);
+       } else
+               clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+       if (blk_mq_hctx_has_pending(hctx)) {
+               blk_mq_run_hw_queue(hctx, true);
+               return true;
+       }
+
+       return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio,
                                         unsigned int op,
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
        return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-               clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-               if (blk_mq_hctx_has_pending(hctx)) {
-                       blk_mq_run_hw_queue(hctx, true);
-                       return true;
-               }
-       }
-       return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos:    loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
        unsigned int i, j;
 
        if (set->flags & BLK_MQ_F_TAG_SHARED) {
+               /*
+                * If this is 0, then we know that no hardware queues
+                * have RESTART marked. We're done.
+                */
+               if (!atomic_read(&queue->shared_hctx_restart))
+                       return;
+
                rcu_read_lock();
                list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
                                           tag_set_list) {
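
The new blk_mq_sched_mark_restart_hctx()/blk_mq_sched_restart_hctx() pair keeps a queue-wide count of shared hardware queues marked for restart, so blk_mq_sched_restart() can return immediately when the count is zero instead of walking the whole tag list. A user-space sketch of that counting idea, with hypothetical names and C11 atomics standing in for atomic_t and the test_and_set_bit()/test_and_clear_bit() guards:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct hw_queue {
	        atomic_bool restart_marked;     /* models BLK_MQ_S_SCHED_RESTART */
	};

	struct queue {
	        atomic_int shared_restart;      /* models q->shared_hctx_restart */
	        struct hw_queue hctx[4];
	};

	/* Mark: bump the counter only on a false->true transition. */
	static void mark_restart(struct queue *q, struct hw_queue *h)
	{
	        bool expected = false;

	        if (atomic_compare_exchange_strong(&h->restart_marked, &expected, true))
	                atomic_fetch_add(&q->shared_restart, 1);
	}

	/* Restart path: a zero counter lets us skip scanning every hctx. */
	static void restart_queues(struct queue *q)
	{
	        if (atomic_load(&q->shared_restart) == 0)
	                return;                 /* fast path: nothing marked */

	        for (int i = 0; i < 4; i++) {
	                bool expected = true;

	                if (atomic_compare_exchange_strong(&q->hctx[i].restart_marked,
	                                                   &expected, false)) {
	                        atomic_fetch_sub(&q->shared_restart, 1);
	                        printf("restarting hctx %d\n", i);
	                }
	        }
	}

	int main(void)
	{
	        struct queue q = { 0 };

	        mark_restart(&q, &q.hctx[2]);
	        restart_queues(&q);     /* prints "restarting hctx 2" */
	        restart_queues(&q);     /* counter is 0: returns immediately */
	        return 0;
	}

The counter stays honest because it is only adjusted when the per-hctx bit actually changes state, which is exactly what the test_and_set_bit()/test_and_clear_bit() checks in the patch guarantee.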
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index edafb5383b7bbdedfd5365ed38f9a5c373ec96ab..5007edece51aced038d3db8f0adbc722c49e3d38 100644
@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
        return false;
 }
 
-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 121aa1dbb19215ba773f4704655962bdc601dfd1..07b0a03c46e6ac0b901d38c57529b953c325b6d0 100644
@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
        }
 }
 
+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (shared)
+               if (shared) {
+                       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                               atomic_inc(&q->shared_hctx_restart);
                        hctx->flags |= BLK_MQ_F_TAG_SHARED;
-               else
+               } else {
+                       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                               atomic_dec(&q->shared_hctx_restart);
                        hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+               }
        }
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+                                       bool shared)
 {
        struct request_queue *q;
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3a10d7573477e7dea0139c5f885e9514a1886a7a..d53162997f32002828a6db353bcd1926f2a8dc97 100644
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
        adev->flags.coherent_dma = cca;
 }
 
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+       bool *is_spi_i2c_slave_p = data;
+
+       if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+               return 1;
+
+       /*
+        * devices that are connected to UART still need to be enumerated to
+        * platform bus
+        */
+       if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+               *is_spi_i2c_slave_p = true;
+
+        /* no need to do more checking */
+       return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+       struct list_head resource_list;
+       bool is_spi_i2c_slave = false;
+
+       INIT_LIST_HEAD(&resource_list);
+       acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+                              &is_spi_i2c_slave);
+       acpi_dev_free_resource_list(&resource_list);
+
+       return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
                             int type, unsigned long long sta)
 {
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
        acpi_bus_get_flags(device);
        device->flags.match_driver = false;
        device->flags.initialized = true;
+       device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
        acpi_device_clear_enumerated(device);
        device_initialize(&device->dev);
        dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
        return AE_OK;
 }
 
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-       bool *is_spi_i2c_slave_p = data;
-
-       if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-               return 1;
-
-       /*
-        * devices that are connected to UART still need to be enumerated to
-        * platform bus
-        */
-       if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-               *is_spi_i2c_slave_p = true;
-
-        /* no need to do more checking */
-       return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 {
-       struct list_head resource_list;
-       bool is_spi_i2c_slave = false;
-
        /*
         * Do not enumerate SPI/I2C slaves as they will be enumerated by their
         * respective parents.
         */
-       INIT_LIST_HEAD(&resource_list);
-       acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-                              &is_spi_i2c_slave);
-       acpi_dev_free_resource_list(&resource_list);
-       if (!is_spi_i2c_slave) {
+       if (!device->flags.spi_i2c_slave) {
                acpi_create_platform_device(device, NULL);
                acpi_device_set_enumerated(device);
        } else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
                return;
 
        device->flags.match_driver = true;
-       if (ret > 0) {
+       if (ret > 0 && !device->flags.spi_i2c_slave) {
                acpi_device_set_enumerated(device);
                goto ok;
        }
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
        if (ret < 0)
                return;
 
-       if (device->pnp.type.platform_id)
-               acpi_default_enumeration(device);
-       else
+       if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
                acpi_device_set_enumerated(device);
+       else
+               acpi_default_enumeration(device);
 
  ok:
        list_for_each_entry(child, &device->children, node)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e35db9c542e6f050ff0a04e31e10fc2b7d..0e824091a12fac8757c2ade5d4e5dae6a1470cbd 100644
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
        unsigned long timeout;
        int ret;
 
-       xen_blkif_get(blkif);
-
        set_freezable();
        while (!kthread_should_stop()) {
                if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
                print_stats(ring);
 
        ring->xenblkd = NULL;
-       xen_blkif_put(blkif);
 
        return 0;
 }
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st)
 {
-       struct blkif_response  resp;
+       struct blkif_response *resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings;
        int notify;
 
-       resp.id        = id;
-       resp.operation = op;
-       resp.status    = st;
-
        spin_lock_irqsave(&ring->blk_ring_lock, flags);
        blk_rings = &ring->blk_rings;
        /* Place on the response ring for the relevant domain. */
        switch (ring->blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
-               memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-                      &resp, sizeof(resp));
+               resp = RING_GET_RESPONSE(&blk_rings->native,
+                                        blk_rings->native.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_32:
-               memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-                      &resp, sizeof(resp));
+               resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+                                        blk_rings->x86_32.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_64:
-               memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-                      &resp, sizeof(resp));
+               resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+                                        blk_rings->x86_64.rsp_prod_pvt);
                break;
        default:
                BUG();
        }
+
+       resp->id        = id;
+       resp->operation = op;
+       resp->status    = st;
+
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6ab8cbdbaffedceb4c64bda239b51a63a4..ecb35fe8ca8dbb54f36a85513a09064819acd67a 100644
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
 struct blkif_common_request {
        char dummy;
 };
-struct blkif_common_response {
-       char dummy;
-};
+
+/* i386 protocol version */
 
 struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
        } u;
 } __attribute__((__packed__));
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-       uint64_t        id;              /* copied from request */
-       uint8_t         operation;       /* copied from request */
-       int16_t         status;          /* BLKIF_RSP_???       */
-};
-#pragma pack(pop)
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
        } u;
 } __attribute__((__packed__));
 
-struct blkif_x86_64_response {
-       uint64_t       __attribute__((__aligned__(8))) id;
-       uint8_t         operation;       /* copied from request */
-       int16_t         status;          /* BLKIF_RSP_???       */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-                 struct blkif_common_response);
+                 struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-                 struct blkif_x86_32_response);
+                 struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-                 struct blkif_x86_64_response);
+                 struct blkif_response);
 
 union blkif_back_rings {
        struct blkif_back_ring        native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
 
        wait_queue_head_t       wq;
        atomic_t                inflight;
+       bool                    active;
        /* One thread per blkif ring. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 1f3dfaa54d871a36897408898c1e0e9f22100bb1..792da683e70dafafa6f69e224b8e57272d3e6be1 100644
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
                init_waitqueue_head(&ring->shutdown_wq);
                ring->blkif = blkif;
                ring->st_print = jiffies;
-               xen_blkif_get(blkif);
+               ring->active = true;
        }
 
        return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
                struct xen_blkif_ring *ring = &blkif->rings[r];
                unsigned int i = 0;
 
+               if (!ring->active)
+                       continue;
+
                if (ring->xenblkd) {
                        kthread_stop(ring->xenblkd);
                        wake_up(&ring->shutdown_wq);
-                       ring->xenblkd = NULL;
                }
 
                /* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
                BUG_ON(ring->free_pages_num != 0);
                BUG_ON(ring->persistent_gnt_c != 0);
                WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-               xen_blkif_put(blkif);
+               ring->active = false;
        }
        blkif->nr_ring_pages = 0;
        /*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-
-       xen_blkif_disconnect(blkif);
+       WARN_ON(xen_blkif_disconnect(blkif));
        xen_vbd_free(&blkif->vbd);
+       kfree(blkif->be->mode);
+       kfree(blkif->be);
 
        /* Make sure everything is drained before shutting down */
        kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
                xen_blkif_put(be->blkif);
        }
 
-       kfree(be->mode);
-       kfree(be);
        return 0;
 }
 
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e870f329db888c58e06bb854e7cf55d78a8bd313..01a260f67437488b425372c12d33142fce699f84 100644
@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
                p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
                cp++; crng_init_cnt++; len--;
        }
+       spin_unlock_irqrestore(&primary_crng.lock, flags);
        if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                invalidate_batched_entropy();
                crng_init = 1;
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: fast init done\n");
        }
-       spin_unlock_irqrestore(&primary_crng.lock, flags);
        return 1;
 }
 
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
        }
        memzero_explicit(&buf, sizeof(buf));
        crng->init_time = jiffies;
+       spin_unlock_irqrestore(&primary_crng.lock, flags);
        if (crng == &primary_crng && crng_init < 2) {
                invalidate_batched_entropy();
                crng_init = 2;
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: crng init done\n");
        }
-       spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
        u64 ret;
-       bool use_lock = crng_init < 2;
-       unsigned long flags;
+       bool use_lock = READ_ONCE(crng_init) < 2;
+       unsigned long flags = 0;
        struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
        u32 ret;
-       bool use_lock = crng_init < 2;
-       unsigned long flags;
+       bool use_lock = READ_ONCE(crng_init) < 2;
+       unsigned long flags = 0;
        struct batched_entropy *batch;
 
        if (arch_get_random_int(&ret))
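Two fixes run through these random.c hunks: the wakeups and invalidate_batched_entropy() calls move outside the primary_crng lock, and get_random_u32()/get_random_u64() snapshot crng_init with READ_ONCE() so the take-lock decision and the later unlock cannot disagree if another CPU promotes crng_init in between; flags is zero-initialized because it is only written when the lock is actually taken. A sketch of the pattern (the reset-lock name is assumed from the surrounding driver):

        bool use_lock = READ_ONCE(crng_init) < 2;   /* read once, reuse for unlock */
        unsigned long flags = 0;                    /* only set when use_lock */

        if (use_lock)
                read_lock_irqsave(&batched_entropy_reset_lock, flags);
        /* ... pull bytes from the per-cpu batch, refilling as needed ... */
        if (use_lock)
                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);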
index 5104b63981390adb878ed27f4ca2d0d758c65307..c83ea68be792df45a354f38dee2438a866a1f29d 100644 (file)
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
        u32 set;
 
        if (!of_device_is_compatible(mvchip->chip.of_node,
-                                    "marvell,armada-370-xp-gpio"))
+                                    "marvell,armada-370-gpio"))
                return 0;
 
        if (IS_ERR(mvchip->clk))
@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
                .data       = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
        },
        {
-               .compatible = "marvell,armada-370-xp-gpio",
+               .compatible = "marvell,armada-370-gpio",
                .data       = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
        },
        {
@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                                                 mvchip);
        }
 
-       /* Armada 370/XP has simple PWM support for GPIO lines */
+       /* Some MVEBU SoCs have simple PWM support for GPIO lines */
        if (IS_ENABLED(CONFIG_PWM))
                return mvebu_pwm_probe(pdev, mvchip, id);
 
index 1cf78f4dd339f93ddd971088ec42a5146b9820fe..1e8e1123ddf416f18176cbc6e82fa791b3df9fb5 100644 (file)
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
                        DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
                                 adev->clock.default_dispclk / 100);
                        adev->clock.default_dispclk = 60000;
+               } else if (adev->clock.default_dispclk <= 60000) {
+                       DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+                                adev->clock.default_dispclk / 100);
+                       adev->clock.default_dispclk = 62500;
                }
                adev->clock.dp_extclk =
                        le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
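The clock fields here are kept in 10 kHz units, which is why both DRM_INFO calls divide by 100 to print MHz:

        60000 x 10 kHz = 600 MHz   (the existing zero-dispclk fallback)
        62500 x 10 kHz = 625 MHz   (the new floor added by this hunk)

so the added branch raises any reported default dispclk at or below 600 MHz to 625 MHz.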
index f2d705e6a75aa4f092d3d98ff739927e15b6f26b..ab6b0d0febab810ba4941e5a527435dd5d3161d8 100644 (file)
@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        /* Vega 10 */
        {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
index 8c9bc75a9c2db63288f2c6765b02f68f63875194..8a0818b23ea40fadde57f6b469b2153330b710ab 100644 (file)
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-       ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+       ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
        memset(&args, 0, sizeof(args));
 
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
 {
        int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-       ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+       ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
        memset(&args, 0, sizeof(args));
 
index 9f847615ac74ab012f6203a141627a5c6f5993e2..48ca2457df8c964977f3f7edae0980bc227b97cf 100644 (file)
@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        if (!connector)
                return -ENOENT;
 
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       encoder = drm_connector_get_encoder(connector);
-       if (encoder)
-               out_resp->encoder_id = encoder->base.id;
-       else
-               out_resp->encoder_id = 0;
-
-       ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
-                       (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-                       (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-                       &out_resp->count_props);
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-       if (ret)
-               goto out_unref;
-
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
                if (connector->encoder_ids[i] != 0)
                        encoders_count++;
@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
                                if (put_user(connector->encoder_ids[i],
                                             encoder_ptr + copied)) {
                                        ret = -EFAULT;
-                                       goto out_unref;
+                                       goto out;
                                }
                                copied++;
                        }
@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
                        if (copy_to_user(mode_ptr + copied,
                                         &u_mode, sizeof(u_mode))) {
                                ret = -EFAULT;
+                               mutex_unlock(&dev->mode_config.mutex);
+
                                goto out;
                        }
                        copied++;
                }
        }
        out_resp->count_modes = mode_count;
-out:
        mutex_unlock(&dev->mode_config.mutex);
-out_unref:
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       encoder = drm_connector_get_encoder(connector);
+       if (encoder)
+               out_resp->encoder_id = encoder->base.id;
+       else
+               out_resp->encoder_id = 0;
+
+       /* Only grab properties after probing, to make sure EDID and other
+        * properties reflect the latest status. */
+       ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+                       (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+                       (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+                       &out_resp->count_props);
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out:
        drm_connector_put(connector);
 
        return ret;
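This hunk reorders drm_mode_getconnector() so probing and mode copying finish under mode_config.mutex before the encoder id and properties are read under connection_mutex; as the new comment says, the returned properties (EDID and friends) then reflect the state after probing rather than before it. The resulting shape, as a sketch:

        mutex_lock(&dev->mode_config.mutex);
        /* probe the connector, fill modes, copy them to userspace */
        mutex_unlock(&dev->mode_config.mutex);

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        /* read encoder id and properties: sees the post-probe state */
        drm_modeset_unlock(&dev->mode_config.connection_mutex);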
index 462031cbd77f714b23a3b7645039c0d8dba71f40..615f0a855222f630d07311c92dce17d3bd371298 100644 (file)
@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        unsigned int max_segment;
+       gfp_t noreclaim;
        int ret;
-       gfp_t gfp;
 
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2315,22 +2315,31 @@ rebuild_st:
         * Fail silently without starting the shrinker
         */
        mapping = obj->base.filp->f_mapping;
-       gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
-       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+       noreclaim = mapping_gfp_constraint(mapping,
+                                          ~(__GFP_IO | __GFP_RECLAIM));
+       noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
        sg = st->sgl;
        st->nents = 0;
        for (i = 0; i < page_count; i++) {
-               page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-               if (unlikely(IS_ERR(page))) {
-                       i915_gem_shrink(dev_priv,
-                                       page_count,
-                                       I915_SHRINK_BOUND |
-                                       I915_SHRINK_UNBOUND |
-                                       I915_SHRINK_PURGEABLE);
+               const unsigned int shrink[] = {
+                       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
+                       0,
+               }, *s = shrink;
+               gfp_t gfp = noreclaim;
+
+               do {
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-               }
-               if (unlikely(IS_ERR(page))) {
-                       gfp_t reclaim;
+                       if (likely(!IS_ERR(page)))
+                               break;
+
+                       if (!*s) {
+                               ret = PTR_ERR(page);
+                               goto err_sg;
+                       }
+
+                       i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+                       cond_resched();
 
                        /* We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
@@ -2340,15 +2349,26 @@ rebuild_st:
                         * defer the oom here by reporting the ENOMEM back
                         * to userspace.
                         */
-                       reclaim = mapping_gfp_mask(mapping);
-                       reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
-
-                       page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
-                       if (IS_ERR(page)) {
-                               ret = PTR_ERR(page);
-                               goto err_sg;
+                       if (!*s) {
+                               /* reclaim and warn, but no oom */
+                               gfp = mapping_gfp_mask(mapping);
+
+                               /* Our bo are always dirty and so we require
+                                * kswapd to reclaim our pages (direct reclaim
+                                * does not effectively begin pageout of our
+                                * buffers on its own). However, direct reclaim
+                                * only waits for kswapd when under allocation
+                                * congestion. So as a result __GFP_RECLAIM is
+                                * unreliable and fails to actually reclaim our
+                                * dirty pages -- unless you try over and over
+                                * again with !__GFP_NORETRY. However, we still
+                                * want to fail this allocation rather than
+                                * trigger the out-of-memory killer and for
+                                * this we want the future __GFP_MAYFAIL.
+                                */
                        }
-               }
+               } while (1);
+
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
@@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 
        mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
+       GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
 
        i915_gem_object_init(obj, &i915_gem_object_ops);
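The allocation loop above walks an escalation list: try the cheap no-reclaim gfp, run the shrinker on each failure, and only on the final pass (when the list's 0 terminator is reached) fall back to the mapping's full-reclaim mask before giving up with the page's error. A self-contained userspace model of that control flow (the allocator and shrinker below are stand-ins, not i915 code; cond_resched() is omitted):

        #include <errno.h>
        #include <stddef.h>

        static int shrunk;
        static void *model_alloc(int aggressive)
        {
                return (aggressive || shrunk >= 2) ? (void *)&shrunk : NULL;
        }
        static void model_shrink(unsigned int level) { (void)level; shrunk++; }

        static int get_page_model(void **page)
        {
                static const unsigned int shrink[] = { 0x7 /* bound|unbound|purgeable */, 0 };
                const unsigned int *s = shrink;
                int aggressive = 0;

                do {
                        *page = model_alloc(aggressive);
                        if (*page)
                                return 0;
                        if (!*s)
                                return -ENOMEM;   /* even the aggressive pass failed */
                        model_shrink(*s++);
                        if (!*s)
                                aggressive = 1;   /* last try: allow full reclaim */
                } while (1);
        }

        int main(void) { void *p; return get_page_model(&p); }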
 
index 5ddbc94997751adf5c9f04f7dd4a37a74d70de24..a74d0ac737cbeb7f9b9c5e93ea712a396e3c09d5 100644 (file)
@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       req->head = req->ring->tail;
+       req->head = req->ring->emit;
 
        /* Check that we didn't interrupt ourselves with a new request */
        GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
index 1642fff9cf135d5edbe85864d1b327d59002c026..ab5140ba108ddcb2c9c5382cc6439223704f9fda 100644 (file)
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
        GEM_BUG_ON(freespace < wqi_size);
 
        /* The GuC firmware wants the tail index in QWords, not bytes */
-       tail = rq->tail;
-       assert_ring_tail_valid(rq->ring, rq->tail);
-       tail >>= 3;
+       tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
        GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
 
        /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
index 96b0b01677e26b22f382868f4b8b4c6dd738a4b3..9106ea32b048cac4783ae316d7cc198a4bf8ae88 100644 (file)
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
 static void skylake_pfit_enable(struct intel_crtc *crtc);
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
-static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_modeset_setup_hw_state(struct drm_device *dev,
+                                        struct drm_modeset_acquire_ctx *ctx);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 
 struct intel_limit {
@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
        struct drm_crtc *crtc;
        int i, ret;
 
-       intel_modeset_setup_hw_state(dev);
+       intel_modeset_setup_hw_state(dev, ctx);
        i915_redisable_vga(to_i915(dev));
 
        if (!state)
@@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                intel_update_watermarks(intel_crtc);
 }
 
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+                                       struct drm_modeset_acquire_ctx *ctx)
 {
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
                return;
        }
 
-       state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+       state->acquire_ctx = ctx;
 
        /* Everything's already locked, -EDEADLK can't happen. */
        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
        intel_setup_outputs(dev_priv);
 
        drm_modeset_lock_all(dev);
-       intel_modeset_setup_hw_state(dev);
+       intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);
 
        for_each_intel_crtc(dev, crtc) {
@@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
        return 0;
 }
 
-static void intel_enable_pipe_a(struct drm_device *dev)
+static void intel_enable_pipe_a(struct drm_device *dev,
+                               struct drm_modeset_acquire_ctx *ctx)
 {
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct drm_connector *crt = NULL;
        struct intel_load_detect_pipe load_detect_temp;
-       struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
        int ret;
 
        /* We can't just switch on the pipe A, we need to set things up with a
@@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
                (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
 }
 
-static void intel_sanitize_crtc(struct intel_crtc *crtc)
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+                               struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                plane = crtc->plane;
                crtc->base.primary->state->visible = true;
                crtc->plane = !plane;
-               intel_crtc_disable_noatomic(&crtc->base);
+               intel_crtc_disable_noatomic(&crtc->base, ctx);
                crtc->plane = plane;
        }
 
@@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * resume. Force-enable the pipe to fix this, the update_dpms
                 * call below we restore the pipe to the right state, but leave
                 * the required bits on. */
-               intel_enable_pipe_a(dev);
+               intel_enable_pipe_a(dev, ctx);
        }
 
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc->active && !intel_crtc_has_encoders(crtc))
-               intel_crtc_disable_noatomic(&crtc->base);
+               intel_crtc_disable_noatomic(&crtc->base, ctx);
 
        if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
                /*
@@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
  * and sanitizes it to the current state
  */
 static void
-intel_modeset_setup_hw_state(struct drm_device *dev)
+intel_modeset_setup_hw_state(struct drm_device *dev,
+                            struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
@@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
        for_each_pipe(dev_priv, pipe) {
                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
-               intel_sanitize_crtc(crtc);
+               intel_sanitize_crtc(crtc, ctx);
                intel_dump_pipe_config(crtc, crtc->config,
                                       "[setup_hw_state]");
        }
index 6532e226db29b63da766a8571de231de4f7261f6..40ba3134545ef7e339c5bfe347501eeb7715dac0 100644 (file)
@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
        struct intel_panel *panel = &connector->panel;
 
-       intel_dp_aux_enable_backlight(connector);
-
        if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
                panel->backlight.max = 0xFFFF;
        else
index dac4e003c1f317ec402110132bad0c3a734bf52a..62f44d3e7c43c0d90df093050d5af6d3d68fe3a3 100644 (file)
@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
                rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
        u32 *reg_state = ce->lrc_reg_state;
 
-       assert_ring_tail_valid(rq->ring, rq->tail);
-       reg_state[CTX_RING_TAIL+1] = rq->tail;
+       reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
 
        /* True 32b PPGTT with dynamic page allocation: update PDP
         * registers and point the unallocated PDPs to scratch page.
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
                        ce->state->obj->mm.dirty = true;
                        i915_gem_object_unpin_map(ce->state->obj);
 
-                       ce->ring->head = ce->ring->tail = 0;
-                       intel_ring_update_space(ce->ring);
+                       intel_ring_reset(ce->ring, 0);
                }
        }
 }
index 66a2b8b83972691d04f2737337e7ea6cf6a72851..513a0f4b469b32c9d0ac2e87c089bb6f2e4907ba 100644 (file)
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
 
 void intel_ring_update_space(struct intel_ring *ring)
 {
-       ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+       ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
 }
 
 static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 
        i915_gem_request_submit(request);
 
-       assert_ring_tail_valid(request->ring, request->tail);
-       I915_WRITE_TAIL(request->engine, request->tail);
+       I915_WRITE_TAIL(request->engine,
+                       intel_ring_set_tail(request->ring, request->tail));
 }
 
 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1316,11 +1316,23 @@ err:
        return PTR_ERR(addr);
 }
 
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+       GEM_BUG_ON(!list_empty(&ring->request_list));
+       ring->tail = tail;
+       ring->head = tail;
+       ring->emit = tail;
+       intel_ring_update_space(ring);
+}
+
 void intel_ring_unpin(struct intel_ring *ring)
 {
        GEM_BUG_ON(!ring->vma);
        GEM_BUG_ON(!ring->vaddr);
 
+       /* Discard any unused bytes beyond that submitted to hw. */
+       intel_ring_reset(ring, ring->tail);
+
        if (i915_vma_is_map_and_fenceable(ring->vma))
                i915_vma_unpin_iomap(ring->vma);
        else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
+       /* Restart from the beginning of the rings for convenience */
        for_each_engine(engine, dev_priv, id)
-               engine->buffer->head = engine->buffer->tail;
+               intel_ring_reset(engine->buffer, 0);
 }
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
                unsigned space;
 
                /* Would completion of this request free enough space? */
-               space = __intel_ring_space(target->postfix, ring->tail,
+               space = __intel_ring_space(target->postfix, ring->emit,
                                           ring->size);
                if (space >= bytes)
                        break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
        struct intel_ring *ring = req->ring;
-       int remain_actual = ring->size - ring->tail;
-       int remain_usable = ring->effective_size - ring->tail;
+       int remain_actual = ring->size - ring->emit;
+       int remain_usable = ring->effective_size - ring->emit;
        int bytes = num_dwords * sizeof(u32);
        int total_bytes, wait_bytes;
        bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 
        if (unlikely(need_wrap)) {
                GEM_BUG_ON(remain_actual > ring->space);
-               GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+               GEM_BUG_ON(ring->emit + remain_actual > ring->size);
 
                /* Fill the tail with MI_NOOP */
-               memset(ring->vaddr + ring->tail, 0, remain_actual);
-               ring->tail = 0;
+               memset(ring->vaddr + ring->emit, 0, remain_actual);
+               ring->emit = 0;
                ring->space -= remain_actual;
        }
 
-       GEM_BUG_ON(ring->tail > ring->size - bytes);
-       cs = ring->vaddr + ring->tail;
-       ring->tail += bytes;
+       GEM_BUG_ON(ring->emit > ring->size - bytes);
+       cs = ring->vaddr + ring->emit;
+       ring->emit += bytes;
        ring->space -= bytes;
        GEM_BUG_ON(ring->space < 0);
 
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
        int num_dwords =
-               (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+               (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        u32 *cs;
 
        if (num_dwords == 0)
index a82a0807f64dbd0624728fe3c65215abe3647565..f7144fe0961347826c62e620af879bb4db9f0d77 100644 (file)
@@ -145,6 +145,7 @@ struct intel_ring {
 
        u32 head;
        u32 tail;
+       u32 emit;
 
        int space;
        int size;
@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
 int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+void intel_ring_update_space(struct intel_ring *ring);
 void intel_ring_unpin(struct intel_ring *ring);
 void intel_ring_free(struct intel_ring *ring);
 
@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
         * reserved for the command packet (i.e. the value passed to
         * intel_ring_begin()).
         */
-       GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
+       GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
 }
 
 static inline u32
@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
        GEM_BUG_ON(tail >= ring->size);
 }
 
-void intel_ring_update_space(struct intel_ring *ring);
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+       /* Whilst writes to the tail are strictly ordered, there is no
+        * serialisation between readers and the writers. The tail may be
+        * read by i915_gem_request_retire() just as it is being updated
+        * by execlists, as although the breadcrumb is complete, the context
+        * switch hasn't been seen.
+        */
+       assert_ring_tail_valid(ring, tail);
+       ring->tail = tail;
+       return tail;
+}
 
 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
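Across these i915 hunks, ring->emit becomes the CPU-side write cursor (advanced by intel_ring_begin()) while ring->tail is only ever set to a value already handed to the hardware via intel_ring_set_tail(); space accounting therefore uses emit, not tail. A userspace model of the space computation (the size and reserved gap are illustrative):

        #include <assert.h>

        #define RING_SIZE 4096
        #define RESERVED  64    /* keep head != emit when the ring is full */

        static int ring_space(int head, int emit, int size)
        {
                int space = head - emit;
                if (space <= 0)
                        space += size;
                return space - RESERVED;
        }

        int main(void)
        {
                assert(ring_space(0, 0, RING_SIZE) == RING_SIZE - RESERVED);
                assert(ring_space(256, 512, RING_SIZE) == RING_SIZE - 256 - RESERVED);
                return 0;
        }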
 
index 432480ff9d228857d57170b3353c143bf0501c3f..3178ba0c537c1915af3b857aad83efd6371f17ad 100644 (file)
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
            rdev->pdev->subsystem_vendor == 0x103c &&
            rdev->pdev->subsystem_device == 0x280a)
                return;
+       /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+        * - it hangs on resume inside the dynclk 1 table.
+        */
+       if (rdev->family == CHIP_RS400 &&
+           rdev->pdev->subsystem_vendor == 0x1179 &&
+           rdev->pdev->subsystem_device == 0xff31)
+               return;
 
        /* DYN CLK 1 */
        table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
index 6ecf42783d4b0c45539325edd2e5ffd2fc08e29f..0a6444d72000c434a6b494229a75c1a7d3e41631 100644 (file)
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
         * https://bugzilla.kernel.org/show_bug.cgi?id=51381
         */
        { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+       /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+        * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+        */
+       { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
        /* macbook pro 8.2 */
        { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
        { 0, 0, 0, 0, 0 },
index 8ca1e8ce0af24e325957526c125ccf55d9081eb8..4f9a3938189a020ca6257b85182e6c1b53426158 100644 (file)
 #define USB_VENDOR_ID_DELCOM           0x0fc5
 #define USB_DEVICE_ID_DELCOM_VISUAL_IND        0xb080
 
+#define USB_VENDOR_ID_DELL                             0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE    0x301a
+
 #define USB_VENDOR_ID_DELORME          0x1163
 #define USB_DEVICE_ID_DELORME_EARTHMATE        0x0100
 #define USB_DEVICE_ID_DELORME_EM_LT20  0x0200
index 1d6c997b300149269367d00fb5db66b7c2ea25b7..20b40ad2632503754685b84cc07d8787a4a44515 100644 (file)
@@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 
        if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
                magicmouse_emit_buttons(msc, clicks & 3);
-               input_mt_report_pointer_emulation(input, true);
                input_report_rel(input, REL_X, x);
                input_report_rel(input, REL_Y, y);
        } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
                __clear_bit(BTN_RIGHT, input->keybit);
                __clear_bit(BTN_MIDDLE, input->keybit);
                __set_bit(BTN_MOUSE, input->keybit);
+               __set_bit(BTN_TOOL_FINGER, input->keybit);
+               __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+               __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+               __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+               __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+               __set_bit(BTN_TOUCH, input->keybit);
+               __set_bit(INPUT_PROP_POINTER, input->propbit);
                __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        }
 
-       __set_bit(BTN_TOOL_FINGER, input->keybit);
-       __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
-       __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
-       __set_bit(BTN_TOOL_QUADTAP, input->keybit);
-       __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
-       __set_bit(BTN_TOUCH, input->keybit);
-       __set_bit(INPUT_PROP_POINTER, input->propbit);
 
        __set_bit(EV_ABS, input->evbit);
 
index 6316498b78128574ff63b0047772f009e6d0cfeb..a88e7c7bea0a0cb7d069c262c026231f3bc75dea 100644 (file)
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
index 95ed17183e73e904e06b13c383bee28410797172..54a47b40546f69c7ea0d3dbf033c22c95f106516 100644 (file)
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
                 * the first read operation, otherwise the first read cost
                 * one extra clock cycle.
                 */
-               temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                temp |= I2CR_MTX;
-               writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+               imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
        }
        msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
 
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
                                 * the first read operation, otherwise the first read cost
                                 * one extra clock cycle.
                                 */
-                               temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+                               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                                temp |= I2CR_MTX;
-                               writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+                               imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
                        }
                } else if (i == (msgs->len - 2)) {
                        dev_dbg(&i2c_imx->adapter.dev,
index 7910bfe50da4469c44b571363cc6696f74f5fa42..93b18108816813bd4d49e0ba3e6eb57be7ae3d9f 100644 (file)
@@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
 {
        struct bio *bio;
-       spin_lock_irq(&ic->endio_wait.lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ic->endio_wait.lock, flags);
        bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
        bio_list_add(&ic->flush_bio_list, bio);
-       spin_unlock_irq(&ic->endio_wait.lock);
+       spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+
        queue_work(ic->commit_wq, &ic->commit_work);
 }
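spin_lock_irq()/spin_unlock_irq() unconditionally re-enable interrupts on unlock, which corrupts state if the caller already held interrupts off; the irqsave/irqrestore pair used here preserves whatever the caller had. The general shape, for a function that may be entered from any context:

        unsigned long flags;

        spin_lock_irqsave(&lock, flags);        /* leaves prior IRQ state intact */
        /* ... critical section ... */
        spin_unlock_irqrestore(&lock, flags);   /* restores, never force-enables */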
 
@@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                ti->error = "The device is too small";
                goto bad;
        }
+       if (ti->len > ic->provided_data_sectors) {
+               r = -EINVAL;
+               ti->error = "Not enough provided sectors for requested mapping size";
+               goto bad;
+       }
 
        if (!buffer_sectors)
                buffer_sectors = 1;
index 3702e502466d37a902c64a74a1f5ad7b516770bb..8d5ca30f655123611b5dcd2ba3d35b52e9c6c447 100644 (file)
@@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region,
        else if (op == REQ_OP_WRITE_SAME)
                special_cmd_max_sectors = q->limits.max_write_same_sectors;
        if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
-            op == REQ_OP_WRITE_SAME)  &&
-           special_cmd_max_sectors == 0) {
+            op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+               atomic_inc(&io->count);
                dec_count(io, region, -EOPNOTSUPP);
                return;
        }
index e61c45047c25a9ba2683c313fbc2151c9051b178..4da8858856fb3019c16d5681c241f8733dca21ec 100644 (file)
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
        struct mirror *m;
+       /* if details->bi_bdev == NULL, details were not saved */
        struct dm_bio_details details;
        region_t write_region;
 };
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
        struct dm_raid1_bio_record *bio_record =
          dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
+       bio_record->details.bi_bdev = NULL;
+
        if (rw == WRITE) {
                /* Save region for mirror_end_io() handler */
                bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
        }
 
        if (error == -EOPNOTSUPP)
-               return error;
+               goto out;
 
        if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-               return error;
+               goto out;
 
        if (unlikely(error)) {
+               if (!bio_record->details.bi_bdev) {
+                       /*
+                        * There wasn't enough memory to record necessary
+                        * information for a retry or there was no other
+                        * mirror in-sync.
+                        */
+                       DMERR_LIMIT("Mirror read failed.");
+                       return -EIO;
+               }
+
                m = bio_record->m;
 
                DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                        bd = &bio_record->details;
 
                        dm_bio_restore(bd, bio);
+                       bio_record->details.bi_bdev = NULL;
                        bio->bi_error = 0;
 
                        queue_bio(ms, bio, rw);
@@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                DMERR("All replicated volumes dead, failing I/O");
        }
 
+out:
+       bio_record->details.bi_bdev = NULL;
+
        return error;
 }
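mirror_end_io() can only retry a failed read if mirror_map() actually saved the bio details, so the record's bi_bdev pointer doubles as a "details saved" sentinel: cleared on map and on every exit path, set when the details are recorded. A sketch of the idiom, with stand-in helpers (will_need_retry, save_details, restore_and_requeue are hypothetical names):

        struct record { struct block_device *bi_bdev; /* NULL => nothing saved */ };

        /* map path */
        rec->bi_bdev = NULL;             /* assume nothing until we record */
        if (will_need_retry(bio))
                save_details(rec, bio);  /* sets rec->bi_bdev */

        /* completion path */
        if (error) {
                if (!rec->bi_bdev)
                        return -EIO;     /* nothing saved: cannot retry */
                restore_and_requeue(rec, bio);
                rec->bi_bdev = NULL;     /* consume the record exactly once */
        }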
 
index 75488e65cd96cf6484e300d47280df77f1eec77a..8d46e3ad9529d46a2b2e27229668e517ef38552f 100644 (file)
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
        int ret;
 
        ret = regmap_read_poll_timeout(arizona->regmap,
-                                      ARIZONA_INTERRUPT_RAW_STATUS_5, val,
-                                      ((val & mask) == target),
+                                      reg, val, ((val & mask) == target),
                                       ARIZONA_REG_POLL_DELAY_US,
                                       timeout_ms * 1000);
        if (ret)
index ea1bfcf1870afbc5806e61dd6416669216f38ce7..53309f659951042ca6301c95e110910414031e70 100644 (file)
@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
 {
        int err;
 
+       mutex_lock(&uld_mutex);
        err = setup_sge_queues(adap);
        if (err)
-               goto out;
+               goto rel_lock;
        err = setup_rss(adap);
        if (err)
                goto freeq;
@@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap)
                        goto irq_err;
        }
 
-       mutex_lock(&uld_mutex);
        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
@@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap)
 #endif
        /* Initialize hash mac addr list*/
        INIT_LIST_HEAD(&adap->mac_hlist);
- out:
        return err;
+
  irq_err:
        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
  freeq:
        t4_free_sge_resources(adap);
-       goto out;
+ rel_lock:
+       mutex_unlock(&uld_mutex);
+       return err;
 }
 
 static void cxgb_down(struct adapter *adapter)
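cxgb_up() now takes uld_mutex before queue setup so the whole bring-up is serialized against ULD registration, which means every early failure must release it; the new rel_lock label does that, and the old shared "out" exit is gone. The standard goto-unwind shape this follows (generic names):

        int err;

        mutex_lock(&lock);
        err = step_a();
        if (err)
                goto unlock;
        err = step_b();
        if (err)
                goto undo_a;
        mutex_unlock(&lock);
        return 0;

 undo_a:
        undo_a_cleanup();
 unlock:
        mutex_unlock(&lock);
        return err;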
index 9a520e4f0df9a0d47b75f71f01557414ba3d4eab..290ad0563320d6bd9f59212d0cd70b765336441d 100644 (file)
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
        priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
        /* device used for DMA mapping */
-       arch_setup_dma_ops(dev, 0, 0, NULL, false);
+       set_dma_ops(dev, get_dma_ops(&pdev->dev));
        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
        if (err) {
                dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
index 0b31f8502adae2e86c292fb99437ee943c0794bf..6e67d22fd0d54f69e5ee3358717e7ed539fad952 100644 (file)
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
                goto no_mem;
        }
 
+       set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
+
        ret = platform_device_add_data(pdev, &data, sizeof(data));
        if (ret)
                goto err;
index b8fab149690f880394f0d973560aecca69d1171e..e95795b3c84160c6dd567c3a725d66f7886fe6fe 100644 (file)
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
 
                /* Force 1000M Link, Default is 0x0200 */
                phy_write(phy_dev, 7, 0x20C);
-               phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 
-               /* Enable PHY loop-back */
+               /* Powerup Fiber */
+               phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+               val = phy_read(phy_dev, COPPER_CONTROL_REG);
+               val &= ~PHY_POWER_DOWN;
+               phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
+               /* Enable PHY loopback */
+               phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
                val = phy_read(phy_dev, COPPER_CONTROL_REG);
                val |= PHY_LOOP_BACK;
                val &= ~PHY_POWER_DOWN;
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
                phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
                phy_write(phy_dev, 1, 0x400);
                phy_write(phy_dev, 7, 0x200);
+
+               phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+               val = phy_read(phy_dev, COPPER_CONTROL_REG);
+               val |= PHY_POWER_DOWN;
+               phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
                phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
                phy_write(phy_dev, 9, 0xF00);
 
index 8209affa75c3e5e0419634740d6985970f61fa67..16486dff14933807d47b2a99f970ffbe3996b3a2 100644 (file)
@@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
                                 SOF_TIMESTAMPING_RX_HARDWARE |
                                 SOF_TIMESTAMPING_RAW_HARDWARE;
 
-       info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
-                        (BIT(1) << HWTSTAMP_TX_ON);
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+                        BIT(HWTSTAMP_TX_ON);
 
-       info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
-                          (BIT(1) << HWTSTAMP_FILTER_ALL);
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_ALL);
 
        return 0;
 }
index 41cd22a223dccbd9dae460331525a1fc2414be9e..277f4de303751d6328a6bcf3282f28e6d5565e5d 100644 (file)
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
        return netdev;
 
 err_cleanup_nic:
-       profile->cleanup(priv);
+       if (profile->cleanup)
+               profile->cleanup(priv);
        free_netdev(netdev);
 
        return NULL;
index 79462c0368a0781ca7a38354726ed628097c0c89..46984a52a94bb7fad172704b177bdf1c81ef27b8 100644 (file)
@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
        params->tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        params->num_tc                = 1;
        params->lro_wqe_sz            = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+       mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
index ec63158ab64330c939ffa4ca8c001f8134f8e4e2..9df9fc0d26f5b89457b1406198fa5c51d50536b3 100644 (file)
@@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = {
        {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
        {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},
 
-       {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
        {MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
        {MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
        {MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},
index f991f669047e5df2531f043c3934d248bf3099b8..a53e982a68634b5129324fb5af58776804fc5853 100644 (file)
@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
        return 0;
 }
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev;
-       u16 cur_mlx5_mode, mlx5_mode = 0;
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
 
-       dev = devlink_priv(devlink);
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return -EOPNOTSUPP;
 
        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;
 
-       cur_mlx5_mode = dev->priv.eswitch->mode;
-
-       if (cur_mlx5_mode == SRIOV_NONE)
+       if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
+       return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       u16 cur_mlx5_mode, mlx5_mode = 0;
+       int err;
+
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
+
+       cur_mlx5_mode = dev->priv.eswitch->mode;
+
        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
-       struct mlx5_core_dev *dev;
-
-       dev = devlink_priv(devlink);
-
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       int err;
 
-       if (dev->priv.eswitch->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
-       int num_vports = esw->enabled_vports;
        int err, vport;
        u8 mlx5_mode;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
        if (err)
                goto out;
 
-       for (vport = 1; vport < num_vports; vport++) {
+       for (vport = 1; vport < esw->enabled_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
                        esw_warn(dev, "Failed to set min inline on vport %d\n",
@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
+       int err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
            (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
+       int err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
-
-       if (esw->mode == SRIOV_NONE)
-               return -EOPNOTSUPP;
+       err = mlx5_devlink_eswitch_check(devlink);
+       if (err)
+               return err;
 
        *encap = esw->offloads.encap;
        return 0;
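All five devlink eswitch entry points previously duplicated the vport_group_manager and SRIOV_NONE checks; mlx5_devlink_eswitch_check() centralizes them and adds one new precondition (the port must be Ethernet), so every caller collapses to the same prologue:

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;   /* -EOPNOTSUPP unless ETH port, vport manager, SR-IOV enabled */

Keeping the preconditions in one helper also means any future check lands in every entry point at once.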
index 4f577a5abf884645910203aace25ad9605e171d8..13be264587f135737887688005ed7ba872b54157 100644 (file)
@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
        },
 };
 
-#define FW_INIT_TIMEOUT_MILI   2000
-#define FW_INIT_WAIT_MS                2
+#define FW_INIT_TIMEOUT_MILI           2000
+#define FW_INIT_WAIT_MS                        2
+#define FW_PRE_INIT_TIMEOUT_MILI       10000
 
 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
 {
@@ -1013,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
         */
        dev->state = MLX5_DEVICE_STATE_UP;
 
+       /* wait for firmware to accept initialization segment configurations
+        */
+       err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+       if (err) {
+               dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+                       FW_PRE_INIT_TIMEOUT_MILI);
+               goto out;
+       }
+
        err = mlx5_cmd_init(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
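The new pre-init wait reuses wait_fw_init() with a 10-second budget before mlx5_cmd_init(), so the driver does not start command-interface setup while firmware is still in its pre-initializing state. wait_fw_init() itself is a bounded poll, roughly:

        static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
        {
                unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);

                while (fw_initializing(dev)) {        /* reads the init segment bit */
                        if (time_after(jiffies, end))
                                return -EBUSY;
                        msleep(FW_INIT_WAIT_MS);
                }
                return 0;
        }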
index b7e4345c990d55454f52cbcb9c7c4d5df66ef176..019cef1d3cf72ce2d34b3a36283cabf9227c9879 100644 (file)
@@ -661,8 +661,6 @@ restore_filters:
                up_write(&vf->efx->filter_sem);
                mutex_unlock(&vf->efx->mac_lock);
 
-               up_write(&vf->efx->filter_sem);
-
                rc2 = efx_net_open(vf->efx->net_dev);
                if (rc2)
                        goto reset_nic;
index d16d11bfc046467c41edab070bd3015bd932d338..6e4cbc6ce0efd9843a55341ec47eb76fd9932327 100644 (file)
@@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_q->tx_skbuff_dma[first_entry].buf = des;
        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
-       tx_q->tx_skbuff[first_entry] = skb;
 
        first->des0 = cpu_to_le32(des);
 
@@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
 
+       /* Only the last descriptor gets to point to the skb. */
+       tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+
+       /* We've used all descriptors we need for this skb, however,
+        * advance cur_tx so that it references a fresh descriptor.
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 
        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        first = desc;
 
-       tx_q->tx_skbuff[first_entry] = skb;
-
        enh_desc = priv->plat->enh_desc;
        /* To program the descriptors according to the size of the frame */
        if (enh_desc)
@@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                                                skb->len);
        }
 
-       entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+       /* Only the last descriptor gets to point to the skb. */
+       tx_q->tx_skbuff[entry] = skb;
 
+       /* We've used all descriptors we need for this skb, however,
+        * advance cur_tx so that it references a fresh descriptor.
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
+       entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
        tx_q->cur_tx = entry;
 
        if (netif_msg_pktdata(priv)) {
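Both TX paths make the same fix: tx_skbuff[] used to point the first descriptor at the skb, but stmmac_tx_clean() frees an skb as soon as it reaches a descriptor that owns one, so a multi-descriptor frame could be freed while later segments were still in flight. Recording the skb only on the last descriptor defers the free until the whole frame has completed. The cleanup side this relies on looks roughly like (descriptor_done and unmap_segment are assumed shapes, not verbatim driver code):

        while (entry != tx_q->cur_tx && descriptor_done(entry)) {
                unmap_segment(tx_q, entry);
                if (tx_q->tx_skbuff[entry]) {                 /* only the last segment */
                        dev_consume_skb_any(tx_q->tx_skbuff[entry]);
                        tx_q->tx_skbuff[entry] = NULL;
                }
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
        }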
index c7c1e9906500fd5be3d386ea4c43848caaff6ac1..d231042f19d6462018bc10fee0c789196c51abf9 100644 (file)
@@ -442,7 +442,7 @@ struct brcmf_fw {
        const char *nvram_name;
        u16 domain_nr;
        u16 bus_nr;
-       void (*done)(struct device *dev, const struct firmware *fw,
+       void (*done)(struct device *dev, int err, const struct firmware *fw,
                     void *nvram_image, u32 nvram_len);
 };
 
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
        if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
                goto fail;
 
-       fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+       fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
        kfree(fwctx);
        return;
 
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
        release_firmware(fwctx->code);
-       device_release_driver(fwctx->dev);
+       fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
        kfree(fwctx);
 }
 
 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
 {
        struct brcmf_fw *fwctx = ctx;
-       int ret;
+       int ret = 0;
 
        brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-       if (!fw)
+       if (!fw) {
+               ret = -ENOENT;
                goto fail;
-
-       /* only requested code so done here */
-       if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
-               fwctx->done(fwctx->dev, fw, NULL, 0);
-               kfree(fwctx);
-               return;
        }
+       /* only requested code so done here */
+       if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+               goto done;
+
        fwctx->code = fw;
        ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
                                      fwctx->dev, GFP_KERNEL, fwctx,
                                      brcmf_fw_request_nvram_done);
 
-       if (!ret)
-               return;
-
-       brcmf_fw_request_nvram_done(NULL, fwctx);
+       /* pass NULL to nvram callback for bcm47xx fallback */
+       if (ret)
+               brcmf_fw_request_nvram_done(NULL, fwctx);
        return;
 
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
-       device_release_driver(fwctx->dev);
+done:
+       fwctx->done(fwctx->dev, ret, fw, NULL, 0);
        kfree(fwctx);
 }
 
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
                                const char *code, const char *nvram,
-                               void (*fw_cb)(struct device *dev,
+                               void (*fw_cb)(struct device *dev, int err,
                                              const struct firmware *fw,
                                              void *nvram_image, u32 nvram_len),
                                u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
                           const char *code, const char *nvram,
-                          void (*fw_cb)(struct device *dev,
+                          void (*fw_cb)(struct device *dev, int err,
                                         const struct firmware *fw,
                                         void *nvram_image, u32 nvram_len))
 {
index d3c9f0d52ae3326eb18a76aaf735bb602a211a35..8fa4b7e1ab3db71c2522aa9a275ac9e6393eb0a7 100644 (file)
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
  */
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
                                const char *code, const char *nvram,
-                               void (*fw_cb)(struct device *dev,
+                               void (*fw_cb)(struct device *dev, int err,
                                              const struct firmware *fw,
                                              void *nvram_image, u32 nvram_len),
                                u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
                           const char *code, const char *nvram,
-                          void (*fw_cb)(struct device *dev,
+                          void (*fw_cb)(struct device *dev, int err,
                                         const struct firmware *fw,
                                         void *nvram_image, u32 nvram_len));
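The done callback gains an int err parameter, moving failure policy out of the common firmware code and into each bus: the core now always invokes the callback (with -ENOENT and NULL buffers on failure) instead of calling device_release_driver() itself, and the PCIe/SDIO/USB hunks below check err before touching drvdata. A consumer under the new contract looks like this sketch (my_bus_fw_done is a hypothetical name):

        static void my_bus_fw_done(struct device *dev, int err,
                                   const struct firmware *fw,
                                   void *nvram, u32 nvram_len)
        {
                if (err)        /* fw and nvram are NULL on failure */
                        goto fail;

                /* ... download fw/nvram to the dongle ... */
                return;

        fail:
                /* bus-specific unwind; the core no longer releases the driver */
                device_release_driver(dev);
        }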
 
index 72373e59308e8fe54cf14090efd135a045273f4a..f59642b2c935a503f36600f4573861a7597ad267 100644 (file)
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
        struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
        struct brcmf_fws_mac_descriptor *entry;
 
-       if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
+       if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
                return;
 
        entry = &fws->desc.iface[ifp->ifidx];
index f36b96dc6acdfc2160ba35e187a46356a3b41c28..f878706613e679515410e573eb770f1b1a28ef26 100644 (file)
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
        .write32 = brcmf_pcie_buscore_write32,
 };
 
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+                            const struct firmware *fw,
                             void *nvram, u32 nvram_len)
 {
-       struct brcmf_bus *bus = dev_get_drvdata(dev);
-       struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
-       struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+       struct brcmf_bus *bus;
+       struct brcmf_pciedev *pcie_bus_dev;
+       struct brcmf_pciedev_info *devinfo;
        struct brcmf_commonring **flowrings;
-       int ret;
        u32 i;
 
+       /* check firmware loading result */
+       if (ret)
+               goto fail;
+
+       bus = dev_get_drvdata(dev);
+       pcie_bus_dev = bus->bus_priv.pcie;
+       devinfo = pcie_bus_dev->devinfo;
        brcmf_pcie_attach(devinfo);
 
        /* Some of the firmwares have the size of the memory of the device
index e03450059b06c0bfe510148f985c19668bcd3dff..5653d6dd38f6fe5c5132f2d7940facd31bef6549 100644 (file)
@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
        .get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
                                         const struct firmware *code,
                                         void *nvram, u32 nvram_len)
 {
-       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-       struct brcmf_sdio *bus = sdiodev->bus;
-       int err = 0;
+       struct brcmf_bus *bus_if;
+       struct brcmf_sdio_dev *sdiodev;
+       struct brcmf_sdio *bus;
        u8 saveclk;
 
-       brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+       brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+       bus_if = dev_get_drvdata(dev);
+       sdiodev = bus_if->bus_priv.sdio;
+       if (err)
+               goto fail;
 
        if (!bus_if->drvr)
                return;
 
+       bus = sdiodev->bus;
+
        /* try to download image and nvram to the dongle */
        bus->alp_only = true;
        err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4083,6 +4088,7 @@ release:
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
        device_release_driver(dev);
+       device_release_driver(&sdiodev->func[2]->dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
index e4d545f9edeef6f119a0b67f50db679bedfcd91f..0eea48e73331d57297099266b1725df2be35a565 100644 (file)
@@ -1159,17 +1159,18 @@ fail:
        return ret;
 }
 
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
                                   const struct firmware *fw,
                                   void *nvram, u32 nvlen)
 {
        struct brcmf_bus *bus = dev_get_drvdata(dev);
-       struct brcmf_usbdev_info *devinfo;
-       int ret;
+       struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+       if (ret)
+               goto error;
 
        brcmf_dbg(USB, "Start fw downloading\n");
 
-       devinfo = bus->bus_priv.usb->devinfo;
        ret = check_file(fw->data);
        if (ret < 0) {
                brcmf_err("invalid firmware\n");
index 1482d132fbb879ab39ec62dadcd48e9445feced9..e432ec887479d32b2be5afcc1a26e295909b04cb 100644 (file)
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
        .flags        = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void amd_gpio_irq_handler(struct irq_desc *desc)
+#define PIN_IRQ_PENDING        (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
+
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
 {
-       u32 i;
-       u32 off;
-       u32 reg;
-       u32 pin_reg;
-       u64 reg64;
-       int handled = 0;
-       unsigned int irq;
+       struct amd_gpio *gpio_dev = dev_id;
+       struct gpio_chip *gc = &gpio_dev->gc;
+       irqreturn_t ret = IRQ_NONE;
+       unsigned int i, irqnr;
        unsigned long flags;
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-       struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-       struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+       u32 *regs, regval;
+       u64 status, mask;
 
-       chained_irq_enter(chip, desc);
-       /*enable GPIO interrupt again*/
+       /* Read the wake status */
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-       reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
-       reg64 = reg;
-       reg64 = reg64 << 32;
-
-       reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
-       reg64 |= reg;
+       status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+       status <<= 32;
+       status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-       /*
-        * first 46 bits indicates interrupt status.
-        * one bit represents four interrupt sources.
-       */
-       for (off = 0; off < 46 ; off++) {
-               if (reg64 & BIT(off)) {
-                       for (i = 0; i < 4; i++) {
-                               pin_reg = readl(gpio_dev->base +
-                                               (off * 4 + i) * 4);
-                               if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
-                                       (pin_reg & BIT(WAKE_STS_OFF))) {
-                                       irq = irq_find_mapping(gc->irqdomain,
-                                                               off * 4 + i);
-                                       generic_handle_irq(irq);
-                                       writel(pin_reg,
-                                               gpio_dev->base
-                                               + (off * 4 + i) * 4);
-                                       handled++;
-                               }
-                       }
+       /* Bits 0-45 contain the relevant status bits */
+       status &= (1ULL << 46) - 1;
+       regs = gpio_dev->base;
+       for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
+               if (!(status & mask))
+                       continue;
+               status &= ~mask;
+
+               /* Each status bit covers four pins */
+               for (i = 0; i < 4; i++) {
+                       regval = readl(regs + i);
+                       if (!(regval & PIN_IRQ_PENDING))
+                               continue;
+                       irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+                       generic_handle_irq(irq);
+                       /* Clear interrupt */
+                       writel(regval, regs + i);
+                       ret = IRQ_HANDLED;
                }
        }
 
-       if (handled == 0)
-               handle_bad_irq(desc);
-
+       /* Signal EOI to the GPIO unit */
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-       reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
-       reg |= EOI_MASK;
-       writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+       regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+       regval |= EOI_MASK;
+       writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-       chained_irq_exit(chip, desc);
+       return ret;
 }
 
 static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
                goto out2;
        }
 
-       gpiochip_set_chained_irqchip(&gpio_dev->gc,
-                                &amd_gpio_irqchip,
-                                irq_base,
-                                amd_gpio_irq_handler);
+       ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
+                              KBUILD_MODNAME, gpio_dev);
+       if (ret)
+               goto out2;
+
        platform_set_drvdata(pdev, gpio_dev);
 
        dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
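Two things change in this file: the handler is registered as a regular interrupt via devm_request_irq() rather than a chained one, and the status walk is restructured around a single u64 in which each of bits 0-45 fans out to four per-pin registers. The walk itself can be exercised stand-alone; a small sketch in plain C, with a hypothetical register layout:

    #include <stdint.h>
    #include <stdio.h>

    #define PINS_PER_BIT    4
    #define STATUS_BITS     46
    #define IRQ_PENDING     0x1u    /* stand-in for the per-pin pending flag */

    /* Walk a 46-bit status word; each set bit covers four consecutive
     * "registers".  Returns the number of pending pins handled. */
    static int scan_status(uint64_t status, const uint32_t *regs)
    {
            uint64_t mask;
            unsigned int irqnr, i;
            int handled = 0;

            status &= (1ULL << STATUS_BITS) - 1;
            for (mask = 1, irqnr = 0; status; mask <<= 1, irqnr += PINS_PER_BIT) {
                    if (!(status & mask))
                            continue;
                    status &= ~mask;  /* loop ends once all set bits are seen */
                    for (i = 0; i < PINS_PER_BIT; i++)
                            if (regs[irqnr + i] & IRQ_PENDING)
                                    handled++;
            }
            return handled;
    }

    int main(void)
    {
            uint32_t regs[STATUS_BITS * PINS_PER_BIT] = { 0 };

            regs[5] = IRQ_PENDING;          /* covered by status bit 1 */
            regs[180] = IRQ_PENDING;        /* covered by status bit 45 */
            printf("%d pins handled\n",
                   scan_status((1ULL << 1) | (1ULL << 45), regs));
            return 0;
    }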
index d3c5f5dfbbd7974258e4105e3da7114ba7b2324a..222b6685b09f2d72f144daadbf3a3b67a06c1109 100644 (file)
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
                break;
        case PIN_CONFIG_OUTPUT:
                __stm32_gpio_set(bank, offset, arg);
-               ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+               ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
                break;
        default:
                ret = -EINVAL;
index 8bc7ee1a8ca81626829329e80831ffa2d57b8c63..507512cc478b1dd2632e033ac7d9a2dc99b8ab5d 100644 (file)
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
                QEDI_ERR(&qedi->dbg_ctx,
                         "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
                         protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
-               WARN_ON(1);
        }
 }
 
index 09a294634bc7e8898a2d209a9a5cef3d50eb8f32..879d3b7462f94f38bba4618aecf51dde947aa458 100644 (file)
@@ -1499,11 +1499,9 @@ err_idx:
 
 void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
 {
-       if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+       if (!test_and_clear_bit(idx, qedi->task_idx_map))
                QEDI_ERR(&qedi->dbg_ctx,
                         "FW task context, already cleared, tid=0x%x\n", idx);
-               WARN_ON(1);
-       }
 }
 
 void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
index 734cbf8d9676bd6f6f26561249504ccffd9f8360..dd9f1bebb5a3a980b55e5d0fb758c93e4f694722 100644 (file)
@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
        int status;
 
        token = (autofs_wqt_t) param->fail.token;
-       status = param->fail.status ? param->fail.status : -ENOENT;
+       status = param->fail.status < 0 ? param->fail.status : -ENOENT;
        return autofs4_wait_release(sbi, token, status);
 }
 
index 0fd081bd2a2f5d3fb4ed18fdcb7a1371cf9f5627..fcef70602b278b48ffd74e97f10d2a57b6968cc3 100644 (file)
@@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
        if (!is_sync_kiocb(iocb))
                ctx->iocb = iocb;
 
-       if (to->type & ITER_IOVEC)
+       if (to->type == ITER_IOVEC)
                ctx->should_dirty = true;
 
        rc = setup_aio_ctx_iter(ctx, to, READ);
index b08531977daa4084f774c75de33204b2b6fa0902..3b147dc6af6344ee5c5e616466403f2cc211dbcb 100644 (file)
@@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 
        if (!pages) {
                pages = vmalloc(max_pages * sizeof(struct page *));
-               if (!bv) {
+               if (!pages) {
                        kvfree(bv);
                        return -ENOMEM;
                }
index 27bc360c7ffd7e1081f907c5f080dc4ba439fbfc..a723df3e01978cdca30afbf107772898ce66f33a 100644 (file)
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                     struct cifs_fid *fid, __u16 search_flags,
                     struct cifs_search_info *srch_inf)
 {
-       return CIFSFindFirst(xid, tcon, path, cifs_sb,
-                            &fid->netfid, search_flags, srch_inf, true);
+       int rc;
+
+       rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+                          &fid->netfid, search_flags, srch_inf, true);
+       if (rc)
+               cifs_dbg(FYI, "find first failed=%d\n", rc);
+       return rc;
 }
 
 static int
index c58691834eb2b74fa34f3fe2661ed3e211c4d22e..7e48561abd299012616428d28f256906a7c5381f 100644 (file)
@@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
        kfree(utf16_path);
        if (rc) {
-               cifs_dbg(VFS, "open dir failed\n");
+               cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
                return rc;
        }
 
@@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
                                  fid->volatile_fid, 0, srch_inf);
        if (rc) {
-               cifs_dbg(VFS, "query directory failed\n");
+               cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
                SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
        }
        return rc;
@@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 
        sg = init_sg(rqst, sign);
        if (!sg) {
-               cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc);
+               cifs_dbg(VFS, "%s: Failed to init sg", __func__);
+               rc = -ENOMEM;
                goto free_req;
        }
 
@@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
        iv = kzalloc(iv_len, GFP_KERNEL);
        if (!iv) {
                cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
+               rc = -ENOMEM;
                goto free_sg;
        }
        iv[0] = 3;
index 3cb5c9e2d4e78f641549818fbbad7681b193854d..de50e749ff058d79c67f7462962614c8c835ecdb 100644 (file)
@@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
        pcreatetime = (__u64 *)value;
        *pcreatetime = CIFS_I(inode)->createtime;
        return sizeof(__u64);
-
-       return rc;
 }
 
 
index 323ea481d4a8c84442c4848c2a26d4a795c9dc9f..33d05aa02aadbb85b75304d2a7b9040d526e1019 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
                        if (ret < 0)
                                goto out;
                }
+               start_index = indices[pvec.nr - 1] + 1;
        }
 out:
        put_dax(dax_dev);
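The added line advances start_index past the last tagged entry of each batch, guaranteeing forward progress through the lookup rather than rescanning the same tail entries. Reduced to a plain batched loop over a sorted array (hypothetical names):

    #include <stdio.h>

    #define BATCH   3

    /* Hypothetical batched lookup: fills up to BATCH indices >= start
     * from a sorted table, returns how many were found. */
    static size_t lookup_batch(const size_t *table, size_t n, size_t start,
                               size_t *out)
    {
            size_t i, found = 0;

            for (i = 0; i < n && found < BATCH; i++)
                    if (table[i] >= start)
                            out[found++] = table[i];
            return found;
    }

    int main(void)
    {
            const size_t dirty[] = { 2, 5, 9, 10, 17 };
            size_t batch[BATCH], start = 0, nr;

            while ((nr = lookup_batch(dirty, 5, start, batch)) != 0) {
                    for (size_t i = 0; i < nr; i++)
                            printf("flush %zu\n", batch[i]);
                    start = batch[nr - 1] + 1; /* the fix: resume past the batch */
            }
            return 0;
    }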
index 72934df6847150ba50dfbadad78fe10e01d2eadd..904199086490d5fdf05d0eda850d04a3ce572fa5 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+               unsigned long ptr_size;
                struct rlimit *rlim;
 
+               /*
+                * Since the stack will hold pointers to the strings, we
+                * must account for them as well.
+                *
+                * The size calculation is the entire vma while each arg page is
+                * built, so each time we get here it's calculating how far it
+                * is currently (rather than each call being just the newly
+                * added size from the arg page).  As a result, we need to
+                * always add the entire size of the pointers, so that on the
+                * last call to get_arg_page() we'll actually have the entire
+                * correct size.
+                */
+               ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+               if (ptr_size > ULONG_MAX - size)
+                       goto fail;
+               size += ptr_size;
+
                acct_arg_size(bprm, size / PAGE_SIZE);
 
                /*
@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                 *    to work from.
                 */
                rlim = current->signal->rlim;
-               if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-                       put_page(page);
-                       return NULL;
-               }
+               if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+                       goto fail;
        }
 
        return page;
+
+fail:
+       put_page(page);
+       return NULL;
 }
 
 static void put_arg_page(struct page *page)
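Two details in this hunk are worth separating: the pointer overhead is re-added in full on every call because size always reflects the whole vma so far, and the addition itself is checked for wrap-around before the rlimit comparison. The overflow-checked accounting on its own, as a stand-alone sketch with hypothetical limits:

    #include <limits.h>
    #include <stdio.h>

    /* Returns 0 if `size + nptrs * sizeof(void *)` stays within a quarter
     * of `limit` without wrapping, -1 otherwise. */
    static int check_arg_size(unsigned long size, unsigned long nptrs,
                              unsigned long limit)
    {
            unsigned long ptr_size = nptrs * sizeof(void *);

            if (ptr_size > ULONG_MAX - size)        /* would wrap around */
                    return -1;
            size += ptr_size;
            return size > limit / 4 ? -1 : 0;
    }

    int main(void)
    {
            printf("%d\n", check_arg_size(4096, 100, 8 * 1024 * 1024)); /* ok */
            printf("%d\n", check_arg_size(ULONG_MAX - 8, 100, 0));      /* wraps */
            return 0;
    }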
index 3b7c937a36b528e67511a23b136215ffaab6d8e4..4689940a953c2f7fc3b3a0e07a7d01c8c09e7d9a 100644 (file)
@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
        struct ocfs2_lock_res *lockres;
 
        lockres = &OCFS2_I(inode)->ip_inode_lockres;
+       /* had_lock means that the current process already took the cluster
+        * lock previously. If had_lock is 1, we have nothing to do here, and
+        * it will get unlocked where the lock was taken.
+        */
        if (!had_lock) {
                ocfs2_remove_holder(lockres, oh);
                ocfs2_inode_unlock(inode, ex);
index 3c5384d9b3a549f319b114a782c7daab966a2c28..f70c3778d600c6be63996572bee6fd46ac03440c 100644 (file)
@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
                           void *buffer,
                           size_t buffer_size)
 {
-       int ret;
+       int ret, had_lock;
        struct buffer_head *di_bh = NULL;
+       struct ocfs2_lock_holder oh;
 
-       ret = ocfs2_inode_lock(inode, &di_bh, 0);
-       if (ret < 0) {
-               mlog_errno(ret);
-               return ret;
+       had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
+       if (had_lock < 0) {
+               mlog_errno(had_lock);
+               return had_lock;
        }
        down_read(&OCFS2_I(inode)->ip_xattr_sem);
        ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
                                     name, buffer, buffer_size);
        up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
-       ocfs2_inode_unlock(inode, 0);
+       ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
 
        brelse(di_bh);
 
@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
 {
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;
-       int ret, credits, ref_meta = 0, ref_credits = 0;
+       int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct inode *tl_inode = osb->osb_tl_inode;
        struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
        struct ocfs2_refcount_tree *ref_tree = NULL;
+       struct ocfs2_lock_holder oh;
 
        struct ocfs2_xattr_info xi = {
                .xi_name_index = name_index,
@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
                return -ENOMEM;
        }
 
-       ret = ocfs2_inode_lock(inode, &di_bh, 1);
-       if (ret < 0) {
+       had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
+       if (had_lock < 0) {
+               ret = had_lock;
                mlog_errno(ret);
                goto cleanup_nolock;
        }
@@ -3670,7 +3673,7 @@ cleanup:
                if (ret)
                        mlog_errno(ret);
        }
-       ocfs2_inode_unlock(inode, 1);
+       ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
 cleanup_nolock:
        brelse(di_bh);
        brelse(xbs.xattr_bh);
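Both converted call sites rely on the tracker contract: had_lock == 1 means the current process already holds the cluster lock, so neither the lock nor the later unlock is performed again. The same shape over a plain mutex, as a single-threaded demo (hypothetical names; the kernel version tracks holders on a list under the lockres spinlock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t owner;
    static int owned;

    /* Returns 1 if the caller already held the lock, 0 if we took it.
     * Reading `owned` unlocked is fine only in this single-threaded demo. */
    static int lock_tracker(void)
    {
            if (owned && pthread_equal(owner, pthread_self()))
                    return 1;
            pthread_mutex_lock(&lock);
            owner = pthread_self();
            owned = 1;
            return 0;
    }

    static void unlock_tracker(int had_lock)
    {
            if (had_lock)           /* outer holder will release it */
                    return;
            owned = 0;
            pthread_mutex_unlock(&lock);
    }

    static void xattr_get(void)
    {
            int had_lock = lock_tracker();

            puts(had_lock ? "reusing held lock" : "took lock");
            unlock_tracker(had_lock);
    }

    int main(void)
    {
            int had_lock = lock_tracker();  /* outer acquisition */

            xattr_get();                    /* nested: must not deadlock */
            unlock_tracker(had_lock);
            return 0;
    }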
index 0315fea1d589e104ac4f10bf4db3f9f705c3f7ad..f80be4c5df9d13b7009602dd5be2f4c58a5de1ac 100644 (file)
@@ -455,24 +455,14 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        /*
         * allocate new block and move data
         */
-       switch (fs32_to_cpu(sb, usb1->fs_optim)) {
-           case UFS_OPTSPACE:
+       if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
                request = newcount;
-               if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
-                   > uspi->s_dsize * uspi->s_minfree / (2 * 100))
-                       break;
-               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-               break;
-           default:
-               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-       
-           case UFS_OPTTIME:
+               if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
+                       usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+       } else {
                request = uspi->s_fpb;
-               if (uspi->cs_total.cs_nffree < uspi->s_dsize *
-                   (uspi->s_minfree - 2) / 100)
-                       break;
-               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-               break;
+               if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
+                       usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
index 9f4590261134085cbabcea1ee9a72382239ba479..f36d6a53687d13fd817f65f99913a54d425e8048 100644 (file)
@@ -566,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
-       if (inode->i_nlink == 0) {
-               ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-               return -1;
-       }
+       if (inode->i_nlink == 0)
+               return -ESTALE;
 
        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -578,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
        i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 
        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
-       inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
-       inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
-       inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
+       inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+       inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+       inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
@@ -614,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
-       if (inode->i_nlink == 0) {
-               ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-               return -1;
-       }
+       if (inode->i_nlink == 0)
+               return -ESTALE;
 
         /*
          * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -657,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;
        struct inode *inode;
-       int err;
+       int err = -EIO;
 
        UFSD("ENTER, ino %lu\n", ino);
 
@@ -692,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
        }
-
+       brelse(bh);
        if (err)
                goto bad_inode;
+
        inode->i_version++;
        ufsi->i_lastfrag =
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -703,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 
        ufs_set_inode_ops(inode);
 
-       brelse(bh);
-
        UFSD("EXIT\n");
        unlock_new_inode(inode);
        return inode;
 
 bad_inode:
        iget_failed(inode);
-       return ERR_PTR(-EIO);
+       return ERR_PTR(err);
 }
 
 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
index d5300adbfd79bafd05217cd7088d1221a4501187..0a4f58a5073cb63feb94c2174c56d9889d2a37a4 100644 (file)
@@ -1210,6 +1210,15 @@ magic_found:
 
        uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
                                              uspi->s_minfree, 100);
+       if (uspi->s_minfree <= 5) {
+               uspi->s_time_to_space = ~0ULL;
+               uspi->s_space_to_time = 0;
+               usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
+       } else {
+               uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1;
+               uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize,
+                                             uspi->s_minfree - 2, 100) - 1;
+       }
 
        /*
         * Compute other frequently used values
index 823d55a37586037f7ed02f9be5e1dbee890123d4..150eef6f12331a034dab0c48bee530fdc73c821e 100644 (file)
@@ -792,6 +792,8 @@ struct ufs_sb_private_info {
        __s32   fs_magic;       /* filesystem magic */
        unsigned int s_dirblksize;
        __u64   s_root_blocks;
+       __u64   s_time_to_space;
+       __u64   s_space_to_time;
 };
 
 /*
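The balloc.c simplification above works because the two new ufs_sb_private_info fields are filled in once at mount time (the super.c hunk): the allocator then compares free fragment counts against precomputed crossover points instead of redoing percentage arithmetic on every allocation. Roughly, as a stand-alone sketch with hypothetical inputs:

    #include <stdint.h>
    #include <stdio.h>

    struct thresholds {
            uint64_t time_to_space;
            uint64_t space_to_time;
    };

    /* Mount-time setup: with minfree <= 5 the filesystem is pinned to
     * space optimization; otherwise derive both crossover points once. */
    static void compute_thresholds(struct thresholds *t, uint64_t dsize,
                                   uint32_t minfree)
    {
            uint64_t root_blocks = dsize * minfree / 100;

            if (minfree <= 5) {
                    t->time_to_space = UINT64_MAX;  /* never switch to time */
                    t->space_to_time = 0;           /* never switch to space */
            } else {
                    t->time_to_space = root_blocks / 2 + 1;
                    t->space_to_time = dsize * (minfree - 2) / 100 - 1;
            }
    }

    int main(void)
    {
            struct thresholds t;

            compute_thresholds(&t, 1000000, 8);
            /* the allocator-side check is now two plain comparisons */
            printf("to time below %llu, to space above %llu free frags\n",
                   (unsigned long long)t.time_to_space,
                   (unsigned long long)t.space_to_time);
            return 0;
    }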
index 09af0f7cd55e278312881999755d3d8d0793d5c8..3b91faacc1baeaff2ac762e2438e5fcbe48cc76b 100644 (file)
@@ -1316,9 +1316,12 @@ xfs_vm_bmap(
         * The swap code (ab-)uses ->bmap to get a block mapping and then
        * bypasses the file system for actual I/O.  We really can't allow
         * that on reflinks inodes, so we have to skip out here.  And yes,
-        * 0 is the magic code for a bmap error..
+        * 0 is the magic code for a bmap error.
+        *
+        * Since we don't pass back blockdev info, we can't return bmap
+        * information for rt files either.
         */
-       if (xfs_is_reflink_inode(ip))
+       if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
                return 0;
 
        filemap_write_and_wait(mapping);
index 197f3fffc9a7151ed61d0b960f5e452f6beccb5c..408c7820e200f99e50d978c342dd67cc55166d0c 100644 (file)
@@ -210,7 +210,8 @@ struct acpi_device_flags {
        u32 of_compatible_ok:1;
        u32 coherent_dma:1;
        u32 cca_seen:1;
-       u32 reserved:20;
+       u32 spi_i2c_slave:1;
+       u32 reserved:19;
 };
 
 /* File System */
index b74a3edcb3da82903568981a5b49fbbf1f4269cb..1ddd36bd2173b98e925eabdf083a796bfcabdd07 100644 (file)
@@ -391,6 +391,8 @@ struct request_queue {
        int                     nr_rqs[2];      /* # allocated [a]sync rqs */
        int                     nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
 
+       atomic_t                shared_hctx_restart;
+
        struct blk_queue_stats  *stats;
        struct rq_wb            *rq_wb;
 
index 07ef550c662708035459293fe39fa895cca9fce5..93315d6b21a85fea729970574eecd66027f8f520 100644 (file)
@@ -84,6 +84,7 @@ struct kmem_cache {
        int red_left_pad;       /* Left redzone padding size */
 #ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
+       struct work_struct kobj_remove_work;
 #endif
 #ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
index 345911965dbb8f53289d63535828969454ae3333..454ff763eeba9b829051862d7cce278be7d77b42 100644 (file)
@@ -6,7 +6,7 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
                      void __user *arg);
 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
                             unsigned long arg);
@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 struct iw_statistics *get_wireless_stats(struct net_device *dev);
 int call_commit_handler(struct net_device *dev);
 #else
-static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
                                    void __user *arg)
 {
        return -EINVAL;
index f8269036bf0b84f89faa2db3b5dc5fe4c259a019..52c4e907c14b0d4a7896d7ca9a6fa6fbc17451ba 100644 (file)
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
        ops = container_of(fops, struct klp_ops, fops);
 
-       rcu_read_lock();
+       /*
+        * A variant of synchronize_sched() is used to allow patching functions
+        * where RCU is not watching, see klp_synchronize_transition().
+        */
+       preempt_disable_notrace();
 
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
        klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
-       rcu_read_unlock();
+       preempt_enable_notrace();
 }
 
 /*
index adc0cc64aa4b6ae199e5f4a5b2af0ad59c382175..b004a1fb603236f31cd5780962c098bfcf8fe743 100644 (file)
@@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 }
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
+/*
+ * This function is just a stub used to force a hard synchronize_sched().
+ * This requires synchronizing tasks even in userspace and idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We also allow patching functions where RCU is not watching,
+ * e.g. before user_exit(). We cannot rely on the RCU infrastructure
+ * to do the synchronization. Instead, hard-force the sched synchronization.
+ *
+ * This approach allows us to use RCU functions for manipulating func_stack
+ * safely.
+ */
+static void klp_synchronize_transition(void)
+{
+       schedule_on_each_cpu(klp_sync);
+}
+
 /*
  * The transition to the target patch state is complete.  Clean up the data
  * structures.
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
-               synchronize_rcu();
+               klp_synchronize_transition();
        }
 
        if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
 
        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
-               synchronize_rcu();
+               klp_synchronize_transition();
 
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-       rcu_read_lock();
+       /*
+        * A variant of synchronize_sched() is used to allow patching functions
+        * where RCU is not watching, see klp_synchronize_transition().
+        */
+       preempt_disable_notrace();
 
        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);
 
-       rcu_read_unlock();
+       preempt_enable_notrace();
 }
 
 /*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
        /* Let any remaining calls to klp_update_patch_state() complete */
-       synchronize_rcu();
+       klp_synchronize_transition();
 
        klp_start_transition();
 }
index 3c6432df7e63466a24d41dead807c7ef14c0ab86..4c0888c4a68d9621717f9012d3ca4cc0b72df20b 100644 (file)
  *     the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
        int x, inc_counter, upper_range;
 
        (*str)++;
        upper_range = simple_strtol((*str), NULL, 0);
        inc_counter = upper_range - *pint;
-       for (x = *pint; x < upper_range; x++)
+       for (x = *pint; n && x < upper_range; x++, n--)
                *pint++ = x;
        return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
                        break;
                if (res == 3) {
                        int range_nums;
-                       range_nums = get_range((char **)&str, ints + i);
+                       range_nums = get_range((char **)&str, ints + i, nints - i);
                        if (range_nums < 0)
                                break;
                        /*
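The new n argument caps how many expanded values get_range() may store, so a command-line range like 1-1000 can no longer overrun a small ints array; the return value still reports the full range width, mirroring the kernel helper. The clamped expansion in isolation:

    #include <stdio.h>

    /* Expand [start, upper) into out[], writing at most n values.
     * Returns the number of values the range denotes, which may
     * exceed n, just as the kernel helper's return value does. */
    static int expand_range(int start, int upper, int *out, int n)
    {
            int x, width = upper - start;

            for (x = start; n && x < upper; x++, n--)
                    *out++ = x;
            return width;
    }

    int main(void)
    {
            int ints[4];
            int width = expand_range(1, 1000, ints, 4); /* writes only 4 */

            printf("range width %d, first %d last %d\n",
                   width, ints[0], ints[3]);
            return 0;
    }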
index 945fd1ca49b5af0bc3b87dbfe8098f0f602775a2..df4ebdb2b10a373723330dc0124957cd2cb1c021 100644 (file)
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                        spin_unlock(ptl);
                        free_page_and_swap_cache(src_page);
                }
-               cond_resched();
        }
 }
 
index 8e07976d5e47727273e9b469992f76ab0c309d58..a5e3dcd75e79f40557cefc1bd5338d34eb4fa2f2 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1817,7 +1817,8 @@ check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
                        return -ENOMEM;
-               if (gap_end >= low_limit && gap_end - gap_start >= length)
+               if (gap_end >= low_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit right subtree if it looks promising */
@@ -1920,7 +1921,8 @@ check_current:
                gap_end = vm_start_gap(vma);
                if (gap_end < low_limit)
                        return -ENOMEM;
-               if (gap_start <= high_limit && gap_end - gap_start >= length)
+               if (gap_start <= high_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit left subtree if it looks promising */
@@ -2228,16 +2230,19 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /* Guard against wrapping around to address 0. */
+       /* Guard against exceeding limits of the address space. */
        address &= PAGE_MASK;
-       address += PAGE_SIZE;
-       if (!address)
+       if (address >= TASK_SIZE)
                return -ENOMEM;
+       address += PAGE_SIZE;
 
        /* Enforce stack_guard_gap */
        gap_addr = address + stack_guard_gap;
-       if (gap_addr < address)
-               return -ENOMEM;
+
+       /* Guard against overflow */
+       if (gap_addr < address || gap_addr > TASK_SIZE)
+               gap_addr = TASK_SIZE;
+
        next = vma->vm_next;
        if (next && next->vm_start < gap_addr) {
                if (!(next->vm_flags & VM_GROWSUP))
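The rewritten guard handles both failure modes of address + stack_guard_gap: wrapping past zero and landing beyond the top of the address space, clamping either case to TASK_SIZE instead of failing only on an exact wrap. The saturating shape, extracted into a stand-alone sketch (hypothetical constants; the gap size below is only illustrative):

    #include <stdio.h>

    #define TASK_SIZE_DEMO  0xc0000000UL    /* hypothetical user-space ceiling */
    #define GUARD_GAP       (256UL << 12)   /* illustrative 256-page gap */

    /* Clamp address + gap to the address-space limit, treating overflow
     * as "at the limit" rather than wrapping past zero. */
    static unsigned long gap_end(unsigned long address)
    {
            unsigned long gap_addr = address + GUARD_GAP;

            if (gap_addr < address || gap_addr > TASK_SIZE_DEMO)
                    gap_addr = TASK_SIZE_DEMO;
            return gap_addr;
    }

    int main(void)
    {
            printf("%#lx\n", gap_end(0x10000000UL));          /* normal case */
            printf("%#lx\n", gap_end(TASK_SIZE_DEMO - 4096)); /* clamped */
            printf("%#lx\n", gap_end(~0UL - 4096));           /* would wrap */
            return 0;
    }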
index 7449593fca724147cef5b8f7a46752333e5e0585..8addc535bcdc58794fe40e72a729e4589d44d2b6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
        return name;
 }
 
+static void sysfs_slab_remove_workfn(struct work_struct *work)
+{
+       struct kmem_cache *s =
+               container_of(work, struct kmem_cache, kobj_remove_work);
+
+       if (!s->kobj.state_in_sysfs)
+               /*
+                * For a memcg cache, this may be called during
+                * deactivation and again on shutdown.  Remove only once.
+                * A cache is never shut down before deactivation is
+                * complete, so no need to worry about synchronization.
+                */
+               return;
+
+#ifdef CONFIG_MEMCG
+       kset_unregister(s->memcg_kset);
+#endif
+       kobject_uevent(&s->kobj, KOBJ_REMOVE);
+       kobject_del(&s->kobj);
+       kobject_put(&s->kobj);
+}
+
 static int sysfs_slab_add(struct kmem_cache *s)
 {
        int err;
@@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
        struct kset *kset = cache_kset(s);
        int unmergeable = slab_unmergeable(s);
 
+       INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
+
        if (!kset) {
                kobject_init(&s->kobj, &slab_ktype);
                return 0;
@@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
                 */
                return;
 
-       if (!s->kobj.state_in_sysfs)
-               /*
-                * For a memcg cache, this may be called during
-                * deactivation and again on shutdown.  Remove only once.
-                * A cache is never shut down before deactivation is
-                * complete, so no need to worry about synchronization.
-                */
-               return;
-
-#ifdef CONFIG_MEMCG
-       kset_unregister(s->memcg_kset);
-#endif
-       kobject_uevent(&s->kobj, KOBJ_REMOVE);
-       kobject_del(&s->kobj);
+       kobject_get(&s->kobj);
+       schedule_work(&s->kobj_remove_work);
 }
 
 void sysfs_slab_release(struct kmem_cache *s)
index 34a1c3e46ed72594b499e7f61e8aacdd4c5fe818..ecc97f74ab182fe9aeb7d7eda5166dfdb5b03095 100644 (file)
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
        if (p4d_none(*p4d))
                return NULL;
        pud = pud_offset(p4d, addr);
-       if (pud_none(*pud))
+
+       /*
+        * Don't dereference bad PUD or PMD (below) entries. This will also
+        * identify huge mappings, which we may encounter on architectures
+        * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+        * identified as vmalloc addresses by is_vmalloc_addr(), but are
+        * not [unambiguously] associated with a struct page, so there is
+        * no correct value to return for them.
+        */
+       WARN_ON_ONCE(pud_bad(*pud));
+       if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       WARN_ON_ONCE(pmd_bad(*pmd));
+       if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
 
        ptep = pte_offset_map(pmd, addr);
index 467069b73ce1b89e5a7b72904a878ef1f3412c67..9649579b5b9f38aff6ce7a990d2dc1ddb1d85e12 100644 (file)
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
        return 0;
 
 out_free_newdev:
-       free_netdev(new_dev);
+       if (new_dev->reg_state == NETREG_UNINITIALIZED)
+               free_netdev(new_dev);
        return err;
 }
 
index 6d60149287a1868cd65fcc55a4e29edd7611def3..7243421c9783bf060dac1938fc1f2f59a4afafea 100644 (file)
@@ -5206,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
        if (rc == BUSY_POLL_BUDGET)
                __napi_schedule(napi);
        local_bh_enable();
-       if (local_softirq_pending())
-               do_softirq();
 }
 
 void napi_busy_loop(unsigned int napi_id,
index b94b1d29350603e19c3db8e6fd740d3f89440771..27fad31784a83861f942f3b4f82f44985a946645 100644 (file)
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        if (cmd == SIOCGIFNAME)
                return dev_ifname(net, (struct ifreq __user *)arg);
 
+       /*
+        * Take care of Wireless Extensions. Unfortunately struct iwreq
+        * isn't a proper subset of struct ifreq (it's 8 bytes shorter)
+        * so we need to treat it specially, otherwise applications may
+        * fault if the struct they're passing happens to land at the
+        * end of a mapped page.
+        */
+       if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+               struct iwreq iwr;
+
+               if (copy_from_user(&iwr, arg, sizeof(iwr)))
+                       return -EFAULT;
+
+               return wext_handle_ioctl(net, &iwr, cmd, arg);
+       }
+
        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
                return -EFAULT;
 
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                                ret = -EFAULT;
                        return ret;
                }
-               /* Take care of Wireless Extensions */
-               if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-                       return wext_handle_ioctl(net, &ifr, cmd, arg);
                return -ENOTTY;
        }
 }
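The reason for hoisting the wext case out is in the comment above: struct iwreq is 8 bytes shorter than struct ifreq, so copying sizeof(struct ifreq) from a user pointer that really holds an iwreq can run off the end of a mapped page. Copying the smaller struct first avoids the over-read; schematically, with hypothetical struct sizes:

    #include <stdio.h>
    #include <string.h>

    struct iwreq_demo { char ifr_name[16]; char payload[16]; }; /* 32 bytes */
    struct ifreq_demo { char ifr_name[16]; char payload[24]; }; /* 40 bytes */

    /* Dispatcher: for "wireless" commands, copy only the smaller struct
     * the caller actually passed, never sizeof(struct ifreq_demo). */
    static int demo_ioctl(int is_wireless, const void *user_arg)
    {
            if (is_wireless) {
                    struct iwreq_demo iwr;

                    memcpy(&iwr, user_arg, sizeof(iwr)); /* 32-byte read only */
                    printf("wext cmd for %s\n", iwr.ifr_name);
                    return 0;
            } else {
                    struct ifreq_demo ifr;

                    memcpy(&ifr, user_arg, sizeof(ifr));
                    printf("net cmd for %s\n", ifr.ifr_name);
                    return 0;
            }
    }

    int main(void)
    {
            struct iwreq_demo iwr = { .ifr_name = "wlan0" };

            /* Reading sizeof(struct ifreq_demo) here would touch 8 bytes
             * beyond the object the caller supplied. */
            return demo_ioctl(1, &iwr);
    }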
index f21c4d3aeae0cf59a8d9239cf1e581e6d136c740..3bba291c6c32e4359a6d626fbd492f7d07fd3e4c 100644 (file)
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
-       struct fib_rule *rule, *tmp;
+       struct fib_rule *rule, *r;
        struct nlattr *tb[FRA_MAX+1];
        struct fib_kuid_range range;
        int err = -EINVAL;
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 
                /*
                 * Check if this rule is a target to any of them. If so,
+                * adjust to the next one with the same preference or
                 * disable them. As this operation is potentially very
-                * expensive, it is only performed if goto rules have
-                * actually been added.
+                * expensive, it is only performed if goto rules (other than
+                * the current rule, if it is itself a goto rule) have
+                * actually been added.
                 */
                if (ops->nr_goto_rules > 0) {
-                       list_for_each_entry(tmp, &ops->rules_list, list) {
-                               if (rtnl_dereference(tmp->ctarget) == rule) {
-                                       RCU_INIT_POINTER(tmp->ctarget, NULL);
+                       struct fib_rule *n;
+
+                       n = list_next_entry(rule, list);
+                       if (&n->list == &ops->rules_list || n->pref != rule->pref)
+                               n = NULL;
+                       list_for_each_entry(r, &ops->rules_list, list) {
+                               if (rtnl_dereference(r->ctarget) != rule)
+                                       continue;
+                               rcu_assign_pointer(r->ctarget, n);
+                               if (!n)
                                        ops->unresolved_rules++;
-                               }
                        }
                }
 
index 5e61456f6bc795cfb75db2d530eb3b1872c82989..467a2f4510a74cad48209b696532838d1edcf96e 100644 (file)
@@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(1) /* IFLA_LINKMODE */
               + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
               + nla_total_size(4) /* IFLA_LINK_NETNSID */
+              + nla_total_size(4) /* IFLA_GROUP */
               + nla_total_size(ext_filter_mask
                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1468,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_LINK_NETNSID]     = { .type = NLA_S32 },
        [IFLA_PROTO_DOWN]       = { .type = NLA_U8 },
        [IFLA_XDP]              = { .type = NLA_NESTED },
+       [IFLA_GROUP]            = { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
index 4b9518a0d2489494ed88b6808f95803cf1b543ad..6f95612b4d321d63e39b37f881f0e46331999ad2 100644 (file)
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
        call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
-static inline void dnrt_drop(struct dn_route *rt)
-{
-       dst_release(&rt->dst);
-       call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void dn_dst_check_expire(unsigned long dummy)
 {
        int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
                        }
                        *rtp = rt->dst.dn_next;
                        rt->dst.dn_next = NULL;
-                       dnrt_drop(rt);
+                       dnrt_free(rt);
                        break;
                }
                spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
                        dst_use(&rth->dst, now);
                        spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
-                       dnrt_drop(rt);
+                       dst_free(&rt->dst);
                        *rp = rth;
                        return 0;
                }
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
                for(; rt; rt = next) {
                        next = rcu_dereference_raw(rt->dst.dn_next);
                        RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-                       dst_free((struct dst_entry *)rt);
+                       dnrt_free(rt);
                }
 
 nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
-       rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+       rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
        if (rt == NULL)
                goto e_nobufs;
 
index 8f6b5bbcbf69f54f354d3678cf6e5f8374602edb..ec9a396fa4660272f96602e78c5c3b0603068d66 100644 (file)
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
        if (!pmc)
                return;
+       spin_lock_init(&pmc->lock);
        spin_lock_bh(&im->lock);
        pmc->interface = im->interface;
        in_dev_hold(in_dev);
index b436d077563174c22b48a81a6a856f30dd831a5e..129d1a3616f838c9b487cf2392d1d9eb09dcfa5a 100644 (file)
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
        return 0;
 
 drop:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
        kfree_skb(skb);
        return 0;
 }
index 6a4fb1e629fb7609048156974ae2eb322cebddae..686c92375e81d50787adbfb423afba76903948ad 100644 (file)
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
                                   unsigned long delay)
 {
-       if (!delayed_work_pending(&ifp->dad_work))
-               in6_ifa_hold(ifp);
-       mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+       in6_ifa_hold(ifp);
+       if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+               in6_ifa_put(ifp);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
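This three-line change fixes a reference imbalance: mod_delayed_work() returns true when the work item was already pending and only its timer was adjusted, in which case the reference taken for the earlier queueing still stands and the fresh one must be dropped. Taking the reference unconditionally and releasing it on "was pending" keeps exactly one reference per queued instance; the idiom as a user-space analog (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    static int refcount;

    static void hold(void) { refcount++; }
    static void put(void)  { refcount--; }

    /* Stand-in for mod_delayed_work(): returns true if the work was
     * already pending (timer merely re-armed), false if newly queued. */
    static bool mod_delayed(bool was_pending) { return was_pending; }

    /* One reference must be held per *queued* work item, no matter how
     * many times the timer is re-armed. */
    static void arm_dad_work(bool was_pending)
    {
            hold();                         /* ref for the queued instance */
            if (mod_delayed(was_pending))
                    put();          /* already counted by the old queueing */
    }

    int main(void)
    {
            arm_dad_work(false);    /* first queueing: net +1 */
            arm_dad_work(true);     /* re-arm: net 0 */
            arm_dad_work(true);     /* re-arm: net 0 */
            printf("refs held: %d (expect 1)\n", refcount);
            return 0;
    }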
index eea23b57c6a5a000aec234cc9bf6f9411d98001f..ec849d88a66205742b1a58c4959c08eeffc3f6d7 100644 (file)
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
-       struct rt6_info *rt;
        struct fib_lookup_arg arg = {
                .lookup_ptr = lookup,
                .flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
        fib_rules_lookup(net->ipv6.fib6_rules_ops,
                         flowi6_to_flowi(fl6), flags, &arg);
 
-       rt = arg.result;
+       if (arg.result)
+               return arg.result;
 
-       if (!rt) {
-               dst_hold(&net->ipv6.ip6_null_entry->dst);
-               return &net->ipv6.ip6_null_entry->dst;
-       }
-
-       if (rt->rt6i_flags & RTF_REJECT &&
-           rt->dst.error == -EAGAIN) {
-               ip6_rt_put(rt);
-               rt = net->ipv6.ip6_null_entry;
-               dst_hold(&rt->dst);
-       }
-
-       return &rt->dst;
+       dst_hold(&net->ipv6.ip6_null_entry->dst);
+       return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
                        flp6->saddr = saddr;
                }
                err = rt->dst.error;
-               goto out;
+               if (err != -EAGAIN)
+                       goto out;
        }
 again:
        ip6_rt_put(rt);
index d4bf2c68a545b44873e433930e4e999920de78c9..e6b78ba0e6360ea40be58f07ea1c6657efcca93e 100644 (file)
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
        struct rt6_info *rt;
 
        rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-       if (rt->rt6i_flags & RTF_REJECT &&
-           rt->dst.error == -EAGAIN) {
+       if (rt->dst.error == -EAGAIN) {
                ip6_rt_put(rt);
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
index c3581973f5d7265a574ae69416a516526ed64e44..8c6c3c8e7eef26899ea22ff85997230c8fd17e7e 100644 (file)
@@ -858,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
        return 0;
 
 drop:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
        kfree_skb(skb);
        return 0;
 }
@@ -1246,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPIP;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
-               dsfield = ip6_tclass(key->label);
+               dsfield = key->tos;
        } else {
                if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                        encap_limit = t->parms.encap_limit;
@@ -1317,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPV6;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
-               dsfield = ip6_tclass(key->label);
+               dsfield = key->tos;
        } else {
                offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
                /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
index 0a4e28477ad94012b4e7aeb526711076e6259995..54369225766ef8e43ff94f21b4fd670b2cc233a3 100644 (file)
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
                                       unsigned int *_toklen)
 {
        const __be32 *xdr = *_xdr;
-       unsigned int toklen = *_toklen, n_parts, loop, tmp;
+       unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
 
        /* there must be at least one name, and at least #names+1 length
         * words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
                toklen -= 4;
                if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
                        return -EINVAL;
-               if (tmp > toklen)
+               paddedlen = (tmp + 3) & ~3;
+               if (paddedlen > toklen)
                        return -EINVAL;
                princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
                if (!princ->name_parts[loop])
                        return -ENOMEM;
                memcpy(princ->name_parts[loop], xdr, tmp);
                princ->name_parts[loop][tmp] = 0;
-               tmp = (tmp + 3) & ~3;
-               toklen -= tmp;
-               xdr += tmp >> 2;
+               toklen -= paddedlen;
+               xdr += paddedlen >> 2;
        }
 
        if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
        toklen -= 4;
        if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
                return -EINVAL;
-       if (tmp > toklen)
+       paddedlen = (tmp + 3) & ~3;
+       if (paddedlen > toklen)
                return -EINVAL;
        princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
        if (!princ->realm)
                return -ENOMEM;
        memcpy(princ->realm, xdr, tmp);
        princ->realm[tmp] = 0;
-       tmp = (tmp + 3) & ~3;
-       toklen -= tmp;
-       xdr += tmp >> 2;
+       toklen -= paddedlen;
+       xdr += paddedlen >> 2;
 
        _debug("%s/...@%s", princ->name_parts[0], princ->realm);
 
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
                                         unsigned int *_toklen)
 {
        const __be32 *xdr = *_xdr;
-       unsigned int toklen = *_toklen, len;
+       unsigned int toklen = *_toklen, len, paddedlen;
 
        /* there must be at least one tag and one length word */
        if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
        toklen -= 8;
        if (len > max_data_size)
                return -EINVAL;
+       paddedlen = (len + 3) & ~3;
+       if (paddedlen > toklen)
+               return -EINVAL;
        td->data_len = len;
 
        if (len > 0) {
                td->data = kmemdup(xdr, len, GFP_KERNEL);
                if (!td->data)
                        return -ENOMEM;
-               len = (len + 3) & ~3;
-               toklen -= len;
-               xdr += len >> 2;
+               toklen -= paddedlen;
+               xdr += paddedlen >> 2;
        }
 
        _debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
                                    const __be32 **_xdr, unsigned int *_toklen)
 {
        const __be32 *xdr = *_xdr;
-       unsigned int toklen = *_toklen, len;
+       unsigned int toklen = *_toklen, len, paddedlen;
 
        /* there must be at least one length word */
        if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
        toklen -= 4;
        if (len > AFSTOKEN_K5_TIX_MAX)
                return -EINVAL;
+       paddedlen = (len + 3) & ~3;
+       if (paddedlen > toklen)
+               return -EINVAL;
        *_tktlen = len;
 
        _debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
                *_ticket = kmemdup(xdr, len, GFP_KERNEL);
                if (!*_ticket)
                        return -ENOMEM;
-               len = (len + 3) & ~3;
-               toklen -= len;
-               xdr += len >> 2;
+               toklen -= paddedlen;
+               xdr += paddedlen >> 2;
        }
 
        *_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 {
        const __be32 *xdr = prep->data, *token;
        const char *cp;
-       unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+       unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
        size_t datalen = prep->datalen;
        int ret;
 
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
        if (len < 1 || len > AFSTOKEN_CELL_MAX)
                goto not_xdr;
        datalen -= 4;
-       tmp = (len + 3) & ~3;
-       if (tmp > datalen)
+       paddedlen = (len + 3) & ~3;
+       if (paddedlen > datalen)
                goto not_xdr;
 
        cp = (const char *) xdr;
        for (loop = 0; loop < len; loop++)
                if (!isprint(cp[loop]))
                        goto not_xdr;
-       if (len < tmp)
-               for (; loop < tmp; loop++)
-                       if (cp[loop])
-                               goto not_xdr;
+       for (; loop < paddedlen; loop++)
+               if (cp[loop])
+                       goto not_xdr;
        _debug("cellname: [%u/%u] '%*.*s'",
-              len, tmp, len, len, (const char *) xdr);
-       datalen -= tmp;
-       xdr += tmp >> 2;
+              len, paddedlen, len, len, (const char *) xdr);
+       datalen -= paddedlen;
+       xdr += paddedlen >> 2;
 
        /* get the token count */
        if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
                sec_ix = ntohl(*xdr);
                datalen -= 4;
                _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-               if (toklen < 20 || toklen > datalen)
+               paddedlen = (toklen + 3) & ~3;
+               if (toklen < 20 || toklen > datalen || paddedlen > datalen)
                        goto not_xdr;
-               datalen -= (toklen + 3) & ~3;
-               xdr += (toklen + 3) >> 2;
+               datalen -= paddedlen;
+               xdr += paddedlen >> 2;
 
        } while (--loop > 0);
 
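Every hunk in this file applies the same rule: XDR strings are padded to a 4-byte boundary, so the bound check must be done against the padded length before any of the data is consumed, otherwise a length whose padding exceeds the remaining buffer slips through. The check in isolation, as a stand-alone parser sketch:

    #include <stdio.h>
    #include <string.h>

    /* Consume one length-prefixed, 4-byte-padded XDR string from buf.
     * Returns bytes consumed, or -1 if the padded length overruns. */
    static int xdr_pull_string(const unsigned char *buf, unsigned int toklen,
                               char *out, unsigned int outmax)
    {
            unsigned int len, paddedlen;

            if (toklen < 4)
                    return -1;
            len = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
            paddedlen = (len + 3) & ~3u;
            /* check the padded length, not len, against what is left */
            if (len >= outmax || paddedlen > toklen - 4)
                    return -1;
            memcpy(out, buf + 4, len);
            out[len] = 0;
            return 4 + paddedlen;
    }

    int main(void)
    {
            /* len = 5 ("hello"), padded to 8, total 12 bytes */
            const unsigned char ok[12] = { 0, 0, 0, 5, 'h', 'e', 'l', 'l', 'o' };
            char s[16];

            printf("%d\n", xdr_pull_string(ok, sizeof(ok), s, sizeof(s))); /* 12 */
            printf("%d\n", xdr_pull_string(ok, 9, s, sizeof(s))); /* -1: pad overruns */
            return 0;
    }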
index 8c589230794f9394406d2a0ad2157b7a47d75757..3dcd0ecf3d99f74ec8ed4aad149bd32950ab23ef 100644 (file)
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
                if (sctp_sk(sk)->bind_hash)
                        sctp_put_port(sk);
 
+               sctp_sk(sk)->ep = NULL;
                sock_put(sk);
        }
 
index 048954eee984f28e599084b32fad87b2bd6989d0..9a647214a91ebc583660db320307e0df1e13e5be 100644 (file)
@@ -278,7 +278,6 @@ out:
 
 static int sctp_sock_dump(struct sock *sk, void *p)
 {
-       struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_comm_param *commp = p;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
        int err = 0;
 
        lock_sock(sk);
-       list_for_each_entry(assoc, &ep->asocs, asocs) {
+       if (!sctp_sk(sk)->ep)
+               goto release;
+       list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
                if (cb->args[4] < cb->args[1])
                        goto next;
 
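Taken together, the two SCTP hunks above close a window between socket teardown and the sock-diag dumper: sctp_endpoint_destroy() now clears sctp_sk(sk)->ep before dropping its socket reference, and sctp_sock_dump() re-reads the pointer after lock_sock() and bails out if it is gone, instead of caching ep before taking the lock. The underlying pattern, publish NULL under the lock and re-check under the lock, is generic; here is a compilable userspace sketch with pthread stand-ins (all names are illustrative, build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct endpoint { int n_assocs; };

struct sock_like {
	pthread_mutex_t lock;
	struct endpoint *ep;	/* cleared by the destroy path, under lock */
};

/* dump side: must not trust a copy of sk->ep taken before locking */
static int dump_one(struct sock_like *sk)
{
	pthread_mutex_lock(&sk->lock);
	if (!sk->ep) {			/* destroy already ran */
		pthread_mutex_unlock(&sk->lock);
		return 0;
	}
	printf("walking %d assocs\n", sk->ep->n_assocs);
	pthread_mutex_unlock(&sk->lock);
	return 0;
}

/* destroy side: unpublish before the object goes away */
static void destroy(struct sock_like *sk)
{
	pthread_mutex_lock(&sk->lock);
	sk->ep = NULL;
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct endpoint ep = { .n_assocs = 3 };
	struct sock_like sk = { PTHREAD_MUTEX_INITIALIZER, &ep };

	dump_one(&sk);	/* walks the endpoint */
	destroy(&sk);
	dump_one(&sk);	/* sees NULL, returns cleanly */
	return 0;
}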
index 30aa0a529215ae54e43bdcb54a6e1870761996c3..3a8318e518f1c10a375dc3ff6ba6596579bb85a1 100644 (file)
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
        if (err)
                return err;
 
-       sctp_transport_get_idx(net, &hti, pos);
-       obj = sctp_transport_get_next(net, &hti);
-       for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+       obj = sctp_transport_get_idx(net, &hti, pos + 1);
+       for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
                struct sctp_transport *transport = obj;
 
                if (!sctp_transport_hold(transport))
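The sctp_for_each_transport() hunk replaces "rewind to pos, then fetch the next entry" with a direct fetch of entry pos + 1, and folds the two termination tests into IS_ERR_OR_NULL(), so a resumed dump neither repeats nor skips a transport. IS_ERR_OR_NULL() works because the kernel encodes small negative errno values in the top page of the pointer range; a self-contained re-creation (the constants mirror include/linux/err.h, the rest is illustrative):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR_OR_NULL(p) \
	(!(p) || (unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	printf("%d\n", IS_ERR_OR_NULL(NULL));		/* 1: no entry */
	printf("%d\n", IS_ERR_OR_NULL(ERR_PTR(-12)));	/* 1: -ENOMEM */
	printf("%d\n", IS_ERR_OR_NULL(&(int){ 0 }));	/* 0: real object */
	return 0;
}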
index 1a4db6790e2077d4ea922d1c32cde5352543fa2c..6cdb054484d66d40e4523965457a619bd1c9154f 100644 (file)
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
  * Main IOCTl dispatcher.
Note: the lines above and below quote the wext-core.c comments verbatim, typos included ("IOCTl", "the device exist").
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
                                  unsigned int cmd,
                                  struct iw_request_info *info,
                                  wext_ioctl_func standard,
                                  wext_ioctl_func private)
 {
-       struct iwreq *iwr = (struct iwreq *) ifr;
        struct net_device *dev;
        iw_handler      handler;
 
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
         * The copy_to/from_user() of ifr is also dealt with in there */
 
        /* Make sure the device exist */
-       if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL)
+       if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
                return -ENODEV;
 
        /* A bunch of special cases, then the generic case...
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                else if (private)
                        return private(dev, iwr, cmd, info, handler);
        }
-       /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl)
-               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
        return -EOPNOTSUPP;
 }
 
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
 }
 
 /* entry point from dev ioctl */
-static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
                               unsigned int cmd, struct iw_request_info *info,
                               wext_ioctl_func standard,
                               wext_ioctl_func private)
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
        if (ret)
                return ret;
 
-       dev_load(net, ifr->ifr_name);
+       dev_load(net, iwr->ifr_name);
        rtnl_lock();
-       ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
+       ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
        rtnl_unlock();
 
        return ret;
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device *      dev,
 }
 
 
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
                      void __user *arg)
 {
        struct iw_request_info info = { .cmd = cmd, .flags = 0 };
        int ret;
 
-       ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
+       ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
                                  ioctl_standard_call,
                                  ioctl_private_call);
        if (ret >= 0 &&
            IW_IS_GET(cmd) &&
-           copy_to_user(arg, ifr, sizeof(struct iwreq)))
+           copy_to_user(arg, iwr, sizeof(struct iwreq)))
                return -EFAULT;
 
        return ret;
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
        info.cmd = cmd;
        info.flags = IW_REQUEST_FLAG_COMPAT;
 
-       ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
+       ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
                                  compat_standard_call,
                                  compat_private_call);
 
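The wext hunks above stop laundering the request through struct ifreq: the dispatcher takes a struct iwreq * end to end, the compat path no longer casts, and the legacy ndo_do_ioctl() fallback is dropped, since drivers reached through it expect a full struct ifreq while this entry point only has a (smaller) struct iwreq to hand them. The size difference is easy to see from userspace; this sketch assumes only the Linux UAPI headers:

#include <stdio.h>
#include <linux/wireless.h>	/* struct iwreq; pulls in struct ifreq */

int main(void)
{
	/* On x86-64 this typically prints 40 vs 32: struct ifreq's union
	 * includes struct ifmap (two unsigned longs and change), while
	 * struct iwreq's largest union member, struct iw_point, is 16
	 * bytes.  Passing the smaller struct where the larger one is
	 * expected would read or write past the end of the buffer. */
	printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));
	printf("sizeof(struct iwreq) = %zu\n", sizeof(struct iwreq));
	return 0;
}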
index dd5254077ef71bd54b67d5b16fb0975cde7d19d9..8771760670724337e366b61fc38ee08187a67ae6 100644 (file)
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
        struct snd_pcm_substream *substream;
        const struct snd_pcm_chmap_elem *map;
 
-       if (snd_BUG_ON(!info->chmap))
+       if (!info->chmap)
                return -EINVAL;
        substream = snd_pcm_chmap_substream(info, idx);
        if (!substream)
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
        unsigned int __user *dst;
        int c, count = 0;
 
-       if (snd_BUG_ON(!info->chmap))
+       if (!info->chmap)
                return -EINVAL;
        if (size < 8)
                return -ENOMEM;
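The two pcm_chmap hunks swap snd_BUG_ON() for a plain test. That matters because with CONFIG_SND_DEBUG disabled, snd_BUG_ON(cond) expands to a constant-false expression that never evaluates its argument, so the guard was compiled out on production kernels and a missing chmap went unchecked; the open-coded if makes the -EINVAL path unconditional. A toy re-creation of the pitfall (the bug_on() macro here imitates, but is not, the ALSA one):

#include <stdio.h>

#ifdef DEBUG
#define bug_on(cond)	(cond)			/* debug build: real check */
#else
#define bug_on(cond)	(0 && (cond))		/* production: always false */
#endif

static int use_map(const int *map)
{
	if (bug_on(!map))	/* vanishes unless built with -DDEBUG */
		return -1;
	return map[0];		/* NULL deref on production builds */
}

int main(void)
{
	int m[] = { 42 };

	printf("%d\n", use_map(m));
	/* use_map(NULL) would crash here without -DDEBUG */
	return 0;
}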
index 9e6f54f8c45d2330ee339a7f4c7d1ac86c8e4774..1e26854b3425e23bd9d2942194f3665fa37e06cf 100644 (file)
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
                cycle = increment_cycle_count(cycle, 1);
                if (s->handle_packet(s, 0, cycle, i) < 0) {
                        s->packet_index = -1;
-                       amdtp_stream_pcm_abort(s);
+                       if (in_interrupt())
+                               amdtp_stream_pcm_abort(s);
+                       WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
                        return;
                }
        }
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
        /* Queueing error or detecting invalid payload. */
        if (i < packets) {
                s->packet_index = -1;
-               amdtp_stream_pcm_abort(s);
+               if (in_interrupt())
+                       amdtp_stream_pcm_abort(s);
+               WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
                return;
        }
 
index 7e88317228212a63ad38e6e9880655f6ea567c85..ea1a91e99875e1f06933142d607a8b9880d210b1 100644 (file)
@@ -135,7 +135,7 @@ struct amdtp_stream {
        /* For a PCM substream processing. */
        struct snd_pcm_substream *pcm;
        struct tasklet_struct period_tasklet;
-       unsigned int pcm_buffer_pointer;
+       snd_pcm_uframes_t pcm_buffer_pointer;
        unsigned int pcm_period_pointer;
 
        /* To wait for first packet. */
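In the firewire-lib hunks, a packet error now aborts the PCM substream only from interrupt context and, in every case, parks the stream position at SNDRV_PCM_POS_XRUN with WRITE_ONCE(), so the pointer callback can report the XRUN to the PCM core without extra locking. The header change widens pcm_buffer_pointer to snd_pcm_uframes_t because SNDRV_PCM_POS_XRUN is (snd_pcm_uframes_t)-1 and would be truncated in an unsigned int. A userspace sketch of the same single-word signalling, using C11 relaxed atomics as a stand-in for WRITE_ONCE()/READ_ONCE():

#include <stdatomic.h>
#include <stdio.h>

#define POS_XRUN ((unsigned long)-1)	/* mirrors SNDRV_PCM_POS_XRUN */

static _Atomic unsigned long buffer_pointer;

/* producer (IRQ side): mark the stream as broken */
static void mark_xrun(void)
{
	atomic_store_explicit(&buffer_pointer, POS_XRUN, memory_order_relaxed);
}

/* consumer (pointer callback): report the XRUN to the caller */
static long current_pos(void)
{
	unsigned long pos = atomic_load_explicit(&buffer_pointer,
						 memory_order_relaxed);
	return pos == POS_XRUN ? -1 : (long)pos;
}

int main(void)
{
	atomic_store(&buffer_pointer, 128);
	printf("%ld\n", current_pos());	/* 128 */
	mark_xrun();
	printf("%ld\n", current_pos());	/* -1: XRUN */
	return 0;
}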
index 1770f085c2a694a398e2b30312cb79d6c3fd617a..01eb1dc7b5b3b5cf3c070a7656b81452890781e0 100644 (file)
@@ -370,10 +370,12 @@ enum {
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
 #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-                       IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)  || \
-                       IS_GLK(pci)
+#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+                         IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
+                         IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci))
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Kabylake-H */
        { PCI_DEVICE(0x8086, 0xa2f0),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Coffelake */
+       { PCI_DEVICE(0x8086, 0xa348),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
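The hda_intel hunk registers the Coffee Lake controller (PCI ID 8086:a348, spelled "Coffelake" in the original commit comment) plus a Broxton-T variant (8086:1a98) with the same Skylake capability flags, and rewrites IS_SKL_PLUS() so the whole alternation sits inside a single pair of parentheses. The removed form closed its parentheses after IS_BXT(pci), which happens to parse in a bare if () but mis-groups the moment the macro is negated or combined with &&. A toy demonstration of that precedence hazard (the predicates are made up):

#include <stdio.h>

#define BAD_ANY(x)	((x) == 1 || (x) == 2) || (x) == 3
#define GOOD_ANY(x)	((x) == 1 || (x) == 2 || (x) == 3)

int main(void)
{
	int x = 3;

	/* Negation mis-groups with the unparenthesized form:
	 * !BAD_ANY(3) -> !(3==1 || 3==2) || 3==3 -> 1 || 1 -> true(!) */
	printf("!BAD_ANY(3)  = %d\n", !BAD_ANY(x));	/* 1: wrong */
	printf("!GOOD_ANY(3) = %d\n", !GOOD_ANY(x));	/* 0: right */
	return 0;
}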