git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 22 Aug 2010 16:44:47 +0000 (09:44 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 22 Aug 2010 16:44:47 +0000 (09:44 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2:
  nilfs2: wait for discard to finish

142 files changed:
Documentation/DocBook/kernel-locking.tmpl
Documentation/hwmon/f71882fg
Documentation/powerpc/booting-without-of.txt
Documentation/powerpc/hvcs.txt
MAINTAINERS
arch/arm/plat-samsung/dev-hsmmc.c
arch/arm/plat-samsung/dev-hsmmc1.c
arch/arm/plat-samsung/dev-hsmmc2.c
arch/ia64/include/asm/unistd.h
arch/m68k/include/asm/ide.h
arch/m68knommu/kernel/process.c
arch/microblaze/kernel/prom_parse.c
arch/microblaze/pci/pci-common.c
arch/microblaze/pci/xilinx_pci.c
arch/um/include/asm/dma-mapping.h
arch/x86/Kconfig
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/trampoline.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/head_32.S
arch/x86/kernel/kprobes.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/trampoline.c
drivers/ata/sata_dwc_460ex.c
drivers/block/xsysace.c
drivers/char/pty.c
drivers/char/tty_io.c
drivers/char/xilinx_hwicap/xilinx_hwicap.c
drivers/hid/hid-core.c
drivers/hid/hid-egalax.c
drivers/hid/hid-ids.h
drivers/hid/hid-picolcd.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/Kconfig
drivers/hwmon/f71882fg.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mmc/core/host.c
drivers/mmc/host/Kconfig
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mtd/maps/physmap_of.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_nx.c
drivers/serial/of_serial.c
drivers/spi/coldfire_qspi.c
drivers/staging/pohmelfs/path_entry.c
drivers/video/matrox/matroxfb_base.h
fs/buffer.c
fs/cramfs/inode.c
fs/dcache.c
fs/exec.c
fs/fat/misc.c
fs/file_table.c
fs/fs_struct.c
fs/generic_acl.c
fs/hostfs/hostfs_kern.c
fs/internal.h
fs/jbd/checkpoint.c
fs/jbd/commit.c
fs/jbd/journal.c
fs/jbd/revoke.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/revoke.c
fs/mbcache.c
fs/namei.c
fs/namespace.c
fs/nfs/Kconfig
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/nfs4proc.c
fs/nfs/super.c
fs/nfsd/Kconfig
fs/nilfs2/super.c
fs/open.c
fs/pnode.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/super.c
fs/ufs/balloc.c
fs/ufs/ialloc.c
fs/ufs/truncate.c
fs/ufs/util.c
fs/ufs/util.h
include/asm-generic/syscalls.h
include/linux/buffer_head.h
include/linux/fs.h
include/linux/fs_struct.h
include/linux/kfifo.h
include/linux/lglock.h [new file with mode: 0644]
include/linux/mm_types.h
include/linux/spi/spi.h
include/linux/tty.h
include/sound/emu10k1.h
include/trace/events/workqueue.h [new file with mode: 0644]
kernel/fork.c
kernel/kfifo.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_functions_graph.c
kernel/workqueue.c
lib/Kconfig.debug
lib/radix-tree.c
mm/memory.c
mm/mlock.c
mm/mmap.c
mm/nommu.c
mm/oom_kill.c
mm/page-writeback.c
net/sunrpc/Kconfig
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtsock.c
samples/kfifo/bytestream-example.c
samples/kfifo/dma-example.c
samples/kfifo/inttype-example.c
samples/kfifo/record-example.c
scripts/recordmcount.pl
security/apparmor/path.c
security/selinux/hooks.c
sound/core/pcm_native.c
sound/pci/emu10k1/emu10k1.c
sound/pci/emu10k1/emupcm.c
sound/pci/emu10k1/memory.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/riptide/riptide.c
sound/soc/codecs/wm8776.c
tools/perf/Makefile
tools/perf/feature-tests.mak
tools/perf/util/ui/browsers/annotate.c

index 084f6ad7b7a0a4729b85aea3cc5c3e6eb6557689..0b1a3f97f285361a4075c8e267d42b2053747d9a 100644 (file)
@@ -1922,9 +1922,12 @@ machines due to caching.
       <function>mutex_lock()</function>
       </para>
       <para>
-       There is a <function>mutex_trylock()</function> which can be
-       used inside interrupt context, as it will not sleep.
+       There is a <function>mutex_trylock()</function> which does not
+       sleep.  Still, it must not be used inside interrupt context since
+       its implementation is not safe for that.
        <function>mutex_unlock()</function> will also never sleep.
+       It cannot be used in interrupt context either since a mutex
+       must be released by the same task that acquired it.
       </para>
      </listitem>
     </itemizedlist>
index 1a07fd674cd0e64198373459f04ad9e16f6fe0e6..a7952c2bd959338d4358a8fbdb51ee80c62fa61d 100644 (file)
@@ -2,10 +2,6 @@ Kernel driver f71882fg
 ======================
 
 Supported chips:
-  * Fintek F71808E
-    Prefix: 'f71808fg'
-    Addresses scanned: none, address read from Super I/O config space
-    Datasheet: Not public
   * Fintek F71858FG
     Prefix: 'f71858fg'
     Addresses scanned: none, address read from Super I/O config space
index 568fa08e82e54d03322810eab5bb26ae192f2065..302db5da49b37812eb19bf79ea0e2952e5f3a21f 100644 (file)
@@ -49,40 +49,13 @@ Table of Contents
       f) MDIO on GPIOs
       g) SPI busses
 
-  VII - Marvell Discovery mv64[345]6x System Controller chips
-    1) The /system-controller node
-    2) Child nodes of /system-controller
-      a) Marvell Discovery MDIO bus
-      b) Marvell Discovery ethernet controller
-      c) Marvell Discovery PHY nodes
-      d) Marvell Discovery SDMA nodes
-      e) Marvell Discovery BRG nodes
-      f) Marvell Discovery CUNIT nodes
-      g) Marvell Discovery MPSCROUTING nodes
-      h) Marvell Discovery MPSCINTR nodes
-      i) Marvell Discovery MPSC nodes
-      j) Marvell Discovery Watch Dog Timer nodes
-      k) Marvell Discovery I2C nodes
-      l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
-      m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
-      n) Marvell Discovery GPP (General Purpose Pins) nodes
-      o) Marvell Discovery PCI host bridge node
-      p) Marvell Discovery CPU Error nodes
-      q) Marvell Discovery SRAM Controller nodes
-      r) Marvell Discovery PCI Error Handler nodes
-      s) Marvell Discovery Memory Controller nodes
-
-  VIII - Specifying interrupt information for devices
+  VII - Specifying interrupt information for devices
     1) interrupts property
     2) interrupt-parent property
     3) OpenPIC Interrupt Controllers
     4) ISA Interrupt Controllers
 
-  IX - Specifying GPIO information for devices
-    1) gpios property
-    2) gpio-controller nodes
-
-  X - Specifying device power management information (sleep property)
+  VIII - Specifying device power management information (sleep property)
 
   Appendix A - Sample SOC node for MPC8540
 
index f93462c5db25f7828a17897424604bf7d84e3ea2..6d8be3468d7dd3ab5a376b22ba7b3a5900ded38e 100644 (file)
@@ -560,7 +560,7 @@ The proper channel for reporting bugs is either through the Linux OS
 distribution company that provided your OS or by posting issues to the
 PowerPC development mailing list at:
 
-linuxppc-dev@ozlabs.org
+linuxppc-dev@lists.ozlabs.org
 
 This request is to provide a documented and searchable public exchange
 of the problems and solutions surrounding this driver for the benefit of
index b5b8baa1d70eacc4ddfdff02f184f1bedb9dd639..433f353857563f596df86cf95bc4b8114c45d3ea 100644 (file)
@@ -456,7 +456,7 @@ F:  drivers/infiniband/hw/amso1100/
 
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:     Johannes Berg <johannes@sipsolutions.net>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/aoa/
@@ -1472,8 +1472,8 @@ F:        include/linux/can/platform/
 
 CELL BROADBAND ENGINE ARCHITECTURE
 M:     Arnd Bergmann <arnd@arndb.de>
-L:     linuxppc-dev@ozlabs.org
-L:     cbe-oss-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
+L:     cbe-oss-dev@lists.ozlabs.org
 W:     http://www.ibm.com/developerworks/power/cell/
 S:     Supported
 F:     arch/powerpc/include/asm/cell*.h
@@ -2371,13 +2371,13 @@ F:      include/linux/fb.h
 FREESCALE DMA DRIVER
 M:     Li Yang <leoli@freescale.com>
 M:     Zhang Wei <zw@zh-kernel.org>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/dma/fsldma.*
 
 FREESCALE I2C CPM DRIVER
 M:     Jochen Friedrich <jochen@scram.de>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-cpm.c
@@ -2393,7 +2393,7 @@ F:        drivers/video/imxfb.c
 FREESCALE SOC FS_ENET DRIVER
 M:     Pantelis Antoniou <pantelis.antoniou@gmail.com>
 M:     Vitaly Bordug <vbordug@ru.mvista.com>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/fs_enet/
@@ -2401,7 +2401,7 @@ F:        include/linux/fs_enet_pd.h
 
 FREESCALE QUICC ENGINE LIBRARY
 M:     Timur Tabi <timur@freescale.com>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
 F:     arch/powerpc/sysdev/qe_lib/
 F:     arch/powerpc/include/asm/*qe.h
@@ -2409,27 +2409,27 @@ F:      arch/powerpc/include/asm/*qe.h
 FREESCALE USB PERIPHERAL DRIVERS
 M:     Li Yang <leoli@freescale.com>
 L:     linux-usb@vger.kernel.org
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/usb/gadget/fsl*
 
 FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
 M:     Li Yang <leoli@freescale.com>
 L:     netdev@vger.kernel.org
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/net/ucc_geth*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
 M:     Timur Tabi <timur@freescale.com>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
 F:     drivers/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
 M:     Timur Tabi <timur@freescale.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
 F:     sound/soc/fsl/fsl*
 F:     sound/soc/fsl/mpc8610_hpcd.c
@@ -2564,7 +2564,7 @@ F:        mm/memory-failure.c
 F:     mm/hwpoison-inject.c
 
 HYPERVISOR VIRTUAL CONSOLE DRIVER
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Odd Fixes
 F:     drivers/char/hvc_*
 
@@ -3476,7 +3476,7 @@ F:        drivers/usb/misc/legousbtower.c
 
 LGUEST
 M:     Rusty Russell <rusty@rustcorp.com.au>
-L:     lguest@ozlabs.org
+L:     lguest@lists.ozlabs.org
 W:     http://lguest.ozlabs.org/
 S:     Maintained
 F:     Documentation/lguest/
@@ -3495,7 +3495,7 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
 M:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
 M:     Paul Mackerras <paulus@samba.org>
 W:     http://www.penguinppc.org/
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 Q:     http://patchwork.ozlabs.org/project/linuxppc-dev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
 S:     Supported
@@ -3505,14 +3505,14 @@ F:      arch/powerpc/
 LINUX FOR POWER MACINTOSH
 M:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
 W:     http://www.penguinppc.org/
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     arch/powerpc/platforms/powermac/
 F:     drivers/macintosh/
 
 LINUX FOR POWERPC EMBEDDED MPC5XXX
 M:     Grant Likely <grant.likely@secretlab.ca>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 T:     git git://git.secretlab.ca/git/linux-2.6.git
 S:     Maintained
 F:     arch/powerpc/platforms/512x/
@@ -3522,7 +3522,7 @@ LINUX FOR POWERPC EMBEDDED PPC4XX
 M:     Josh Boyer <jwboyer@linux.vnet.ibm.com>
 M:     Matt Porter <mporter@kernel.crashing.org>
 W:     http://www.penguinppc.org/
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
 S:     Maintained
 F:     arch/powerpc/platforms/40x/
@@ -3531,7 +3531,7 @@ F:        arch/powerpc/platforms/44x/
 LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
 M:     Grant Likely <grant.likely@secretlab.ca>
 W:     http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 T:     git git://git.secretlab.ca/git/linux-2.6.git
 S:     Maintained
 F:     arch/powerpc/*/*virtex*
@@ -3541,20 +3541,20 @@ LINUX FOR POWERPC EMBEDDED PPC8XX
 M:     Vitaly Bordug <vitb@kernel.crashing.org>
 M:     Marcelo Tosatti <marcelo@kvack.org>
 W:     http://www.penguinppc.org/
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     arch/powerpc/platforms/8xx/
 
 LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
 M:     Kumar Gala <galak@kernel.crashing.org>
 W:     http://www.penguinppc.org/
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     arch/powerpc/platforms/83xx/
 
 LINUX FOR POWERPC PA SEMI PWRFICIENT
 M:     Olof Johansson <olof@lixom.net>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     arch/powerpc/platforms/pasemi/
 F:     drivers/*/*pasemi*
@@ -4601,14 +4601,14 @@ F:      drivers/ata/sata_promise.*
 PS3 NETWORK SUPPORT
 M:     Geoff Levand <geoff@infradead.org>
 L:     netdev@vger.kernel.org
-L:     cbe-oss-dev@ozlabs.org
+L:     cbe-oss-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/net/ps3_gelic_net.*
 
 PS3 PLATFORM SUPPORT
 M:     Geoff Levand <geoff@infradead.org>
-L:     linuxppc-dev@ozlabs.org
-L:     cbe-oss-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
+L:     cbe-oss-dev@lists.ozlabs.org
 S:     Maintained
 F:     arch/powerpc/boot/ps3*
 F:     arch/powerpc/include/asm/lv1call.h
@@ -4622,7 +4622,7 @@ F:        sound/ppc/snd_ps3*
 
 PS3VRAM DRIVER
 M:     Jim Paris <jim@jtan.com>
-L:     cbe-oss-dev@ozlabs.org
+L:     cbe-oss-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/block/ps3vram.c
 
@@ -5068,7 +5068,7 @@ F:        drivers/mmc/host/sdhci.*
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
 M:     Anton Vorontsov <avorontsov@ru.mvista.com>
-L:     linuxppc-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
 F:     drivers/mmc/host/sdhci-of.*
@@ -5485,8 +5485,8 @@ F:        drivers/net/spider_net*
 
 SPU FILE SYSTEM
 M:     Jeremy Kerr <jk@ozlabs.org>
-L:     linuxppc-dev@ozlabs.org
-L:     cbe-oss-dev@ozlabs.org
+L:     linuxppc-dev@lists.ozlabs.org
+L:     cbe-oss-dev@lists.ozlabs.org
 W:     http://www.ibm.com/developerworks/power/cell/
 S:     Supported
 F:     Documentation/filesystems/spufs.txt
index b0f93f11e281877dbfb69407d8506f7c1433051d..9d2be0941410946877f042c6d36976bc93103b92 100644 (file)
@@ -70,4 +70,6 @@ void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd)
                set->cfg_gpio = pd->cfg_gpio;
        if (pd->cfg_card)
                set->cfg_card = pd->cfg_card;
+       if (pd->host_caps)
+               set->host_caps = pd->host_caps;
 }
index 1504fd802865c3917fea63059604b95ee302db1b..a6c8295840afdb23fa61d91815915a902dc42602 100644 (file)
@@ -70,4 +70,6 @@ void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd)
                set->cfg_gpio = pd->cfg_gpio;
        if (pd->cfg_card)
                set->cfg_card = pd->cfg_card;
+       if (pd->host_caps)
+               set->host_caps = pd->host_caps;
 }
index b28ef173444d282676e86370737d41da5d073e7c..cb0d7143381a1fe06cfbc75181339548d1309c82 100644 (file)
@@ -71,4 +71,6 @@ void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd)
                set->cfg_gpio = pd->cfg_gpio;
        if (pd->cfg_card)
                set->cfg_card = pd->cfg_card;
+       if (pd->host_caps)
+               set->host_caps = pd->host_caps;
 }
index 87f1bd1efc82b7c46eb7a1e35c7e337c83625411..954d398a54b4e7f60de2aaeabef172a0774fd93f 100644 (file)
@@ -356,8 +356,6 @@ asmlinkage unsigned long sys_mmap2(
                                int fd, long pgoff);
 struct pt_regs;
 struct sigaction;
-long sys_execve(const char __user *filename, char __user * __user *argv,
-                          char __user * __user *envp, struct pt_regs *regs);
 asmlinkage long sys_ia64_pipe(void);
 asmlinkage long sys_rt_sigaction(int sig,
                                 const struct sigaction __user *act,
index 3958726664bad268c0e185a462f6859bfe3e67f5..492fee8a1ab2e6491f52c961b1a93017eef49a41 100644 (file)
@@ -1,6 +1,4 @@
 /*
- *  linux/include/asm-m68k/ide.h
- *
  *  Copyright (C) 1994-1996  Linus Torvalds & authors
  */
 
@@ -34,6 +32,8 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 
+#ifdef CONFIG_MMU
+
 /*
  * Get rid of defs from io.h - ide has its private and conflicting versions
  * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we
 #define __ide_mm_outsw(port, addr, n)  raw_outsw((u16 *)port, addr, n)
 #define __ide_mm_outsl(port, addr, n)  raw_outsl((u32 *)port, addr, n)
 
+#else
+
+#define __ide_mm_insw(port, addr, n)   io_insw((unsigned int)port, addr, n)
+#define __ide_mm_insl(port, addr, n)   io_insl((unsigned int)port, addr, n)
+#define __ide_mm_outsw(port, addr, n)  io_outsw((unsigned int)port, addr, n)
+#define __ide_mm_outsl(port, addr, n)  io_outsl((unsigned int)port, addr, n)
+
+#endif /* CONFIG_MMU */
+
 #endif /* __KERNEL__ */
 #endif /* _M68K_IDE_H */
index 4d090d3c08971dbeb45c2258d2d2d60805219b59..6d3390590e5ba24be497b5c4a42d0327dbae6cf9 100644 (file)
@@ -316,14 +316,14 @@ void dump(struct pt_regs *fp)
                fp->d0, fp->d1, fp->d2, fp->d3);
        printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
                fp->d4, fp->d5, fp->a0, fp->a1);
-       printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %08x\n",
-               (unsigned int) rdusp(), (unsigned int) fp);
+       printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %p\n",
+               (unsigned int) rdusp(), fp);
 
        printk(KERN_EMERG "\nCODE:");
        tp = ((unsigned char *) fp->pc) - 0x20;
        for (sp = (unsigned long *) tp, i = 0; (i < 0x40);  i += 4) {
                if ((i % 0x10) == 0)
-                       printk(KERN_EMERG "%08x: ", (int) (tp + i));
+                       printk(KERN_EMERG "%p: ", tp + i);
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");
@@ -332,7 +332,7 @@ void dump(struct pt_regs *fp)
        tp = ((unsigned char *) fp) - 0x40;
        for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
                if ((i % 0x10) == 0)
-                       printk(KERN_EMERG "%08x: ", (int) (tp + i));
+                       printk(KERN_EMERG "%p: ", tp + i);
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");
@@ -341,7 +341,7 @@ void dump(struct pt_regs *fp)
        tp = (unsigned char *) (rdusp() - 0x10);
        for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
                if ((i % 0x10) == 0)
-                       printk(KERN_EMERG "%08x: ", (int) (tp + i));
+                       printk(KERN_EMERG "%p: ", tp + i);
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");
index d33ba17601fa20d61c86c3ea982aab72ca6508e1..99d9b61cccb592cb34985bf79fb777b0521a36bf 100644 (file)
@@ -73,7 +73,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
                /* We can only get here if we hit a P2P bridge with no node,
                 * let's do standard swizzling and try again
                 */
-               lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+               lspec = pci_swizzle_interrupt_pin(pdev, lspec);
                pdev = ppdev;
        }
 
index 23be25fec4d67bf7e48d612b50e1da9f766371ae..55ef532f32be6fc3c174e1280a7971ffc5329406 100644 (file)
 #include <linux/irq.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/byteorder.h>
 
@@ -1077,7 +1078,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
                struct dev_archdata *sd = &dev->dev.archdata;
 
                /* Setup OF node pointer in archdata */
-               sd->of_node = pci_device_to_OF_node(dev);
+               dev->dev.of_node = pci_device_to_OF_node(dev);
 
                /* Fixup NUMA node as it may not be setup yet by the generic
                 * code and is needed by the DMA init
index 7869a41b0f94cadff95ad17dc4eaf5a713b536c3..0687a42a5bd475166afed6e816c721a51517b4c9 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/ioport.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <asm/io.h>
 
index 17a2cb5a4178ba6d99a1f2d5c56b6deaf5525fa3..1f469e80fdd3e1ab737071bb91cf8aceee1010e6 100644 (file)
@@ -95,13 +95,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline int
-dma_get_cache_alignment(void)
-{
-       BUG();
-       return(0);
-}
-
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
index a84fc34c8f777c54eb707abe6b43c19a025c75f0..cea0cd9a316fb987bfa611a1dffa06cdba1f0332 100644 (file)
@@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS
 
 config KTIME_SCALAR
        def_bool X86_32
+
+config ARCH_CPU_PROBE_RELEASE
+       def_bool y
+       depends on HOTPLUG_CPU
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -749,11 +754,11 @@ config IOMMU_API
        def_bool (AMD_IOMMU || DMAR)
 
 config MAXSMP
-       bool "Configure Maximum number of SMP Processors and NUMA Nodes"
+       bool "Enable Maximum number of SMP Processors and NUMA Nodes"
        depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
        select CPUMASK_OFFSTACK
        ---help---
-         Configure maximum number of CPUS and NUMA Nodes for this architecture.
+         Enable maximum number of CPUS and NUMA Nodes for this architecture.
          If unsure, say N.
 
 config NR_CPUS
index 2984a25ff383d5db7a3dffac2d38dca3f9628d69..f686f49e8b7b5ca5a0d630877bccb079d5022845 100644 (file)
@@ -26,6 +26,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
+extern pgd_t trampoline_pg_dir[1024];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
index cb507bb05d7923a32d9134e61a938fdba184a2c9..4dde797c057800b0e377ac02eb16101430f865ef 100644 (file)
@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_page_table;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 
 extern unsigned long setup_trampoline(void);
+extern void __init setup_trampoline_page_table(void);
 extern void __init reserve_trampoline_memory(void);
 #else
-static inline void reserve_trampoline_memory(void) {};
+static inline void setup_trampoline_page_table(void) {}
+static inline void reserve_trampoline_memory(void) {}
 #endif /* CONFIG_X86_TRAMPOLINE */
 
 #endif /* __ASSEMBLY__ */
index 4dc0084ec1b108c6325f7df782c5a4b562035f63..f1efebaf55105fa835ac7938c1654295fdd81562 100644 (file)
@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
                struct irq_pin_list *entry;
 
                cfg = desc->chip_data;
+               if (!cfg)
+                       continue;
                entry = cfg->irq_2_pin;
                if (!entry)
                        continue;
index 60a57b13082d2658e32da03193a071a4b314cc73..ba5f62f45f01e136e849894076917684ffcd8c40 100644 (file)
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum)
        }
 
        /* OSVW unavailable or ID unknown, match family-model-stepping range */
-       ms = (cpu->x86_model << 8) | cpu->x86_mask;
+       ms = (cpu->x86_model << 4) | cpu->x86_mask;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
index 214ac860ebe0d26df806999f880633919d1ad816..d8d86d01400866c6320001fb715301276747cd52 100644 (file)
@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added)
  *   Intel Errata AAP53  (model 30)
  *   Intel Errata BD53   (model 44)
  *
- * These chips need to be 'reset' when adding counters by programming
- * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
- * either in sequence on the same PMC or on different PMCs.
+ * The official story:
+ *   These chips need to be 'reset' when adding counters by programming the
+ *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
+ *   in sequence on the same PMC or on different PMCs.
+ *
+ * In practise it appears some of these events do in fact count, and
+ * we need to programm all 4 events.
  */
-static void intel_pmu_nhm_enable_all(int added)
+static void intel_pmu_nhm_workaround(void)
 {
-       if (added) {
-               struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-               int i;
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       static const unsigned long nhm_magic[4] = {
+               0x4300B5,
+               0x4300D2,
+               0x4300B1,
+               0x4300B1
+       };
+       struct perf_event *event;
+       int i;
+
+       /*
+        * The Errata requires below steps:
+        * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
+        * 2) Configure 4 PERFEVTSELx with the magic events and clear
+        *    the corresponding PMCx;
+        * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
+        * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
+        * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
+        */
+
+       /*
+        * The real steps we choose are a little different from above.
+        * A) To reduce MSR operations, we don't run step 1) as they
+        *    are already cleared before this function is called;
+        * B) Call x86_perf_event_update to save PMCx before configuring
+        *    PERFEVTSELx with magic number;
+        * C) With step 5), we do clear only when the PERFEVTSELx is
+        *    not used currently.
+        * D) Call x86_perf_event_set_period to restore PMCx;
+        */
 
-               wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
-               wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
-               wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
+       /* We always operate 4 pairs of PERF Counters */
+       for (i = 0; i < 4; i++) {
+               event = cpuc->events[i];
+               if (event)
+                       x86_perf_event_update(event);
+       }
 
-               wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
-               wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+       for (i = 0; i < 4; i++) {
+               wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
+               wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
+       }
 
-               for (i = 0; i < 3; i++) {
-                       struct perf_event *event = cpuc->events[i];
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
 
-                       if (!event)
-                               continue;
+       for (i = 0; i < 4; i++) {
+               event = cpuc->events[i];
 
+               if (event) {
+                       x86_perf_event_set_period(event);
                        __x86_pmu_enable_event(&event->hw,
-                                              ARCH_PERFMON_EVENTSEL_ENABLE);
-               }
+                                       ARCH_PERFMON_EVENTSEL_ENABLE);
+               } else
+                       wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
        }
+}
+
+static void intel_pmu_nhm_enable_all(int added)
+{
+       if (added)
+               intel_pmu_nhm_workaround();
        intel_pmu_enable_all(added);
 }
 
index ff4c453e13f3807344fff90bd2741078dc90481c..fa8c1b8e09fb9f65b832c1fb15c84ee4c39b6336 100644 (file)
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-       movl $pa(swapper_pg_dir),%eax
+       movl pa(initial_page_table), %eax
        movl %eax,%cr3          /* set the page table pointer.. */
        movl %cr0,%eax
        orl  $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@ ignore_int:
 .align 4
 ENTRY(initial_code)
        .long i386_start_kernel
+ENTRY(initial_page_table)
+       .long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir)
 #endif
 swapper_pg_fixmap:
        .fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+       .fill 1024,4,0
+#endif
 ENTRY(empty_zero_page)
        .fill 4096,1,0
 
index 1bfb6cf4dd55d67aeeebbbf89d0bb60283a94c94..770ebfb349e93efe3367cf0c6caff93b61b8b884 100644 (file)
@@ -709,6 +709,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+       kprobe_opcode_t *correct_ret_addr = NULL;
 
        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);
@@ -740,14 +741,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
                        /* another task is sharing our hash bucket */
                        continue;
 
+               orig_ret_address = (unsigned long)ri->ret_addr;
+
+               if (orig_ret_address != trampoline_address)
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+       }
+
+       kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+       correct_ret_addr = ri->ret_addr;
+       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __get_cpu_var(current_kprobe) = &ri->rp->kp;
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+                       ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                        __get_cpu_var(current_kprobe) = NULL;
                }
 
-               orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);
 
                if (orig_ret_address != trampoline_address)
@@ -759,8 +780,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
                        break;
        }
 
-       kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
        kretprobe_hash_unlock(current, &flags);
 
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
index b008e7883207abe580a696e9b418dbddec84dabb..c3a4fbb2b996d00277d6523cb76b74e2c5944621 100644 (file)
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p)
        paging_init();
        x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
+       setup_trampoline_page_table();
+
        tboot_probe();
 
 #ifdef CONFIG_X86_64
index a5e928b0cb5fafad8d476fdf0f34407f8cf86983..8b3bfc4dd70872680ff4b451a8b03903bd68727b 100644 (file)
@@ -73,7 +73,6 @@
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this for trampoline_base protection from concurrent accesses when
+ * off- and onlining cores wildly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock()
+{
+        mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock()
+{
+        mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
@@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused)
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
+
+#ifdef CONFIG_X86_32
+       /*
+        * Switch away from the trampoline page-table
+        *
+        * Do this before cpu_init() because it needs to access per-cpu
+        * data which may not be mapped in the trampoline page-table.
+        */
+       load_cr3(swapper_pg_dir);
+       __flush_tlb_all();
+#endif
+
        vmi_bringup();
        cpu_init();
        preempt_disable();
@@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused)
                legacy_pic->chip->unmask(0);
        }
 
-#ifdef CONFIG_X86_32
-       while (low_mappings)
-               cpu_relax();
-       __flush_tlb_all();
-#endif
-
        /* This must be done before setting cpu_online_mask */
        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();
@@ -750,6 +774,7 @@ do_rest:
 #ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
        irq_ctx_init(cpu);
+       initial_page_table = __pa(&trampoline_pg_dir);
 #else
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
@@ -897,20 +922,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-#ifdef CONFIG_X86_32
-       /* init low mem mapping */
-       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-               min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-       flush_tlb_all();
-       low_mappings = 1;
-
        err = do_boot_cpu(apicid, cpu);
 
-       zap_low_mappings(false);
-       low_mappings = 0;
-#else
-       err = do_boot_cpu(apicid, cpu);
-#endif
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
                return -EIO;
index c652ef62742df62340d03465231e88885c8d5329..a874495b3673baeb27467d144995d885f2f94ebc 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/io.h>
 
 #include <asm/trampoline.h>
+#include <asm/pgtable.h>
 #include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void)
        memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
        return virt_to_phys(trampoline_base);
 }
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+       /* Copy kernel address range */
+       clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       min_t(unsigned long, KERNEL_PGD_PTRS,
+                             KERNEL_PGD_BOUNDARY));
+
+       /* Initialize low mappings */
+       clone_pgd_range(trampoline_pg_dir,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       min_t(unsigned long, KERNEL_PGD_PTRS,
+                             KERNEL_PGD_BOUNDARY));
+#endif
+}
index ea24c1e51be221e167ebc7fc7c2bd2658aff8e58..2673a3d1480654ceec39f2ab144b15cde72ba72e 100644 (file)
@@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = {
        },
 };
 
-static int sata_dwc_probe(struct of_device *ofdev,
+static int sata_dwc_probe(struct platform_device *ofdev,
                        const struct of_device_id *match)
 {
        struct sata_dwc_device *hsdev;
@@ -1702,7 +1702,7 @@ error_out:
        return err;
 }
 
-static int sata_dwc_remove(struct of_device *ofdev)
+static int sata_dwc_remove(struct platform_device *ofdev)
 {
        struct device *dev = &ofdev->dev;
        struct ata_host *host = dev_get_drvdata(dev);
index 2982b3ee9465d1521a6296186c52be9c3a8dc462..057413bb16e294d20d476d49ea4770b56a8e986c 100644 (file)
@@ -94,6 +94,7 @@
 #include <linux/hdreg.h>
 #include <linux/platform_device.h>
 #if defined(CONFIG_OF)
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
index ad46eae1f9bb207847fd8ac2bf80b58d4f3922d8..c350d01716bdace6ef510809e964a57c7129134b 100644 (file)
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        }
 
        set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-       filp->private_data = tty;
-       file_move(filp, &tty->tty_files);
+
+       tty_add_file(tty, filp);
 
        retval = devpts_pty_new(inode, tty->link);
        if (retval)
index 0350c42375a217c0337cdce4855fb9ad54a9455c..949067a0bd4743151515382b07ccf7aecad11316 100644 (file)
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers);                     /* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 EXPORT_SYMBOL(tty_mutex);
 
+/* Spinlock to protect the tty->tty_files list */
+DEFINE_SPINLOCK(tty_files_lock);
+
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
 static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
 ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty)
        kfree(tty);
 }
 
+static inline struct tty_struct *file_tty(struct file *file)
+{
+       return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+       struct tty_file_private *priv;
+
+       /* XXX: must implement proper error handling in callers */
+       priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
+
+       priv->tty = tty;
+       priv->file = file;
+       file->private_data = priv;
+
+       spin_lock(&tty_files_lock);
+       list_add(&priv->list, &tty->tty_files);
+       spin_unlock(&tty_files_lock);
+}
+
+/* Delete file from its tty */
+void tty_del_file(struct file *file)
+{
+       struct tty_file_private *priv = file->private_data;
+
+       spin_lock(&tty_files_lock);
+       list_del(&priv->list);
+       spin_unlock(&tty_files_lock);
+       file->private_data = NULL;
+       kfree(priv);
+}
+
+
 #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
 
 /**
@@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
        struct list_head *p;
        int count = 0;
 
-       file_list_lock();
+       spin_lock(&tty_files_lock);
        list_for_each(p, &tty->tty_files) {
                count++;
        }
-       file_list_unlock();
+       spin_unlock(&tty_files_lock);
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
            tty->driver->subtype == PTY_TYPE_SLAVE &&
            tty->link && tty->link->count)
@@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty)
        struct file *cons_filp = NULL;
        struct file *filp, *f = NULL;
        struct task_struct *p;
+       struct tty_file_private *priv;
        int    closecount = 0, n;
        unsigned long flags;
        int refs = 0;
@@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty)
 
 
        spin_lock(&redirect_lock);
-       if (redirect && redirect->private_data == tty) {
+       if (redirect && file_tty(redirect) == tty) {
                f = redirect;
                redirect = NULL;
        }
@@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty)
           workqueue with the lock held */
        check_tty_count(tty, "tty_hangup");
 
-       file_list_lock();
+       spin_lock(&tty_files_lock);
        /* This breaks for file handles being sent over AF_UNIX sockets ? */
-       list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
+       list_for_each_entry(priv, &tty->tty_files, list) {
+               filp = priv->file;
                if (filp->f_op->write == redirected_tty_write)
                        cons_filp = filp;
                if (filp->f_op->write != tty_write)
@@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty)
                __tty_fasync(-1, filp, 0);      /* can't block */
                filp->f_op = &hung_up_tty_fops;
        }
-       file_list_unlock();
+       spin_unlock(&tty_files_lock);
 
        tty_ldisc_hangup(tty);
 
@@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
 {
        int i;
-       struct tty_struct *tty;
-       struct inode *inode;
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
 
-       tty = file->private_data;
-       inode = file->f_path.dentry->d_inode;
        if (tty_paranoia_check(tty, inode, "tty_read"))
                return -EIO;
        if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 static ssize_t tty_write(struct file *file, const char __user *buf,
                                                size_t count, loff_t *ppos)
 {
-       struct tty_struct *tty;
        struct inode *inode = file->f_path.dentry->d_inode;
+       struct tty_struct *tty = file_tty(file);
+       struct tty_ldisc *ld;
        ssize_t ret;
-       struct tty_ldisc *ld;
 
-       tty = file->private_data;
        if (tty_paranoia_check(tty, inode, "tty_write"))
                return -EIO;
        if (!tty || !tty->ops->write ||
@@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work)
        tty_driver_kref_put(driver);
        module_put(driver->owner);
 
-       file_list_lock();
+       spin_lock(&tty_files_lock);
        list_del_init(&tty->tty_files);
-       file_list_unlock();
+       spin_unlock(&tty_files_lock);
 
        put_pid(tty->pgrp);
        put_pid(tty->session);
@@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx)
 
 int tty_release(struct inode *inode, struct file *filp)
 {
-       struct tty_struct *tty, *o_tty;
+       struct tty_struct *tty = file_tty(filp);
+       struct tty_struct *o_tty;
        int     pty_master, tty_closing, o_tty_closing, do_sleep;
        int     devpts;
        int     idx;
        char    buf[64];
 
-       tty = filp->private_data;
        if (tty_paranoia_check(tty, inode, "tty_release_dev"))
                return 0;
 
@@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp)
         *  - do_tty_hangup no longer sees this file descriptor as
         *    something that needs to be handled for hangups.
         */
-       file_kill(filp);
-       filp->private_data = NULL;
+       tty_del_file(filp);
 
        /*
         * Perform some housekeeping before deciding whether to return.
@@ -1839,8 +1875,8 @@ got_driver:
                return PTR_ERR(tty);
        }
 
-       filp->private_data = tty;
-       file_move(filp, &tty->tty_files);
+       tty_add_file(tty, filp);
+
        check_tty_count(tty, "tty_open");
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
            tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1916,11 +1952,10 @@ got_driver:
 
 static unsigned int tty_poll(struct file *filp, poll_table *wait)
 {
-       struct tty_struct *tty;
+       struct tty_struct *tty = file_tty(filp);
        struct tty_ldisc *ld;
        int ret = 0;
 
-       tty = filp->private_data;
        if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
                return 0;
 
@@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
 
 static int __tty_fasync(int fd, struct file *filp, int on)
 {
-       struct tty_struct *tty;
+       struct tty_struct *tty = file_tty(filp);
        unsigned long flags;
        int retval = 0;
 
-       tty = filp->private_data;
        if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
                goto out;
 
@@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
  */
 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct tty_struct *tty, *real_tty;
+       struct tty_struct *tty = file_tty(file);
+       struct tty_struct *real_tty;
        void __user *p = (void __user *)arg;
        int retval;
        struct tty_ldisc *ld;
        struct inode *inode = file->f_dentry->d_inode;
 
-       tty = file->private_data;
        if (tty_paranoia_check(tty, inode, "tty_ioctl"))
                return -EINVAL;
 
@@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
 {
        struct inode *inode = file->f_dentry->d_inode;
-       struct tty_struct *tty = file->private_data;
+       struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
        int retval = -ENOIOCTLCMD;
 
@@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty)
                                if (!filp)
                                        continue;
                                if (filp->f_op->read == tty_read &&
-                                   filp->private_data == tty) {
+                                   file_tty(filp) == tty) {
                                        printk(KERN_NOTICE "SAK: killed process %d"
                                            " (%s): fd#%d opened to the tty\n",
                                            task_pid_nr(p), p->comm, i);
index 0ed763cd2e77499471bc6f4abc0da36e199ec3dd..b663d573aad99ed5257f9ab324d566309e70ac37 100644 (file)
@@ -94,6 +94,7 @@
 
 #ifdef CONFIG_OF
 /* For open firmware. */
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
index e635199a0cd258298d0f50fd658fbcaec647f569..0c52899be9643d85af5c7ac6be8c87bc67d6e6ce 100644 (file)
@@ -1299,6 +1299,7 @@ static const struct hid_device_id hid_blacklist[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
index f44bdc084cb297eed86ea504c711eec6f91ebaf0..8ca7f65cf2f804d32120fadfa81cc9b900dfd94a 100644 (file)
@@ -159,6 +159,13 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field,
 {
        struct egalax_data *td = hid_get_drvdata(hid);
 
+       /* Note, eGalax has two product lines: the first is resistive and
+        * uses a standard parallel multitouch protocol (product ID ==
+        * 48xx).  The second is capacitive and uses an unusual "serial"
+        * protocol with a different message for each multitouch finger
+        * (product ID == 72xx).  We do not yet generate a correct event
+        * sequence for the capacitive/serial protocol.
+        */
        if (hid->claimed & HID_CLAIMED_INPUT) {
                struct input_dev *input = field->hidinput->input;
 
@@ -246,6 +253,8 @@ static void egalax_remove(struct hid_device *hdev)
 static const struct hid_device_id egalax_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
                        USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, egalax_devices);
index d3fc13ae094da4fcd5ef93671fbfab73332ff606..85c6d13c9ffa9369fca613eda828133b2b69a3e2 100644 (file)
 #define USB_VENDOR_ID_DWAV             0x0eef
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER   0x0001
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH   0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1  0x720c
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
index 346f0e34987e68454dddd3393c2b24cc1f640729..bc2e0774062864667ca92c52e3b4e7a23d923f53 100644 (file)
@@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info)
        ref_cnt--;
        mutex_lock(&info->lock);
        (*ref_cnt)--;
-       may_release = !ref_cnt;
+       may_release = !*ref_cnt;
        mutex_unlock(&info->lock);
        if (may_release) {
-               framebuffer_release(info);
                vfree((u8 *)info->fix.smem_start);
+               framebuffer_release(info);
        }
 }
 
index 254a003af048dd7a909e68f4a96ffcaae391bc96..0a29c51114aaf0d36c64f6c8c25195db5e2f3747 100644 (file)
@@ -266,13 +266,15 @@ static int hiddev_open(struct inode *inode, struct file *file)
 {
        struct hiddev_list *list;
        struct usb_interface *intf;
+       struct hid_device *hid;
        struct hiddev *hiddev;
        int res;
 
        intf = usb_find_interface(&hiddev_driver, iminor(inode));
        if (!intf)
                return -ENODEV;
-       hiddev = usb_get_intfdata(intf);
+       hid = usb_get_intfdata(intf);
+       hiddev = hid->hiddev;
 
        if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))
                return -ENOMEM;
@@ -587,7 +589,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct hiddev_list *list = file->private_data;
        struct hiddev *hiddev = list->hiddev;
        struct hid_device *hid = hiddev->hid;
-       struct usb_device *dev = hid_to_usb_dev(hid);
+       struct usb_device *dev;
        struct hiddev_collection_info cinfo;
        struct hiddev_report_info rinfo;
        struct hiddev_field_info finfo;
@@ -601,9 +603,11 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        /* Called without BKL by compat methods so no BKL taken */
 
        /* FIXME: Who or what stop this racing with a disconnect ?? */
-       if (!hiddev->exist)
+       if (!hiddev->exist || !hid)
                return -EIO;
 
+       dev = hid_to_usb_dev(hid);
+
        switch (cmd) {
 
        case HIDIOCGVERSION:
@@ -888,7 +892,6 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
        hid->hiddev = hiddev;
        hiddev->hid = hid;
        hiddev->exist = 1;
-       usb_set_intfdata(usbhid->intf, usbhid);
        retval = usb_register_dev(usbhid->intf, &hiddev_class);
        if (retval) {
                err_hid("Not able to get a minor for this device.");
index 0fba829431258c5122b56f794a662096541d41a5..4d4d09bdec0a7a7cb043725b2fb94dc1fff23dc7 100644 (file)
@@ -332,11 +332,11 @@ config SENSORS_F71805F
          will be called f71805f.
 
 config SENSORS_F71882FG
-       tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000"
+       tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
        depends on EXPERIMENTAL
        help
-         If you say yes here you get support for hardware monitoring features
-         of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
+         If you say yes here you get support for hardware monitoring
+         features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
          F71889FG and F8000 Super-I/O chips.
 
          This driver can also be built as a module.  If so, the module
index 6207120dcd4df05df282b357fc53cabbbde06b61..537841ef44b99d179318f7510dbf28dddedb0ed8 100644 (file)
@@ -45,7 +45,6 @@
 #define SIO_REG_ADDR           0x60    /* Logical device address (2 bytes) */
 
 #define SIO_FINTEK_ID          0x1934  /* Manufacturers ID */
-#define SIO_F71808_ID          0x0901  /* Chipset ID */
 #define SIO_F71858_ID          0x0507  /* Chipset ID */
 #define SIO_F71862_ID          0x0601  /* Chipset ID */
 #define SIO_F71882_ID          0x0541  /* Chipset ID */
@@ -97,10 +96,9 @@ static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
+enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
 
 static const char *f71882fg_names[] = {
-       "f71808fg",
        "f71858fg",
        "f71862fg",
        "f71882fg",
@@ -308,8 +306,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
        SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
 };
 
-/* In attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
+/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
+static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
        SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
        SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
        SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
@@ -319,22 +317,6 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
        SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
        SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
        SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
-};
-
-/* In attr for the f71808fg */
-static struct sensor_device_attribute_2 f71808_in_attr[] = {
-       SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
-       SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
-       SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
-       SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
-       SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
-       SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
-       SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7),
-       SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8),
-};
-
-/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
        SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
        SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
                store_temp_max, 0, 1),
@@ -373,10 +355,6 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
                store_temp_beep, 0, 6),
        SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
        SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
-};
-
-/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 f71862_temp_attr[] = {
        SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
        SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
                store_temp_max, 0, 3),
@@ -1011,11 +989,6 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
                                data->temp_type[1] = 6;
                                break;
                        }
-               } else if (data->type == f71808fg) {
-                       reg  = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
-                       data->temp_type[1] = (reg & 0x02) ? 2 : 4;
-                       data->temp_type[2] = (reg & 0x04) ? 2 : 4;
-
                } else {
                        reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
                        if ((reg2 & 0x03) == 0x01)
@@ -1898,8 +1871,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
 
        val /= 1000;
 
-       if (data->type == f71889fg
-        || data->type == f71808fg)
+       if (data->type == f71889fg)
                val = SENSORS_LIMIT(val, -128, 127);
        else
                val = SENSORS_LIMIT(val, 0, 127);
@@ -2002,28 +1974,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                        /* fall through! */
                case f71862fg:
                        err = f71882fg_create_sysfs_files(pdev,
-                                       f71862_temp_attr,
-                                       ARRAY_SIZE(f71862_temp_attr));
-                       if (err)
-                               goto exit_unregister_sysfs;
-                       err = f71882fg_create_sysfs_files(pdev,
-                                       fxxxx_in_attr,
-                                       ARRAY_SIZE(fxxxx_in_attr));
-                       if (err)
-                               goto exit_unregister_sysfs;
-                       err = f71882fg_create_sysfs_files(pdev,
-                                       fxxxx_temp_attr,
-                                       ARRAY_SIZE(fxxxx_temp_attr));
-                       break;
-               case f71808fg:
-                       err = f71882fg_create_sysfs_files(pdev,
-                                       f71808_in_attr,
-                                       ARRAY_SIZE(f71808_in_attr));
-                       if (err)
-                               goto exit_unregister_sysfs;
-                       err = f71882fg_create_sysfs_files(pdev,
-                                       fxxxx_temp_attr,
-                                       ARRAY_SIZE(fxxxx_temp_attr));
+                                       fxxxx_in_temp_attr,
+                                       ARRAY_SIZE(fxxxx_in_temp_attr));
                        break;
                case f8000:
                        err = f71882fg_create_sysfs_files(pdev,
@@ -2050,7 +2002,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                case f71862fg:
                        err = (data->pwm_enable & 0x15) != 0x15;
                        break;
-               case f71808fg:
                case f71882fg:
                case f71889fg:
                        err = 0;
@@ -2096,7 +2047,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                                        f8000_auto_pwm_attr,
                                        ARRAY_SIZE(f8000_auto_pwm_attr));
                        break;
-               case f71808fg:
                case f71889fg:
                        for (i = 0; i < nr_fans; i++) {
                                data->pwm_auto_point_mapping[i] =
@@ -2176,22 +2126,8 @@ static int f71882fg_remove(struct platform_device *pdev)
                        /* fall through! */
                case f71862fg:
                        f71882fg_remove_sysfs_files(pdev,
-                                       f71862_temp_attr,
-                                       ARRAY_SIZE(f71862_temp_attr));
-                       f71882fg_remove_sysfs_files(pdev,
-                                       fxxxx_in_attr,
-                                       ARRAY_SIZE(fxxxx_in_attr));
-                       f71882fg_remove_sysfs_files(pdev,
-                                       fxxxx_temp_attr,
-                                       ARRAY_SIZE(fxxxx_temp_attr));
-                       break;
-               case f71808fg:
-                       f71882fg_remove_sysfs_files(pdev,
-                                       f71808_in_attr,
-                                       ARRAY_SIZE(f71808_in_attr));
-                       f71882fg_remove_sysfs_files(pdev,
-                                       fxxxx_temp_attr,
-                                       ARRAY_SIZE(fxxxx_temp_attr));
+                                       fxxxx_in_temp_attr,
+                                       ARRAY_SIZE(fxxxx_in_temp_attr));
                        break;
                case f8000:
                        f71882fg_remove_sysfs_files(pdev,
@@ -2259,9 +2195,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
 
        devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
        switch (devid) {
-       case SIO_F71808_ID:
-               sio_data->type = f71808fg;
-               break;
        case SIO_F71858_ID:
                sio_data->type = f71858fg;
                break;
index 11567c7999a243d3d95ebecdb623c26de49705ce..c148b630215484f9689bf9257d6acf286685c37a 100644 (file)
@@ -2136,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
         * with the rest of the array)
         */
        mdk_rdev_t *rdev;
-
-       /* First make sure individual recovery_offsets are correct */
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
-               if (rdev->raid_disk >= 0 &&
-                   mddev->delta_disks >= 0 &&
-                   !test_bit(In_sync, &rdev->flags) &&
-                   mddev->curr_resync_completed > rdev->recovery_offset)
-                               rdev->recovery_offset = mddev->curr_resync_completed;
-
-       }       
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (rdev->sb_events == mddev->events ||
                    (nospares &&
@@ -2167,12 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change)
        int sync_req;
        int nospares = 0;
 
-       mddev->utime = get_seconds();
-       if (mddev->external)
-               return;
 repeat:
+       /* First make sure individual recovery_offsets are correct */
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               if (rdev->raid_disk >= 0 &&
+                   mddev->delta_disks >= 0 &&
+                   !test_bit(In_sync, &rdev->flags) &&
+                   mddev->curr_resync_completed > rdev->recovery_offset)
+                               rdev->recovery_offset = mddev->curr_resync_completed;
+
+       }       
+       if (mddev->external || !mddev->persistent) {
+               clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+               clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               wake_up(&mddev->sb_wait);
+               return;
+       }
+
        spin_lock_irq(&mddev->write_lock);
 
+       mddev->utime = get_seconds();
+
        set_bit(MD_CHANGE_PENDING, &mddev->flags);
        if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
                force_change = 1;
@@ -2221,19 +2226,6 @@ repeat:
                MD_BUG();
                mddev->events --;
        }
-
-       /*
-        * do not write anything to disk if using
-        * nonpersistent superblocks
-        */
-       if (!mddev->persistent) {
-               if (!mddev->external)
-                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-
-               spin_unlock_irq(&mddev->write_lock);
-               wake_up(&mddev->sb_wait);
-               return;
-       }
        sync_sbs(mddev, nospares);
        spin_unlock_irq(&mddev->write_lock);
 
index 73cc74ffc26bd5eefb7166aa37130e4c47daa56a..ad83a4dcadc3ed7cafa914d2e4dcb7ef1a939fdf 100644 (file)
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        struct bio_list bl;
        struct page **behind_pages = NULL;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = (bio->bi_rw & REQ_SYNC);
-       bool do_barriers;
+       const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+       unsigned long do_barriers;
        mdk_rdev_t *blocked_rdev;
 
        /*
@@ -1120,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev)
 {
        int i;
        conf_t *conf = mddev->private;
+       int count = 0;
+       unsigned long flags;
 
        /*
         * Find all failed disks within the RAID1 configuration 
@@ -1131,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev)
                if (rdev
                    && !test_bit(Faulty, &rdev->flags)
                    && !test_and_set_bit(In_sync, &rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded--;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       count++;
+                       sysfs_notify_dirent(rdev->sysfs_state);
                }
        }
+       spin_lock_irqsave(&conf->device_lock, flags);
+       mddev->degraded -= count;
+       spin_unlock_irqrestore(&conf->device_lock, flags);
 
        print_conf(conf);
-       return 0;
+       return count;
 }
 
 
@@ -1640,7 +1643,7 @@ static void raid1d(mddev_t *mddev)
                         * We already have a nr_pending reference on these rdevs.
                         */
                        int i;
-                       const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
+                       const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
                        clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
                        clear_bit(R1BIO_Barrier, &r1_bio->state);
                        for (i=0; i < conf->raid_disks; i++)
@@ -1696,7 +1699,7 @@ static void raid1d(mddev_t *mddev)
                                       (unsigned long long)r1_bio->sector);
                                raid_end_bio_io(r1_bio);
                        } else {
-                               const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
+                               const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
                                r1_bio->bios[r1_bio->read_disk] =
                                        mddev->ro ? IO_BLOCKED : NULL;
                                r1_bio->read_disk = disk;
index a88aeb5198c76a6c3a5ed58693d71f751ae975a7..84718383124d665f2c9382f5149d99773acde408 100644 (file)
@@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = (bio->bi_rw & REQ_SYNC);
+       const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        struct bio_list bl;
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
@@ -1116,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev)
        int i;
        conf_t *conf = mddev->private;
        mirror_info_t *tmp;
+       int count = 0;
+       unsigned long flags;
 
        /*
         * Find all non-in_sync disks within the RAID10 configuration
@@ -1126,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev)
                if (tmp->rdev
                    && !test_bit(Faulty, &tmp->rdev->flags)
                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded--;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       count++;
+                       sysfs_notify_dirent(tmp->rdev->sysfs_state);
                }
        }
+       spin_lock_irqsave(&conf->device_lock, flags);
+       mddev->degraded -= count;
+       spin_unlock_irqrestore(&conf->device_lock, flags);
 
        print_conf(conf);
-       return 0;
+       return count;
 }
 
 
@@ -1734,7 +1737,7 @@ static void raid10d(mddev_t *mddev)
                                raid_end_bio_io(r10_bio);
                                bio_put(bio);
                        } else {
-                               const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+                               const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
                                bio_put(bio);
                                rdev = conf->mirrors[mirror].rdev;
                                if (printk_ratelimit())
index 866d4b5a144c465daf21e439b9b0e6ef36571d6a..69b0a169e43d483094200d88cd7d4e5ae05e9d19 100644 (file)
@@ -5330,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev)
        int i;
        raid5_conf_t *conf = mddev->private;
        struct disk_info *tmp;
+       int count = 0;
+       unsigned long flags;
 
        for (i = 0; i < conf->raid_disks; i++) {
                tmp = conf->disks + i;
@@ -5337,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev)
                    && tmp->rdev->recovery_offset == MaxSector
                    && !test_bit(Faulty, &tmp->rdev->flags)
                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded--;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       count++;
+                       sysfs_notify_dirent(tmp->rdev->sysfs_state);
                }
        }
+       spin_lock_irqsave(&conf->device_lock, flags);
+       mddev->degraded -= count;
+       spin_unlock_irqrestore(&conf->device_lock, flags);
        print_raid5_conf(conf);
-       return 0;
+       return count;
 }
 
 static int raid5_remove_disk(mddev_t *mddev, int number)
index 0efe631e50cab2ddda5e6a98022e5b8e49f46705..d80cfdc8edd2663841894348fa33e7dc4e597b1e 100644 (file)
@@ -86,7 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        init_waitqueue_head(&host->wq);
        INIT_DELAYED_WORK(&host->detect, mmc_rescan);
        INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
+#ifdef CONFIG_PM
        host->pm_notify.notifier_call = mmc_pm_notify;
+#endif
 
        /*
         * By default, hosts do not support SGIO or large requests.
index 283190bc2a40c11b7cea3defafc31cc314a4d683..68d12794cfd9b16af4ca4a1f98f57649cdcdc1e3 100644 (file)
@@ -132,7 +132,7 @@ config MMC_SDHCI_CNS3XXX
 
 config MMC_SDHCI_S3C
        tristate "SDHCI support on Samsung S3C SoC"
-       depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
+       depends on MMC_SDHCI && PLAT_SAMSUNG
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          often referrered to as the HSMMC block in some of the Samsung S3C
index 0a7f2614c6f00305eb65fc18fc30dcf0b9f3648a..71ad4163b95e12b45aab7d41379c8022995d8ac4 100644 (file)
@@ -242,7 +242,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
 {
        struct sdhci_host *host = platform_get_drvdata(dev);
        if (host) {
-               mutex_lock(&host->lock);
+               spin_lock(&host->lock);
                if (state) {
                        dev_dbg(&dev->dev, "card inserted.\n");
                        host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -252,8 +252,8 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
                        host->flags |= SDHCI_DEVICE_DEAD;
                        host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
                }
-               sdhci_card_detect(host);
-               mutex_unlock(&host->lock);
+               tasklet_schedule(&host->card_tasklet);
+               spin_unlock(&host->lock);
        }
 }
 
index 785512133b508a593149576d7a37a9988c8d2338..401527d273b5811e7a1981af12751ba099b490a8 100644 (file)
@@ -1180,7 +1180,8 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        else
                ctrl &= ~SDHCI_CTRL_4BITBUS;
 
-       if (ios->timing == MMC_TIMING_SD_HS)
+       if (ios->timing == MMC_TIMING_SD_HS &&
+           !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
                ctrl |= SDHCI_CTRL_HISPD;
        else
                ctrl &= ~SDHCI_CTRL_HISPD;
index 036cfae763685f041cc0bf39609e571f303bfe18..d316bc79b63689048c751daff28b806a7ebdfb0f 100644 (file)
@@ -245,6 +245,8 @@ struct sdhci_host {
 #define SDHCI_QUIRK_MISSING_CAPS                       (1<<27)
 /* Controller uses Auto CMD12 command to stop the transfer */
 #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12             (1<<28)
+/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
+#define SDHCI_QUIRK_NO_HISPD_BIT                       (1<<29)
 
        int                     irq;            /* Device IRQ */
        void __iomem *          ioaddr;         /* Mapped address */
index 00af55d7afba60785b61b011814aba2e46a6f0df..fe63f6bd663c1f7a5ac8db57a0014c1c4a019da0 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/concat.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 
index a3c7473dd409c51f9dd1b215eb09673a3fd858e8..d551ddd9537a34d17fe3c13b7604855fc5c06f5b 100644 (file)
@@ -2866,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
                 */
                if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
                                id_data[0] == NAND_MFR_SAMSUNG &&
+                               (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
                                id_data[5] != 0x00) {
                        /* Calc pagesize */
                        mtd->writesize = 2048 << (extid & 0x03);
@@ -2934,14 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
                chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
 
        /* Set the bad block position */
-       if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO ||
-                               (*maf_id == NAND_MFR_SAMSUNG &&
-                                mtd->writesize == 512) ||
-                               *maf_id == NAND_MFR_AMD))
-               chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
-       else
+       if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
                chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
-
+       else
+               chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
 
        /* Get chip options, preserve non chip based options */
        chip->options &= ~NAND_CHIPOPTIONS_MSK;
index e02fa4f0e3c9a0a81c03bdf80e54d071ed1e079f..4d89f37802075a26e602cf4ba6f42de54d07cfc8 100644 (file)
@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
 #define tAR_NDTR1(r)   (((r) >> 0) & 0xf)
 
 /* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk)      (int)(((ns) * (clk / 1000000) / 1000) - 1)
+#define ns2cycle(ns, clk)      (int)((ns) * (clk / 1000000) / 1000)
 
 /* convert nand flash controller clock cycles to nano-seconds */
 #define cycle2ns(c, clk)       ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
index f065204e401b55cba2010557ab8ca81105a66ccb..95a26fb1626c8c21af2473cc02c75fa654de08ce 100644 (file)
@@ -132,7 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
 int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
-inline void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdiscoverywait;
index e031a734836ed37a372e2f0cf105a029dad46980..5d4a3822382d704ad604c962ed37d14f1d120199 100644 (file)
@@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
        return QLA_SUCCESS;
 }
 
-inline void
+void
 qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
 {
        uint32_t drv_active;
index 659a695bdad6474e832c9d4ffbef7648fe45c4d5..2af8fd1131234be29fd612b207da23543c78870b 100644 (file)
 #include <linux/slab.h>
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/nwpserial.h>
 
-#include <asm/prom.h>
-
 struct of_serial_info {
        int type;
        int line;
index 59be3efe063621e2fffb7b576e8ea0d8daa10b32..052b3c7fa6a0f644d26c613e67e69ffe00ad05be 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/io.h>
index cdc4dd50d638a51abc240c780989b4433856b27c..8ec83d2dffb75ec23c67d35cce2c5ee272943c1a 100644 (file)
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le
                return -ENOENT;
        }
 
-       read_lock(&current->fs->lock);
+       spin_lock(&current->fs->lock);
        path.mnt = mntget(current->fs->root.mnt);
-       read_unlock(&current->fs->lock);
+       spin_unlock(&current->fs->lock);
 
        path.dentry = d;
 
@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
                return -ENOENT;
        }
 
-       read_lock(&current->fs->lock);
+       spin_lock(&current->fs->lock);
        root = dget(current->fs->root.dentry);
-       read_unlock(&current->fs->lock);
+       spin_unlock(&current->fs->lock);
 
        spin_lock(&dcache_lock);
 
index f3a4e15672d929dfbedc0ff3bbed375a9bc27b57..f96a471cb1a86d018e6ce435063ecf89b2159383 100644 (file)
@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
 static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
 #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
        /*
-        * memcpy_toio works for us if:
+        * iowrite32_rep works for us if:
         *  (1) Copies data as 32bit quantities, not byte after byte,
         *  (2) Performs LE ordered stores, and
         *  (3) It copes with unaligned source (destination is guaranteed to be page
         *      aligned and length is guaranteed to be multiple of 4).
         */
-       memcpy_toio(va.vaddr, src, len);
+       iowrite32_rep(va.vaddr, src, len >> 2);
 #else
         u_int32_t __iomem* addr = va.vaddr;
 
index 50efa339e051f7b7a5d417160ff528ca94e3adfa..3e7dca279d1c0dff3fdb7e0e2e7d8d236af7d4c9 100644 (file)
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
-                                * ll_rw_block() actually writes the current
-                                * contents - it is a noop if I/O is still in
-                                * flight on potentially older contents.
+                                * write_dirty_buffer() actually writes the
+                                * current contents - it is a noop if I/O is
+                                * still in flight on potentially older
+                                * contents.
                                 */
-                               ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+                               write_dirty_buffer(bh, WRITE_SYNC_PLUG);
 
                                /*
                                 * Kick off IO for the previous mapping. Note
@@ -2911,13 +2912,6 @@ int submit_bh(int rw, struct buffer_head * bh)
        BUG_ON(buffer_delay(bh));
        BUG_ON(buffer_unwritten(bh));
 
-       /*
-        * Mask in barrier bit for a write (could be either a WRITE or a
-        * WRITE_SYNC
-        */
-       if (buffer_ordered(bh) && (rw & WRITE))
-               rw |= WRITE_BARRIER;
-
        /*
         * Only clear out a write error when rewriting
         */
@@ -2956,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request.  Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request.  Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2987,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
        for (i = 0; i < nr; i++) {
                struct buffer_head *bh = bhs[i];
 
-               if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
-                       lock_buffer(bh);
-               else if (!trylock_buffer(bh))
+               if (!trylock_buffer(bh))
                        continue;
-
-               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-                   rw == SWRITE_SYNC_PLUG) {
+               if (rw == WRITE) {
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
                                get_bh(bh);
-                               if (rw == SWRITE_SYNC)
-                                       submit_bh(WRITE_SYNC, bh);
-                               else
-                                       submit_bh(WRITE, bh);
+                               submit_bh(WRITE, bh);
                                continue;
                        }
                } else {
@@ -3016,12 +3002,25 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);
 
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+       lock_buffer(bh);
+       if (!test_clear_buffer_dirty(bh)) {
+               unlock_buffer(bh);
+               return;
+       }
+       bh->b_end_io = end_buffer_write_sync;
+       get_bh(bh);
+       submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
  * and then start new I/O and then wait upon it.  The caller must have a ref on
  * the buffer_head.
  */
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 {
        int ret = 0;
 
@@ -3030,7 +3029,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               ret = submit_bh(WRITE_SYNC, bh);
+               ret = submit_bh(rw, bh);
                wait_on_buffer(bh);
                if (buffer_eopnotsupp(bh)) {
                        clear_buffer_eopnotsupp(bh);
@@ -3043,6 +3042,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
        }
        return ret;
 }
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+       return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
index a53b130b366c738c654a7ec71a3e4b01ee6d8ec7..1e7a33028d33908807d776d05840637cfd7e4f22 100644 (file)
@@ -80,7 +80,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
                }
        } else {
                inode = iget_locked(sb, CRAMINO(cramfs_inode));
-               if (inode) {
+               if (inode && (inode->i_state & I_NEW)) {
                        setup_inode(inode, cramfs_inode);
                        unlock_new_inode(inode);
                }
index 4d13bf50b7b159774c592ce886c7fd9208372d2a..83293be4814965373d4c81e5b7d91bc63f3a55c6 100644 (file)
@@ -1332,31 +1332,13 @@ EXPORT_SYMBOL(d_add_ci);
  * d_lookup - search for a dentry
  * @parent: parent dentry
  * @name: qstr of name we wish to find
+ * Returns: dentry, or NULL
  *
- * Searches the children of the parent dentry for the name in question. If
- * the dentry is found its reference count is incremented and the dentry
- * is returned. The caller must use dput to free the entry when it has
- * finished using it. %NULL is returned on failure.
- *
- * __d_lookup is dcache_lock free. The hash list is protected using RCU.
- * Memory barriers are used while updating and doing lockless traversal. 
- * To avoid races with d_move while rename is happening, d_lock is used.
- *
- * Overflows in memcmp(), while d_move, are avoided by keeping the length
- * and name pointer in one structure pointed by d_qstr.
- *
- * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
- * lookup is going on.
- *
- * The dentry unused LRU is not updated even if lookup finds the required dentry
- * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
- * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
- * acquisition.
- *
- * d_lookup() is protected against the concurrent renames in some unrelated
- * directory using the seqlockt_t rename_lock.
+ * d_lookup searches the children of the parent dentry for the name in
+ * question. If the dentry is found its reference count is incremented and the
+ * dentry is returned. The caller must use dput to free the entry when it has
+ * finished using it. %NULL is returned if the dentry does not exist.
  */
-
 struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
 {
        struct dentry * dentry = NULL;
@@ -1372,6 +1354,21 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
 }
 EXPORT_SYMBOL(d_lookup);
 
+/*
+ * __d_lookup - search for a dentry (racy)
+ * @parent: parent dentry
+ * @name: qstr of name we wish to find
+ * Returns: dentry, or NULL
+ *
+ * __d_lookup is like d_lookup, however it may (rarely) return a
+ * false-negative result due to unrelated rename activity.
+ *
+ * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
+ * however it must be used carefully, eg. with a following d_lookup in
+ * the case of failure.
+ *
+ * __d_lookup callers must be commented.
+ */
 struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
 {
        unsigned int len = name->len;
@@ -1382,6 +1379,19 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
        struct hlist_node *node;
        struct dentry *dentry;
 
+       /*
+        * The hash list is protected using RCU.
+        *
+        * Take d_lock when comparing a candidate dentry, to avoid races
+        * with d_move().
+        *
+        * It is possible that concurrent renames can mess up our list
+        * walk here and result in missing our dentry, resulting in the
+        * false-negative result. d_lookup() protects against concurrent
+        * renames using rename_lock seqlock.
+        *
+        * See Documentation/vfs/dcache-locking.txt for more details.
+        */
        rcu_read_lock();
        
        hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
@@ -1396,8 +1406,8 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
 
                /*
                 * Recheck the dentry after taking the lock - d_move may have
-                * changed things.  Don't bother checking the hash because we're
-                * about to compare the whole name anyway.
+                * changed things. Don't bother checking the hash because
+                * we're about to compare the whole name anyway.
                 */
                if (dentry->d_parent != parent)
                        goto next;
@@ -1925,7 +1935,7 @@ static int prepend_path(const struct path *path, struct path *root,
        bool slash = false;
        int error = 0;
 
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;
 
@@ -1954,7 +1964,7 @@ out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        return error;
 
 global_root:
@@ -2292,11 +2302,12 @@ int path_is_under(struct path *path1, struct path *path2)
        struct vfsmount *mnt = path1->mnt;
        struct dentry *dentry = path1->dentry;
        int res;
-       spin_lock(&vfsmount_lock);
+
+       br_read_lock(vfsmount_lock);
        if (mnt != path2->mnt) {
                for (;;) {
                        if (mnt->mnt_parent == mnt) {
-                               spin_unlock(&vfsmount_lock);
+                               br_read_unlock(vfsmount_lock);
                                return 0;
                        }
                        if (mnt->mnt_parent == path2->mnt)
@@ -2306,7 +2317,7 @@ int path_is_under(struct path *path1, struct path *path2)
                dentry = mnt->mnt_mountpoint;
        }
        res = is_subdir(dentry, path2->dentry);
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        return res;
 }
 EXPORT_SYMBOL(path_is_under);
index 05c7d6b84df7c770f07aa7148b9f47e6aacd060f..2d9455282744bce582e48e0ecec4f4a6d332a28c 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1118,7 +1118,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
        bprm->unsafe = tracehook_unsafe_exec(p);
 
        n_fs = 1;
-       write_lock(&p->fs->lock);
+       spin_lock(&p->fs->lock);
        rcu_read_lock();
        for (t = next_thread(p); t != p; t = next_thread(t)) {
                if (t->fs == p->fs)
@@ -1135,7 +1135,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
                        res = 1;
                }
        }
-       write_unlock(&p->fs->lock);
+       spin_unlock(&p->fs->lock);
 
        return res;
 }
index 1fa23f6ffba5b39a9921bc6f0b5eac0bfd46fecb..1736f23563888b2f0225a6add77d45c984270231 100644 (file)
@@ -250,7 +250,9 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
 {
        int i, err = 0;
 
-       ll_rw_block(SWRITE, nr_bhs, bhs);
+       for (i = 0; i < nr_bhs; i++)
+               write_dirty_buffer(bhs[i], WRITE);
+
        for (i = 0; i < nr_bhs; i++) {
                wait_on_buffer(bhs[i]);
                if (buffer_eopnotsupp(bhs[i])) {
index edecd36fed9bdcf7f49411a3bcf2f97283bd077b..a04bdd81c11ca3799d4429d8c5eb3579621e0628 100644 (file)
@@ -20,7 +20,9 @@
 #include <linux/cdev.h>
 #include <linux/fsnotify.h>
 #include <linux/sysctl.h>
+#include <linux/lglock.h>
 #include <linux/percpu_counter.h>
+#include <linux/percpu.h>
 #include <linux/ima.h>
 
 #include <asm/atomic.h>
@@ -32,8 +34,8 @@ struct files_stat_struct files_stat = {
        .max_files = NR_FILE
 };
 
-/* public. Not pretty! */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+DECLARE_LGLOCK(files_lglock);
+DEFINE_LGLOCK(files_lglock);
 
 /* SLAB cache for file structures */
 static struct kmem_cache *filp_cachep __read_mostly;
@@ -249,7 +251,7 @@ static void __fput(struct file *file)
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
-       file_kill(file);
+       file_sb_list_del(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
@@ -328,41 +330,107 @@ struct file *fget_light(unsigned int fd, int *fput_needed)
        return file;
 }
 
-
 void put_filp(struct file *file)
 {
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
-               file_kill(file);
+               file_sb_list_del(file);
                file_free(file);
        }
 }
 
-void file_move(struct file *file, struct list_head *list)
+static inline int file_list_cpu(struct file *file)
 {
-       if (!list)
-               return;
-       file_list_lock();
-       list_move(&file->f_u.fu_list, list);
-       file_list_unlock();
+#ifdef CONFIG_SMP
+       return file->f_sb_list_cpu;
+#else
+       return smp_processor_id();
+#endif
+}
+
+/* helper for file_sb_list_add to reduce ifdefs */
+static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
+{
+       struct list_head *list;
+#ifdef CONFIG_SMP
+       int cpu;
+       cpu = smp_processor_id();
+       file->f_sb_list_cpu = cpu;
+       list = per_cpu_ptr(sb->s_files, cpu);
+#else
+       list = &sb->s_files;
+#endif
+       list_add(&file->f_u.fu_list, list);
 }
 
-void file_kill(struct file *file)
+/**
+ * file_sb_list_add - add a file to the sb's file list
+ * @file: file to add
+ * @sb: sb to add it to
+ *
+ * Use this function to associate a file with the superblock of the inode it
+ * refers to.
+ */
+void file_sb_list_add(struct file *file, struct super_block *sb)
+{
+       lg_local_lock(files_lglock);
+       __file_sb_list_add(file, sb);
+       lg_local_unlock(files_lglock);
+}
+
+/**
+ * file_sb_list_del - remove a file from the sb's file list
+ * @file: file to remove
+ * @sb: sb to remove it from
+ *
+ * Use this function to remove a file from its superblock.
+ */
+void file_sb_list_del(struct file *file)
 {
        if (!list_empty(&file->f_u.fu_list)) {
-               file_list_lock();
+               lg_local_lock_cpu(files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
-               file_list_unlock();
+               lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
        }
 }
 
+#ifdef CONFIG_SMP
+
+/*
+ * These macros iterate all files on all CPUs for a given superblock.
+ * files_lglock must be held globally.
+ */
+#define do_file_list_for_each_entry(__sb, __file)              \
+{                                                              \
+       int i;                                                  \
+       for_each_possible_cpu(i) {                              \
+               struct list_head *list;                         \
+               list = per_cpu_ptr((__sb)->s_files, i);         \
+               list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry                         \
+       }                                                       \
+}
+
+#else
+
+#define do_file_list_for_each_entry(__sb, __file)              \
+{                                                              \
+       struct list_head *list;                                 \
+       list = &(sb)->s_files;                                  \
+       list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry                         \
+}
+
+#endif
+
 int fs_may_remount_ro(struct super_block *sb)
 {
        struct file *file;
-
        /* Check that no files are currently opened for writing. */
-       file_list_lock();
-       list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
+       lg_global_lock(files_lglock);
+       do_file_list_for_each_entry(sb, file) {
                struct inode *inode = file->f_path.dentry->d_inode;
 
                /* File with pending delete? */
@@ -372,11 +440,11 @@ int fs_may_remount_ro(struct super_block *sb)
                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
-       }
-       file_list_unlock();
+       } while_file_list_for_each_entry;
+       lg_global_unlock(files_lglock);
        return 1; /* Tis' cool bro. */
 too_bad:
-       file_list_unlock();
+       lg_global_unlock(files_lglock);
        return 0;
 }
 
@@ -392,8 +460,8 @@ void mark_files_ro(struct super_block *sb)
        struct file *f;
 
 retry:
-       file_list_lock();
-       list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
+       lg_global_lock(files_lglock);
+       do_file_list_for_each_entry(sb, f) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
                       continue;
@@ -408,16 +476,13 @@ retry:
                        continue;
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
-               file_list_unlock();
-               /*
-                * This can sleep, so we can't hold
-                * the file_list_lock() spinlock.
-                */
+               /* This can sleep, so we can't hold the spinlock. */
+               lg_global_unlock(files_lglock);
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
-       }
-       file_list_unlock();
+       } while_file_list_for_each_entry;
+       lg_global_unlock(files_lglock);
 }
 
 void __init files_init(unsigned long mempages)
@@ -437,5 +502,6 @@ void __init files_init(unsigned long mempages)
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
+       lg_lock_init(files_lglock);
        percpu_counter_init(&nr_files, 0);
 } 
index 1ee40eb9a2c05cc1751828af612c6942758b4c72..ed45a9cf5f3de46ae08e90d581a500bed9eb4dbf 100644 (file)
@@ -13,11 +13,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
 {
        struct path old_root;
 
-       write_lock(&fs->lock);
+       spin_lock(&fs->lock);
        old_root = fs->root;
        fs->root = *path;
        path_get(path);
-       write_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
        if (old_root.dentry)
                path_put(&old_root);
 }
@@ -30,11 +30,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
 {
        struct path old_pwd;
 
-       write_lock(&fs->lock);
+       spin_lock(&fs->lock);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get(path);
-       write_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 
        if (old_pwd.dentry)
                path_put(&old_pwd);
@@ -51,7 +51,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
                task_lock(p);
                fs = p->fs;
                if (fs) {
-                       write_lock(&fs->lock);
+                       spin_lock(&fs->lock);
                        if (fs->root.dentry == old_root->dentry
                            && fs->root.mnt == old_root->mnt) {
                                path_get(new_root);
@@ -64,7 +64,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
                                fs->pwd = *new_root;
                                count++;
                        }
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                }
                task_unlock(p);
        } while_each_thread(g, p);
@@ -87,10 +87,10 @@ void exit_fs(struct task_struct *tsk)
        if (fs) {
                int kill;
                task_lock(tsk);
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                tsk->fs = NULL;
                kill = !--fs->users;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
                task_unlock(tsk);
                if (kill)
                        free_fs_struct(fs);
@@ -104,7 +104,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
        if (fs) {
                fs->users = 1;
                fs->in_exec = 0;
-               rwlock_init(&fs->lock);
+               spin_lock_init(&fs->lock);
                fs->umask = old->umask;
                get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
        }
@@ -121,10 +121,10 @@ int unshare_fs_struct(void)
                return -ENOMEM;
 
        task_lock(current);
-       write_lock(&fs->lock);
+       spin_lock(&fs->lock);
        kill = !--fs->users;
        current->fs = new_fs;
-       write_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
        task_unlock(current);
 
        if (kill)
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(current_umask);
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
        .users          = 1,
-       .lock           = __RW_LOCK_UNLOCKED(init_fs.lock),
+       .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
        .umask          = 0022,
 };
 
@@ -156,14 +156,14 @@ void daemonize_fs_struct(void)
 
                task_lock(current);
 
-               write_lock(&init_fs.lock);
+               spin_lock(&init_fs.lock);
                init_fs.users++;
-               write_unlock(&init_fs.lock);
+               spin_unlock(&init_fs.lock);
 
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                current->fs = &init_fs;
                kill = !--fs->users;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
 
                task_unlock(current);
                if (kill)
index 99800e564157ed5d3bb78b6e80c6d7f5b7d32051..6bc9e3a5a693b0fa2d5d8f09349f42b9d44ea72f 100644 (file)
@@ -94,6 +94,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
                        if (error < 0)
                                goto failed;
                        inode->i_mode = mode;
+                       inode->i_ctime = CURRENT_TIME;
                        if (error == 0) {
                                posix_acl_release(acl);
                                acl = NULL;
index dd1e55535a4e8a65e4405a2419d1a9bcde389f8c..f7dc9b5f9ef8c80560cb545d937aa9818a3b646f 100644 (file)
@@ -104,7 +104,7 @@ static char *__dentry_name(struct dentry *dentry, char *name)
                __putname(name);
                return NULL;
        }
-       strncpy(name, root, PATH_MAX);
+       strlcpy(name, root, PATH_MAX);
        if (len > p - name) {
                __putname(name);
                return NULL;
@@ -876,7 +876,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
                char *path = dentry_name(dentry);
                int err = -ENOMEM;
                if (path) {
-                       int err = hostfs_do_readlink(path, link, PATH_MAX);
+                       err = hostfs_do_readlink(path, link, PATH_MAX);
                        if (err == PATH_MAX)
                                err = -E2BIG;
                        __putname(path);
index 6b706bc60a66bbb3cecb7477f2ea72e0a091aa84..a6910e91cee8799196e991c7cab98ad11206cf01 100644 (file)
@@ -9,6 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/lglock.h>
+
 struct super_block;
 struct linux_binprm;
 struct path;
@@ -70,7 +72,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
 
 extern void __init mnt_init(void);
 
-extern spinlock_t vfsmount_lock;
+DECLARE_BRLOCK(vfsmount_lock);
+
 
 /*
  * fs_struct.c
@@ -80,6 +83,8 @@ extern void chroot_fs_refs(struct path *, struct path *);
 /*
  * file_table.c
  */
+extern void file_sb_list_add(struct file *f, struct super_block *sb);
+extern void file_sb_list_del(struct file *f);
 extern void mark_files_ro(struct super_block *);
 extern struct file *get_empty_filp(void);
 
index b0435dd0654d16acce790332fbb258ed378a14d3..05a38b9c4c0ecbe749ef73931933c0e089fe15ba 100644 (file)
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
 {
        int i;
 
-       ll_rw_block(SWRITE, *batch_count, bhs);
+       for (i = 0; i < *batch_count; i++)
+               write_dirty_buffer(bhs[i], WRITE);
+
        for (i = 0; i < *batch_count; i++) {
                struct buffer_head *bh = bhs[i];
                clear_buffer_jwrite(bh);
index 28a9ddaa0c496f85625ccab2d543fc5e0886872b..95d8c11c929ea3563b72f69e130674d6f49d5162 100644 (file)
@@ -119,7 +119,6 @@ static int journal_write_commit_record(journal_t *journal,
        struct buffer_head *bh;
        journal_header_t *header;
        int ret;
-       int barrier_done = 0;
 
        if (is_journal_aborted(journal))
                return 0;
@@ -137,34 +136,36 @@ static int journal_write_commit_record(journal_t *journal,
 
        JBUFFER_TRACE(descriptor, "write commit block");
        set_buffer_dirty(bh);
+
        if (journal->j_flags & JFS_BARRIER) {
-               set_buffer_ordered(bh);
-               barrier_done = 1;
-       }
-       ret = sync_dirty_buffer(bh);
-       if (barrier_done)
-               clear_buffer_ordered(bh);
-       /* is it possible for another commit to fail at roughly
-        * the same time as this one?  If so, we don't want to
-        * trust the barrier flag in the super, but instead want
-        * to remember if we sent a barrier request
-        */
-       if (ret == -EOPNOTSUPP && barrier_done) {
-               char b[BDEVNAME_SIZE];
+               ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
 
-               printk(KERN_WARNING
-                       "JBD: barrier-based sync failed on %s - "
-                       "disabling barriers\n",
-                       bdevname(journal->j_dev, b));
-               spin_lock(&journal->j_state_lock);
-               journal->j_flags &= ~JFS_BARRIER;
-               spin_unlock(&journal->j_state_lock);
+               /*
+                * Is it possible for another commit to fail at roughly
+                * the same time as this one?  If so, we don't want to
+                * trust the barrier flag in the super, but instead want
+                * to remember if we sent a barrier request
+                */
+               if (ret == -EOPNOTSUPP) {
+                       char b[BDEVNAME_SIZE];
 
-               /* And try again, without the barrier */
-               set_buffer_uptodate(bh);
-               set_buffer_dirty(bh);
+                       printk(KERN_WARNING
+                               "JBD: barrier-based sync failed on %s - "
+                               "disabling barriers\n",
+                               bdevname(journal->j_dev, b));
+                       spin_lock(&journal->j_state_lock);
+                       journal->j_flags &= ~JFS_BARRIER;
+                       spin_unlock(&journal->j_state_lock);
+
+                       /* And try again, without the barrier */
+                       set_buffer_uptodate(bh);
+                       set_buffer_dirty(bh);
+                       ret = sync_dirty_buffer(bh);
+               }
+       } else {
                ret = sync_dirty_buffer(bh);
        }
+
        put_bh(bh);             /* One for getblk() */
        journal_put_journal_head(descriptor);
 
index f19ce94693d848e60ea9e34164a1c75cb56618ea..2c4b1f109da9e6bc3bcedddd423f02cb84c6bc2f 100644 (file)
@@ -1024,7 +1024,7 @@ void journal_update_superblock(journal_t *journal, int wait)
        if (wait)
                sync_dirty_buffer(bh);
        else
-               ll_rw_block(SWRITE, 1, &bh);
+               write_dirty_buffer(bh, WRITE);
 
 out:
        /* If we have just flushed the log (by marking s_start==0), then
index ad717328343acc9e1c2c66ed758c14d2aaffbe2c..d29018307e2e9cca4b97409036d18f4b9421bcac 100644 (file)
@@ -617,7 +617,7 @@ static void flush_descriptor(journal_t *journal,
        set_buffer_jwrite(bh);
        BUFFER_TRACE(bh, "write");
        set_buffer_dirty(bh);
-       ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+       write_dirty_buffer(bh, write_op);
 }
 #endif
 
index 1c23a0f4e8a35021a3c291fa3407b976fdbb34a3..5247e7ffdcb46d400e10681535a5728ef8006ad8 100644 (file)
@@ -255,7 +255,9 @@ __flush_batch(journal_t *journal, int *batch_count)
 {
        int i;
 
-       ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
+       for (i = 0; i < *batch_count; i++)
+               write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+
        for (i = 0; i < *batch_count; i++) {
                struct buffer_head *bh = journal->j_chkpt_bhs[i];
                clear_buffer_jwrite(bh);
index f52e5e8049f195ec461bfb8781584722b5da2562..7c068c189d80d713d56705e63c5b5e0bf6982ab9 100644 (file)
@@ -101,7 +101,6 @@ static int journal_submit_commit_record(journal_t *journal,
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
-       int barrier_done = 0;
        struct timespec now = current_kernel_time();
 
        if (is_journal_aborted(journal))
@@ -136,30 +135,22 @@ static int journal_submit_commit_record(journal_t *journal,
        if (journal->j_flags & JBD2_BARRIER &&
            !JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
-               set_buffer_ordered(bh);
-               barrier_done = 1;
-       }
-       ret = submit_bh(WRITE_SYNC_PLUG, bh);
-       if (barrier_done)
-               clear_buffer_ordered(bh);
-
-       /* is it possible for another commit to fail at roughly
-        * the same time as this one?  If so, we don't want to
-        * trust the barrier flag in the super, but instead want
-        * to remember if we sent a barrier request
-        */
-       if (ret == -EOPNOTSUPP && barrier_done) {
-               printk(KERN_WARNING
-                      "JBD2: Disabling barriers on %s, "
-                      "not supported by device\n", journal->j_devname);
-               write_lock(&journal->j_state_lock);
-               journal->j_flags &= ~JBD2_BARRIER;
-               write_unlock(&journal->j_state_lock);
+               ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
+               if (ret == -EOPNOTSUPP) {
+                       printk(KERN_WARNING
+                              "JBD2: Disabling barriers on %s, "
+                              "not supported by device\n", journal->j_devname);
+                       write_lock(&journal->j_state_lock);
+                       journal->j_flags &= ~JBD2_BARRIER;
+                       write_unlock(&journal->j_state_lock);
 
-               /* And try again, without the barrier */
-               lock_buffer(bh);
-               set_buffer_uptodate(bh);
-               clear_buffer_dirty(bh);
+                       /* And try again, without the barrier */
+                       lock_buffer(bh);
+                       set_buffer_uptodate(bh);
+                       clear_buffer_dirty(bh);
+                       ret = submit_bh(WRITE_SYNC_PLUG, bh);
+               }
+       } else {
                ret = submit_bh(WRITE_SYNC_PLUG, bh);
        }
        *cbh = bh;
index ad5866aaf0f9aa88cc114fb915e1888f393ffdbb..0e8014ea6b94ad8985f1b0b842f2cea550578e67 100644 (file)
@@ -1124,7 +1124,7 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
                        set_buffer_uptodate(bh);
                }
        } else
-               ll_rw_block(SWRITE, 1, &bh);
+               write_dirty_buffer(bh, WRITE);
 
 out:
        /* If we have just flushed the log (by marking s_start==0), then
index a360b06af2e3b488933cfd5450c9af6b3f3db36f..9ad321fd63fdf73b4d7aa99ee5c78c6edbad9da4 100644 (file)
@@ -625,7 +625,7 @@ static void flush_descriptor(journal_t *journal,
        set_buffer_jwrite(bh);
        BUFFER_TRACE(bh, "write");
        set_buffer_dirty(bh);
-       ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+       write_dirty_buffer(bh, write_op);
 }
 #endif
 
index cf4e6cdfd15b5afc091c0f2a060af90450176811..93444747237b98c03d5192870bd9740311da93f7 100644 (file)
@@ -80,6 +80,7 @@ struct mb_cache {
        struct list_head                c_cache_list;
        const char                      *c_name;
        atomic_t                        c_entry_count;
+       int                             c_max_entries;
        int                             c_bucket_bits;
        struct kmem_cache               *c_entry_cache;
        struct list_head                *c_block_hash;
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits)
        if (!cache->c_entry_cache)
                goto fail2;
 
+       /*
+        * Set an upper limit on the number of cache entries so that the hash
+        * chains won't grow too long.
+        */
+       cache->c_max_entries = bucket_count << 4;
+
        spin_lock(&mb_cache_spinlock);
        list_add(&cache->c_cache_list, &mb_cache_list);
        spin_unlock(&mb_cache_spinlock);
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache)
        kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-       struct mb_cache_entry *ce;
-
-       ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-       if (ce) {
+       struct mb_cache_entry *ce = NULL;
+
+       if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+               spin_lock(&mb_cache_spinlock);
+               if (!list_empty(&mb_cache_lru_list)) {
+                       ce = list_entry(mb_cache_lru_list.next,
+                                       struct mb_cache_entry, e_lru_list);
+                       list_del_init(&ce->e_lru_list);
+                       __mb_cache_entry_unhash(ce);
+               }
+               spin_unlock(&mb_cache_spinlock);
+       }
+       if (!ce) {
+               ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+               if (!ce)
+                       return NULL;
                atomic_inc(&cache->c_entry_count);
                INIT_LIST_HEAD(&ce->e_lru_list);
                INIT_LIST_HEAD(&ce->e_block_list);
                ce->e_cache = cache;
-               ce->e_used = 1 + MB_CACHE_WRITER;
                ce->e_queued = 0;
        }
+       ce->e_used = 1 + MB_CACHE_WRITER;
        return ce;
 }
 
index 17ea76bf2fbee41e9ff8a20616e0523781ed2315..24896e8335658c9c0ce2c81c117cb664b964da70 100644 (file)
@@ -595,15 +595,16 @@ int follow_up(struct path *path)
 {
        struct vfsmount *parent;
        struct dentry *mountpoint;
-       spin_lock(&vfsmount_lock);
+
+       br_read_lock(vfsmount_lock);
        parent = path->mnt->mnt_parent;
        if (parent == path->mnt) {
-               spin_unlock(&vfsmount_lock);
+               br_read_unlock(vfsmount_lock);
                return 0;
        }
        mntget(parent);
        mountpoint = dget(path->mnt->mnt_mountpoint);
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        dput(path->dentry);
        path->dentry = mountpoint;
        mntput(path->mnt);
@@ -685,6 +686,35 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
        follow_mount(&nd->path);
 }
 
+/*
+ * Allocate a dentry with name and parent, and perform a parent
+ * directory ->lookup on it. Returns the new dentry, or ERR_PTR
+ * on error. parent->d_inode->i_mutex must be held. d_lookup must
+ * have verified that no child exists while under i_mutex.
+ */
+static struct dentry *d_alloc_and_lookup(struct dentry *parent,
+                               struct qstr *name, struct nameidata *nd)
+{
+       struct inode *inode = parent->d_inode;
+       struct dentry *dentry;
+       struct dentry *old;
+
+       /* Don't create child dentry for a dead directory. */
+       if (unlikely(IS_DEADDIR(inode)))
+               return ERR_PTR(-ENOENT);
+
+       dentry = d_alloc(parent, name);
+       if (unlikely(!dentry))
+               return ERR_PTR(-ENOMEM);
+
+       old = inode->i_op->lookup(inode, dentry, nd);
+       if (unlikely(old)) {
+               dput(dentry);
+               dentry = old;
+       }
+       return dentry;
+}
+
 /*
  *  It's more convoluted than I'd like it to be, but... it's still fairly
  *  small and for now I'd prefer to have fast path as straight as possible.
@@ -706,9 +736,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
                        return err;
        }
 
+       /*
+        * Rename seqlock is not required here because in the off chance
+        * of a false negative due to a concurrent rename, we're going to
+        * do the non-racy lookup, below.
+        */
        dentry = __d_lookup(nd->path.dentry, name);
        if (!dentry)
                goto need_lookup;
+found:
        if (dentry->d_op && dentry->d_op->d_revalidate)
                goto need_revalidate;
 done:
@@ -724,56 +760,28 @@ need_lookup:
        mutex_lock(&dir->i_mutex);
        /*
         * First re-do the cached lookup just in case it was created
-        * while we waited for the directory semaphore..
+        * while we waited for the directory semaphore, or the first
+        * lookup failed due to an unrelated rename.
         *
-        * FIXME! This could use version numbering or similar to
-        * avoid unnecessary cache lookups.
-        *
-        * The "dcache_lock" is purely to protect the RCU list walker
-        * from concurrent renames at this point (we mustn't get false
-        * negatives from the RCU list walk here, unlike the optimistic
-        * fast walk).
-        *
-        * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
+        * This could use version numbering or similar to avoid unnecessary
+        * cache lookups, but then we'd have to do the first lookup in the
+        * non-racy way. However in the common case here, everything should
+        * be hot in cache, so would it be a big win?
         */
        dentry = d_lookup(parent, name);
-       if (!dentry) {
-               struct dentry *new;
-
-               /* Don't create child dentry for a dead directory. */
-               dentry = ERR_PTR(-ENOENT);
-               if (IS_DEADDIR(dir))
-                       goto out_unlock;
-
-               new = d_alloc(parent, name);
-               dentry = ERR_PTR(-ENOMEM);
-               if (new) {
-                       dentry = dir->i_op->lookup(dir, new, nd);
-                       if (dentry)
-                               dput(new);
-                       else
-                               dentry = new;
-               }
-out_unlock:
+       if (likely(!dentry)) {
+               dentry = d_alloc_and_lookup(parent, name, nd);
                mutex_unlock(&dir->i_mutex);
                if (IS_ERR(dentry))
                        goto fail;
                goto done;
        }
-
        /*
         * Uhhuh! Nasty case: the cache was re-populated while
         * we waited on the semaphore. Need to revalidate.
         */
        mutex_unlock(&dir->i_mutex);
-       if (dentry->d_op && dentry->d_op->d_revalidate) {
-               dentry = do_revalidate(dentry, nd);
-               if (!dentry)
-                       dentry = ERR_PTR(-ENOENT);
-       }
-       if (IS_ERR(dentry))
-               goto fail;
-       goto done;
+       goto found;
 
 need_revalidate:
        dentry = do_revalidate(dentry, nd);
@@ -1130,35 +1138,18 @@ static struct dentry *__lookup_hash(struct qstr *name,
                        goto out;
        }
 
-       dentry = __d_lookup(base, name);
-
-       /* lockess __d_lookup may fail due to concurrent d_move()
-        * in some unrelated directory, so try with d_lookup
+       /*
+        * Don't bother with __d_lookup: callers are for creat as
+        * well as unlink, so a lot of the time it would cost
+        * a double lookup.
         */
-       if (!dentry)
-               dentry = d_lookup(base, name);
+       dentry = d_lookup(base, name);
 
        if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
                dentry = do_revalidate(dentry, nd);
 
-       if (!dentry) {
-               struct dentry *new;
-
-               /* Don't create child dentry for a dead directory. */
-               dentry = ERR_PTR(-ENOENT);
-               if (IS_DEADDIR(inode))
-                       goto out;
-
-               new = d_alloc(base, name);
-               dentry = ERR_PTR(-ENOMEM);
-               if (!new)
-                       goto out;
-               dentry = inode->i_op->lookup(inode, new, nd);
-               if (!dentry)
-                       dentry = new;
-               else
-                       dput(new);
-       }
+       if (!dentry)
+               dentry = d_alloc_and_lookup(base, name, nd);
 out:
        return dentry;
 }
index 2e10cb19c5b02983e159bfe5f8039f3d08f3035d..de402eb6eafbad3df3957ca7b74ee92256c1e8c4 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
 #define HASH_SIZE (1UL << HASH_SHIFT)
 
-/* spinlock for vfsmount related operations, inplace of dcache_lock */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
-
 static int event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
+static DEFINE_SPINLOCK(mnt_id_lock);
 static int mnt_id_start = 0;
 static int mnt_group_start = 1;
 
@@ -55,6 +55,16 @@ static struct rw_semaphore namespace_sem;
 struct kobject *fs_kobj;
 EXPORT_SYMBOL_GPL(fs_kobj);
 
+/*
+ * vfsmount lock may be taken for read to prevent changes to the
+ * vfsmount hash, ie. during mountpoint lookups or walking back
+ * up the tree.
+ *
+ * It should be taken for write in all cases where the vfsmount
+ * tree or hash is modified or when a vfsmount structure is modified.
+ */
+DEFINE_BRLOCK(vfsmount_lock);
+
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
@@ -65,18 +75,21 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 
 #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
 
-/* allocation is serialized by namespace_sem */
+/*
+ * allocation is serialized by namespace_sem, but we need the spinlock to
+ * serialize with freeing.
+ */
 static int mnt_alloc_id(struct vfsmount *mnt)
 {
        int res;
 
 retry:
        ida_pre_get(&mnt_id_ida, GFP_KERNEL);
-       spin_lock(&vfsmount_lock);
+       spin_lock(&mnt_id_lock);
        res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
        if (!res)
                mnt_id_start = mnt->mnt_id + 1;
-       spin_unlock(&vfsmount_lock);
+       spin_unlock(&mnt_id_lock);
        if (res == -EAGAIN)
                goto retry;
 
@@ -86,11 +99,11 @@ retry:
 static void mnt_free_id(struct vfsmount *mnt)
 {
        int id = mnt->mnt_id;
-       spin_lock(&vfsmount_lock);
+       spin_lock(&mnt_id_lock);
        ida_remove(&mnt_id_ida, id);
        if (mnt_id_start > id)
                mnt_id_start = id;
-       spin_unlock(&vfsmount_lock);
+       spin_unlock(&mnt_id_lock);
 }
 
 /*
@@ -348,7 +361,7 @@ static int mnt_make_readonly(struct vfsmount *mnt)
 {
        int ret = 0;
 
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_flags |= MNT_WRITE_HOLD;
        /*
         * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -382,15 +395,15 @@ static int mnt_make_readonly(struct vfsmount *mnt)
         */
        smp_wmb();
        mnt->mnt_flags &= ~MNT_WRITE_HOLD;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        return ret;
 }
 
 static void __mnt_unmake_readonly(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_flags &= ~MNT_READONLY;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
 
 void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
@@ -414,6 +427,7 @@ void free_vfsmnt(struct vfsmount *mnt)
 /*
  * find the first or last mount at @dentry on vfsmount @mnt depending on
  * @dir. If @dir is set return the first mount else return the last mount.
+ * vfsmount_lock must be held for read or write.
  */
 struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
                              int dir)
@@ -443,10 +457,11 @@ struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
 struct vfsmount *lookup_mnt(struct path *path)
 {
        struct vfsmount *child_mnt;
-       spin_lock(&vfsmount_lock);
+
+       br_read_lock(vfsmount_lock);
        if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
                mntget(child_mnt);
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        return child_mnt;
 }
 
@@ -455,6 +470,9 @@ static inline int check_mnt(struct vfsmount *mnt)
        return mnt->mnt_ns == current->nsproxy->mnt_ns;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void touch_mnt_namespace(struct mnt_namespace *ns)
 {
        if (ns) {
@@ -463,6 +481,9 @@ static void touch_mnt_namespace(struct mnt_namespace *ns)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void __touch_mnt_namespace(struct mnt_namespace *ns)
 {
        if (ns && ns->event != event) {
@@ -471,6 +492,9 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
 {
        old_path->dentry = mnt->mnt_mountpoint;
@@ -482,6 +506,9 @@ static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
        old_path->dentry->d_mounted--;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
                        struct vfsmount *child_mnt)
 {
@@ -490,6 +517,9 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
        dentry->d_mounted++;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void attach_mnt(struct vfsmount *mnt, struct path *path)
 {
        mnt_set_mountpoint(path->mnt, path->dentry, mnt);
@@ -499,7 +529,7 @@ static void attach_mnt(struct vfsmount *mnt, struct path *path)
 }
 
 /*
- * the caller must hold vfsmount_lock
+ * vfsmount lock must be held for write
  */
 static void commit_tree(struct vfsmount *mnt)
 {
@@ -623,39 +653,43 @@ static inline void __mntput(struct vfsmount *mnt)
 void mntput_no_expire(struct vfsmount *mnt)
 {
 repeat:
-       if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
-               if (likely(!mnt->mnt_pinned)) {
-                       spin_unlock(&vfsmount_lock);
-                       __mntput(mnt);
-                       return;
-               }
-               atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
-               mnt->mnt_pinned = 0;
-               spin_unlock(&vfsmount_lock);
-               acct_auto_close_mnt(mnt);
-               goto repeat;
+       if (atomic_add_unless(&mnt->mnt_count, -1, 1))
+               return;
+       br_write_lock(vfsmount_lock);
+       if (!atomic_dec_and_test(&mnt->mnt_count)) {
+               br_write_unlock(vfsmount_lock);
+               return;
+       }
+       if (likely(!mnt->mnt_pinned)) {
+               br_write_unlock(vfsmount_lock);
+               __mntput(mnt);
+               return;
        }
+       atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
+       mnt->mnt_pinned = 0;
+       br_write_unlock(vfsmount_lock);
+       acct_auto_close_mnt(mnt);
+       goto repeat;
 }
-
 EXPORT_SYMBOL(mntput_no_expire);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_pinned++;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
 
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        if (mnt->mnt_pinned) {
                atomic_inc(&mnt->mnt_count);
                mnt->mnt_pinned--;
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
 
 EXPORT_SYMBOL(mnt_unpin);
@@ -746,12 +780,12 @@ int mnt_had_events(struct proc_mounts *p)
        struct mnt_namespace *ns = p->ns;
        int res = 0;
 
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        if (p->event != ns->event) {
                p->event = ns->event;
                res = 1;
        }
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
 
        return res;
 }
@@ -952,12 +986,12 @@ int may_umount_tree(struct vfsmount *mnt)
        int minimum_refs = 0;
        struct vfsmount *p;
 
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                actual_refs += atomic_read(&p->mnt_count);
                minimum_refs += 2;
        }
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
 
        if (actual_refs > minimum_refs)
                return 0;
@@ -984,10 +1018,10 @@ int may_umount(struct vfsmount *mnt)
 {
        int ret = 1;
        down_read(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        if (propagate_mount_busy(mnt, 2))
                ret = 0;
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        up_read(&namespace_sem);
        return ret;
 }
@@ -1003,13 +1037,14 @@ void release_mounts(struct list_head *head)
                if (mnt->mnt_parent != mnt) {
                        struct dentry *dentry;
                        struct vfsmount *m;
-                       spin_lock(&vfsmount_lock);
+
+                       br_write_lock(vfsmount_lock);
                        dentry = mnt->mnt_mountpoint;
                        m = mnt->mnt_parent;
                        mnt->mnt_mountpoint = mnt->mnt_root;
                        mnt->mnt_parent = mnt;
                        m->mnt_ghosts--;
-                       spin_unlock(&vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                        dput(dentry);
                        mntput(m);
                }
@@ -1017,6 +1052,10 @@ void release_mounts(struct list_head *head)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ * namespace_sem must be held for write
+ */
 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
 {
        struct vfsmount *p;
@@ -1107,7 +1146,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
        }
 
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        event++;
 
        if (!(flags & MNT_DETACH))
@@ -1119,7 +1158,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
                        umount_tree(mnt, 1, &umount_list);
                retval = 0;
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        return retval;
@@ -1231,19 +1270,19 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
                        q = clone_mnt(p, p->mnt_root, flag);
                        if (!q)
                                goto Enomem;
-                       spin_lock(&vfsmount_lock);
+                       br_write_lock(vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        attach_mnt(q, &path);
-                       spin_unlock(&vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                }
        }
        return res;
 Enomem:
        if (res) {
                LIST_HEAD(umount_list);
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                umount_tree(res, 0, &umount_list);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
                release_mounts(&umount_list);
        }
        return NULL;
@@ -1262,9 +1301,9 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        LIST_HEAD(umount_list);
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        umount_tree(mnt, 0, &umount_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
 }
@@ -1392,7 +1431,7 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
        if (err)
                goto out_cleanup_ids;
 
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
 
        if (IS_MNT_SHARED(dest_mnt)) {
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1411,7 +1450,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
                list_del_init(&child->mnt_hash);
                commit_tree(child);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
+
        return 0;
 
  out_cleanup_ids:
@@ -1466,10 +1506,10 @@ static int do_change_type(struct path *path, int flag)
                        goto out_unlock;
        }
 
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
                change_mnt_propagation(m, type);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 
  out_unlock:
        up_write(&namespace_sem);
@@ -1513,9 +1553,10 @@ static int do_loopback(struct path *path, char *old_name,
        err = graft_tree(mnt, path);
        if (err) {
                LIST_HEAD(umount_list);
-               spin_lock(&vfsmount_lock);
+
+               br_write_lock(vfsmount_lock);
                umount_tree(mnt, 0, &umount_list);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
                release_mounts(&umount_list);
        }
 
@@ -1568,16 +1609,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        else
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
                path->mnt->mnt_flags = mnt_flags;
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
        }
        up_write(&sb->s_umount);
        if (!err) {
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                touch_mnt_namespace(path->mnt->mnt_ns);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
        }
        return err;
 }
@@ -1754,7 +1795,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                return;
 
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
 
        /* extract from the expiration list every vfsmount that matches the
         * following criteria:
@@ -1773,7 +1814,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                touch_mnt_namespace(mnt->mnt_ns);
                umount_tree(mnt, 1, &umounts);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
 
        release_mounts(&umounts);
@@ -1830,6 +1871,8 @@ resume:
 /*
  * process a list of expirable mountpoints with the intent of discarding any
  * submounts of a specific parent mountpoint
+ *
+ * vfsmount_lock must be held for write
  */
 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
 {
@@ -2048,9 +2091,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
                kfree(new_ns);
                return ERR_PTR(-ENOMEM);
        }
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 
        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2244,7 +2287,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
                goto out2; /* not attached */
        /* make sure we can reach put_old from new_root */
        tmp = old.mnt;
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        if (tmp != new.mnt) {
                for (;;) {
                        if (tmp->mnt_parent == tmp)
@@ -2264,7 +2307,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* mount new_root on / */
        attach_mnt(new.mnt, &root_parent);
        touch_mnt_namespace(current->nsproxy->mnt_ns);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        chroot_fs_refs(&root, &new);
        error = 0;
        path_put(&root_parent);
@@ -2279,7 +2322,7 @@ out1:
 out0:
        return error;
 out3:
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        goto out2;
 }
 
@@ -2326,6 +2369,8 @@ void __init mnt_init(void)
        for (u = 0; u < HASH_SIZE; u++)
                INIT_LIST_HEAD(&mount_hashtable[u]);
 
+       br_lock_init(vfsmount_lock);
+
        err = sysfs_init();
        if (err)
                printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2344,9 +2389,9 @@ void put_mnt_ns(struct mnt_namespace *ns)
        if (!atomic_dec_and_test(&ns->count))
                return;
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        umount_tree(ns->root, 0, &umount_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        kfree(ns);
index 26a510a7be0908023eaa5848002eced097f6e9dc..6c2aad49d7318054b57c22b58c6b4eaa0be50178 100644 (file)
@@ -63,7 +63,6 @@ config NFS_V3_ACL
 config NFS_V4
        bool "NFS client support for NFS version 4"
        depends on NFS_FS
-       select RPCSEC_GSS_KRB5
        help
          This option enables support for version 4 of the NFS protocol
          (RFC 3530) in the kernel's NFS client.
index 29539ceeb745f8d19fa57816127eceed2c5d989d..e257172d438c08afe374c99fe0e7c80af51efe6c 100644 (file)
@@ -140,6 +140,13 @@ nfs_opendir(struct inode *inode, struct file *filp)
 
        /* Call generic open code in order to cache credentials */
        res = nfs_open(inode, filp);
+       if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) {
+               /* This is a mountpoint, so d_revalidate will never
+                * have been called, so we need to refresh the
+                * inode (for close-open consistency) ourselves.
+                */
+               __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+       }
        return res;
 }
 
@@ -1103,7 +1110,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
        if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                goto no_open_dput;
        /* We can't create new files, or truncate existing ones here */
-       openflags &= ~(O_CREAT|O_TRUNC);
+       openflags &= ~(O_CREAT|O_EXCL|O_TRUNC);
 
        /*
         * Note: we're not holding inode->i_mutex and so may be racing with
index 2d141a74ae82ec722d1efb799de168824b593722..eb51bd6201da0cd361d8265b4c6b0e3edee50ec9 100644 (file)
@@ -323,7 +323,7 @@ nfs_file_fsync(struct file *file, int datasync)
        have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
        if (have_error)
                ret = xchg(&ctx->error, 0);
-       if (!ret)
+       if (!ret && status < 0)
                ret = status;
        return ret;
 }
index 7ffbb98ddec34c405bde993b0df16873162e21b6..089da5b5d20a1621b1142dbf75f415251ea4cb4b 100644 (file)
@@ -2036,7 +2036,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
        struct rpc_cred *cred;
        struct nfs4_state *state;
        struct dentry *res;
-       fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+       int open_flags = nd->intent.open.flags;
+       fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
 
        if (nd->flags & LOOKUP_CREATE) {
                attr.ia_mode = nd->intent.open.create_mode;
@@ -2044,8 +2045,9 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
                if (!IS_POSIXACL(dir))
                        attr.ia_mode &= ~current_umask();
        } else {
+               open_flags &= ~O_EXCL;
                attr.ia_valid = 0;
-               BUG_ON(nd->intent.open.flags & O_CREAT);
+               BUG_ON(open_flags & O_CREAT);
        }
 
        cred = rpc_lookup_cred();
@@ -2054,7 +2056,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
        parent = dentry->d_parent;
        /* Protect against concurrent sillydeletes */
        nfs_block_sillyrename(parent);
-       state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred);
+       state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred);
        put_rpccred(cred);
        if (IS_ERR(state)) {
                if (PTR_ERR(state) == -ENOENT) {
@@ -2273,8 +2275,7 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
 out:
        if (page)
                __free_page(page);
-       if (locations)
-               kfree(locations);
+       kfree(locations);
        return status;
 }
 
index ee26316ad1f44eaf276299740c2a579635682fac..ec3966e4706b2f70d8709199f86456392a6e6584 100644 (file)
@@ -655,6 +655,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 
        if (nfss->options & NFS_OPTION_FSCACHE)
                seq_printf(m, ",fsc");
+
+       if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
+               if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
+                       seq_printf(m, ",lookupcache=none");
+               else
+                       seq_printf(m, ",lookupcache=pos");
+       }
 }
 
 /*
index 503b9da159a3d3def44ca3cb5d752d1020eb1e50..95932f523aef2b2b7ef4686b8bb4a18383b4a395 100644 (file)
@@ -69,7 +69,6 @@ config NFSD_V4
        depends on NFSD && PROC_FS && EXPERIMENTAL
        select NFSD_V3
        select FS_POSIX_ACL
-       select RPCSEC_GSS_KRB5
        help
          This option enables support in your system's NFS server for
          version 4 of the NFS protocol (RFC 3530).
index bee60c04109a29b61d9cf4baffece2d18f7f75c3..922263393c765664f6b5598f4c144093ec9e582c 100644 (file)
@@ -175,24 +175,24 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
 {
        struct the_nilfs *nilfs = sbi->s_nilfs;
        int err;
-       int barrier_done = 0;
 
-       if (nilfs_test_opt(sbi, BARRIER)) {
-               set_buffer_ordered(nilfs->ns_sbh[0]);
-               barrier_done = 1;
-       }
  retry:
        set_buffer_dirty(nilfs->ns_sbh[0]);
-       err = sync_dirty_buffer(nilfs->ns_sbh[0]);
-       if (err == -EOPNOTSUPP && barrier_done) {
-               nilfs_warning(sbi->s_super, __func__,
-                             "barrier-based sync failed. "
-                             "disabling barriers\n");
-               nilfs_clear_opt(sbi, BARRIER);
-               barrier_done = 0;
-               clear_buffer_ordered(nilfs->ns_sbh[0]);
-               goto retry;
+
+       if (nilfs_test_opt(sbi, BARRIER)) {
+               err = __sync_dirty_buffer(nilfs->ns_sbh[0],
+                                         WRITE_SYNC | WRITE_BARRIER);
+               if (err == -EOPNOTSUPP) {
+                       nilfs_warning(sbi->s_super, __func__,
+                                     "barrier-based sync failed. "
+                                     "disabling barriers\n");
+                       nilfs_clear_opt(sbi, BARRIER);
+                       goto retry;
+               }
+       } else {
+               err = sync_dirty_buffer(nilfs->ns_sbh[0]);
        }
+
        if (unlikely(err)) {
                printk(KERN_ERR
                       "NILFS: unable to write superblock (err=%d)\n", err);
index 630715f9f73d0e9ae4370a339eb5163a8bb55ca0..d74e1983e8dc478145dc8da369c0479bb947d8c2 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -675,7 +675,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
        f->f_path.mnt = mnt;
        f->f_pos = 0;
        f->f_op = fops_get(inode->i_fop);
-       file_move(f, &inode->i_sb->s_files);
+       file_sb_list_add(f, inode->i_sb);
 
        error = security_dentry_open(f, cred);
        if (error)
@@ -721,7 +721,7 @@ cleanup_all:
                        mnt_drop_write(mnt);
                }
        }
-       file_kill(f);
+       file_sb_list_del(f);
        f->f_path.dentry = NULL;
        f->f_path.mnt = NULL;
 cleanup_file:
index 5cc564a83149a5fc311b57d87793febbdcfd4f27..8066b8dd748f6800a09694ee62c2a0b33c97d20a 100644 (file)
@@ -126,6 +126,9 @@ static int do_make_slave(struct vfsmount *mnt)
        return 0;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 void change_mnt_propagation(struct vfsmount *mnt, int type)
 {
        if (type == MS_SHARED) {
@@ -270,12 +273,12 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
                prev_src_mnt  = child;
        }
 out:
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        while (!list_empty(&tmp_list)) {
                child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
                umount_tree(child, 0, &umount_list);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        release_mounts(&umount_list);
        return ret;
 }
@@ -296,6 +299,8 @@ static inline int do_refcount_check(struct vfsmount *mnt, int count)
  * other mounts its parent propagates to.
  * Check if any of these mounts that **do not have submounts**
  * have more references than 'refcnt'. If so return busy.
+ *
+ * vfsmount lock must be held for read or write
  */
 int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
 {
@@ -353,6 +358,8 @@ static void __propagate_umount(struct vfsmount *mnt)
  * collect all mounts that receive propagation from the mount in @list,
  * and return these additional mounts in the same list.
  * @list: the list of mounts to be unmounted.
+ *
+ * vfsmount lock must be held for write
  */
 int propagate_umount(struct list_head *list)
 {
index ae35413dcbe1322a0ace18dbff0688138f585764..caa758377d66b8ba5530d029635d8cdfa98048ba 100644 (file)
@@ -83,6 +83,7 @@ void reiserfs_evict_inode(struct inode *inode)
        dquot_drop(inode);
        inode->i_blocks = 0;
        reiserfs_write_unlock_once(inode->i_sb, depth);
+       return;
 
 no_delete:
        end_writeback(inode);
index 1ec952b1f036fb30266cc8fea1e7b3795444fd37..812e2c05aa29eeda01bf94323234cdf260344353 100644 (file)
@@ -2311,7 +2311,7 @@ static int journal_read_transaction(struct super_block *sb,
        /* flush out the real blocks */
        for (i = 0; i < get_desc_trans_len(desc); i++) {
                set_buffer_dirty(real_blocks[i]);
-               ll_rw_block(SWRITE, 1, real_blocks + i);
+               write_dirty_buffer(real_blocks[i], WRITE);
        }
        for (i = 0; i < get_desc_trans_len(desc); i++) {
                wait_on_buffer(real_blocks[i]);
index 9674ab2c8718c3f1061f3556e6ec3e9b479cb8a9..8819e3a7ff203fb521b537672d16d5e2d4cea80d 100644 (file)
@@ -54,7 +54,22 @@ static struct super_block *alloc_super(struct file_system_type *type)
                        s = NULL;
                        goto out;
                }
+#ifdef CONFIG_SMP
+               s->s_files = alloc_percpu(struct list_head);
+               if (!s->s_files) {
+                       security_sb_free(s);
+                       kfree(s);
+                       s = NULL;
+                       goto out;
+               } else {
+                       int i;
+
+                       for_each_possible_cpu(i)
+                               INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
+               }
+#else
                INIT_LIST_HEAD(&s->s_files);
+#endif
                INIT_LIST_HEAD(&s->s_instances);
                INIT_HLIST_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);
@@ -108,6 +123,9 @@ out:
  */
 static inline void destroy_super(struct super_block *s)
 {
+#ifdef CONFIG_SMP
+       free_percpu(s->s_files);
+#endif
        security_sb_free(s);
        kfree(s->s_subtype);
        kfree(s->s_options);
index 048484fb10d28f12722052955abb043ecc0ac096..46f7a807bbc1ec8313af3df1a4c08c2afb3498eb 100644 (file)
@@ -114,10 +114,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
        
        unlock_super (sb);
@@ -207,10 +205,8 @@ do_more:
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
 
        if (overflow) {
                fragment += count;
@@ -558,10 +554,8 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
 
        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
@@ -680,10 +674,8 @@ cg_found:
 succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
 
        result += cgno * uspi->s_fpg;
index 428017e018fe63268b216b9ff2fce6cd83d547b8..2eabf04af3de12e98d0fbf812879825e7247e619 100644 (file)
@@ -113,10 +113,8 @@ void ufs_free_inode (struct inode * inode)
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        
        sb->s_dirt = 1;
        unlock_super (sb);
@@ -156,10 +154,8 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
 
        fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
        ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer(UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
 
        UFSD("EXIT\n");
 }
@@ -290,10 +286,8 @@ cg_found:
        }
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
 
        inode->i_ino = cg * uspi->s_ipg + bit;
index 34d5cb1353204ea8a2a7cf348750d66f4519613d..a58f9155fc9a7baea4169efec7529434035d8458 100644 (file)
@@ -243,10 +243,8 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
                ubh_bforget(ind_ubh);
                ind_ubh = NULL;
        }
-       if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
-               ubh_ll_rw_block(SWRITE, ind_ubh);
-               ubh_wait_on_buffer (ind_ubh);
-       }
+       if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
+               ubh_sync_block(ind_ubh);
        ubh_brelse (ind_ubh);
        
        UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -307,10 +305,8 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
                ubh_bforget(dind_bh);
                dind_bh = NULL;
        }
-       if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
-               ubh_ll_rw_block(SWRITE, dind_bh);
-               ubh_wait_on_buffer (dind_bh);
-       }
+       if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
+               ubh_sync_block(dind_bh);
        ubh_brelse (dind_bh);
        
        UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -367,10 +363,8 @@ static int ufs_trunc_tindirect(struct inode *inode)
                ubh_bforget(tind_bh);
                tind_bh = NULL;
        }
-       if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
-               ubh_ll_rw_block(SWRITE, tind_bh);
-               ubh_wait_on_buffer (tind_bh);
-       }
+       if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
+               ubh_sync_block(tind_bh);
        ubh_brelse (tind_bh);
        
        UFSD("EXIT: ino %lu\n", inode->i_ino);
index 85a7fc9e4a4e345161a1c8a8a26e0eebcb834aae..d2c36d53fe66e8a827d2f232309bc11d7df0afec 100644 (file)
@@ -113,21 +113,17 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
        }
 }
 
-void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
+void ubh_sync_block(struct ufs_buffer_head *ubh)
 {
-       if (!ubh)
-               return;
+       if (ubh) {
+               unsigned i;
 
-       ll_rw_block(rw, ubh->count, ubh->bh);
-}
+               for (i = 0; i < ubh->count; i++)
+                       write_dirty_buffer(ubh->bh[i], WRITE);
 
-void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
-{
-       unsigned i;
-       if (!ubh)
-               return;
-       for ( i = 0; i < ubh->count; i++ )
-               wait_on_buffer (ubh->bh[i]);
+               for (i = 0; i < ubh->count; i++)
+                       wait_on_buffer(ubh->bh[i]);
+       }
 }
 
 void ubh_bforget (struct ufs_buffer_head * ubh)
index 0466036912f1adb41ccf85eb2d3dbd81b669ef4e..9f8775ce381c403647e84d1df9d45ae78c80a573 100644 (file)
@@ -269,8 +269,7 @@ extern void ubh_brelse (struct ufs_buffer_head *);
 extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
 extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
 extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
-extern void ubh_ll_rw_block(int, struct ufs_buffer_head *);
-extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
+extern void ubh_sync_block(struct ufs_buffer_head *);
 extern void ubh_bforget (struct ufs_buffer_head *);
 extern int  ubh_buffer_dirty (struct ufs_buffer_head *);
 #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
index df84e3b04555f495356dfb15e74dfcd3806b1523..d89dec864d42547b84708e3502bf81f98108e76f 100644 (file)
@@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs);
 #endif
 
 #ifndef sys_execve
-asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
-                       char __user * __user *envp, struct pt_regs *regs);
+asmlinkage long sys_execve(const char __user *filename,
+                          const char __user *const __user *argv,
+                          const char __user *const __user *envp,
+                          struct pt_regs *regs);
 #endif
 
 #ifndef sys_mmap2
index 43e649a72529afa8282f7da2029ab8e6759f0a97..ec94c12f21da5ab9d8098f7afbdf59d2cc7886b7 100644 (file)
@@ -32,7 +32,6 @@ enum bh_state_bits {
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
-       BH_Ordered,     /* ordered write */
        BH_Eopnotsupp,  /* operation not supported (barrier) */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer Error Prinks to be quiet */
@@ -125,7 +124,6 @@ BUFFER_FNS(Async_Write, async_write)
 BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)
 BUFFER_FNS(Write_EIO, write_io_error)
-BUFFER_FNS(Ordered, ordered)
 BUFFER_FNS(Eopnotsupp, eopnotsupp)
 BUFFER_FNS(Unwritten, unwritten)
 
@@ -183,6 +181,8 @@ void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
+void write_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
index 9a96b4d83fc126a92adb9c87f7f1e5ae62bc0056..76041b6147582ef62eb0daedafbf1771a8e844c6 100644 (file)
@@ -125,9 +125,6 @@ struct inodes_stat_t {
  *                     block layer could (in theory) choose to ignore this
  *                     request if it runs into resource problems.
  * WRITE               A normal async write. Device will be plugged.
- * SWRITE              Like WRITE, but a special case for ll_rw_block() that
- *                     tells it to lock the buffer first. Normally a buffer
- *                     must be locked before doing IO.
  * WRITE_SYNC_PLUG     Synchronous write. Identical to WRITE, but passes down
  *                     the hint that someone will be waiting on this IO
  *                     shortly. The device must still be unplugged explicitly,
@@ -138,9 +135,6 @@ struct inodes_stat_t {
  *                     immediately after submission. The write equivalent
  *                     of READ_SYNC.
  * WRITE_ODIRECT_PLUG  Special case write for O_DIRECT only.
- * SWRITE_SYNC
- * SWRITE_SYNC_PLUG    Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
- *                     See SWRITE.
  * WRITE_BARRIER       Like WRITE_SYNC, but tells the block layer that all
  *                     previously submitted writes must be safely on storage
  *                     before this one is started. Also guarantees that when
@@ -155,7 +149,6 @@ struct inodes_stat_t {
 #define READ                   0
 #define WRITE                  RW_MASK
 #define READA                  RWA_MASK
-#define SWRITE                 (WRITE | READA)
 
 #define READ_SYNC              (READ | REQ_SYNC | REQ_UNPLUG)
 #define READ_META              (READ | REQ_META)
@@ -165,8 +158,6 @@ struct inodes_stat_t {
 #define WRITE_META             (WRITE | REQ_META)
 #define WRITE_BARRIER          (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
                                 REQ_HARDBARRIER)
-#define SWRITE_SYNC_PLUG       (SWRITE | REQ_SYNC | REQ_NOIDLE)
-#define SWRITE_SYNC            (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about
@@ -929,6 +920,9 @@ struct file {
 #define f_vfsmnt       f_path.mnt
        const struct file_operations    *f_op;
        spinlock_t              f_lock;  /* f_ep_links, f_flags, no IRQ */
+#ifdef CONFIG_SMP
+       int                     f_sb_list_cpu;
+#endif
        atomic_long_t           f_count;
        unsigned int            f_flags;
        fmode_t                 f_mode;
@@ -953,9 +947,6 @@ struct file {
        unsigned long f_mnt_write_state;
 #endif
 };
-extern spinlock_t files_lock;
-#define file_list_lock() spin_lock(&files_lock);
-#define file_list_unlock() spin_unlock(&files_lock);
 
 #define get_file(x)    atomic_long_inc(&(x)->f_count)
 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
@@ -1346,7 +1337,11 @@ struct super_block {
 
        struct list_head        s_inodes;       /* all inodes */
        struct hlist_head       s_anon;         /* anonymous dentries for (nfs) exporting */
+#ifdef CONFIG_SMP
+       struct list_head __percpu *s_files;
+#else
        struct list_head        s_files;
+#endif
        /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
        struct list_head        s_dentry_lru;   /* unused dentry lru */
        int                     s_nr_dentry_unused;     /* # of dentry on lru */
@@ -2197,8 +2192,6 @@ static inline void insert_inode_hash(struct inode *inode) {
        __insert_inode_hash(inode, inode->i_ino);
 }
 
-extern void file_move(struct file *f, struct list_head *list);
-extern void file_kill(struct file *f);
 #ifdef CONFIG_BLOCK
 extern void submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
index eca3d5202138f68bb30dd1a308aaf4e121171663..a42b5bf02f8bcfec3ea20ecef765df3f4d690250 100644 (file)
@@ -5,7 +5,7 @@
 
 struct fs_struct {
        int users;
-       rwlock_t lock;
+       spinlock_t lock;
        int umask;
        int in_exec;
        struct path root, pwd;
@@ -23,29 +23,29 @@ extern int unshare_fs_struct(void);
 
 static inline void get_fs_root(struct fs_struct *fs, struct path *root)
 {
-       read_lock(&fs->lock);
+       spin_lock(&fs->lock);
        *root = fs->root;
        path_get(root);
-       read_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
 {
-       read_lock(&fs->lock);
+       spin_lock(&fs->lock);
        *pwd = fs->pwd;
        path_get(pwd);
-       read_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
                                       struct path *pwd)
 {
-       read_lock(&fs->lock);
+       spin_lock(&fs->lock);
        *root = fs->root;
        path_get(root);
        *pwd = fs->pwd;
        path_get(pwd);
-       read_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 }
 
 #endif /* _LINUX_FS_STRUCT_H */
index 311f8753d713dec24828856aed5a2992d6d16923..4aa95f203f3ee773a6ab4bbdbb632efaf970785e 100644 (file)
@@ -836,6 +836,8 @@ extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);
 
 extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
 
+extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
+
 extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
        void *buf, unsigned int len, size_t recsize);
 
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
new file mode 100644 (file)
index 0000000..b288cb7
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Specialised local-global spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * "local/global locks" (lglocks) can be used to:
+ *
+ * - Provide fast exclusive access to per-CPU data, with exclusive access to
+ *   another CPU's data allowed but possibly subject to contention, and to
+ *   provide very slow exclusive access to all per-CPU data.
+ * - Or to provide very fast and scalable read serialisation, and to provide
+ *   very slow exclusive serialisation of data (not necessarily per-CPU data).
+ *
+ * Brlocks are also implemented as a short-hand notation for the latter use
+ * case.
+ *
+ * Copyright 2009, 2010, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_LGLOCK_H
+#define __LINUX_LGLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/lockdep.h>
+#include <linux/percpu.h>
+
+/* can make br locks by using local lock for read side, global lock for write */
+#define br_lock_init(name)     name##_lock_init()
+#define br_read_lock(name)     name##_local_lock()
+#define br_read_unlock(name)   name##_local_unlock()
+#define br_write_lock(name)    name##_global_lock_online()
+#define br_write_unlock(name)  name##_global_unlock_online()
+
+#define DECLARE_BRLOCK(name)   DECLARE_LGLOCK(name)
+#define DEFINE_BRLOCK(name)    DEFINE_LGLOCK(name)
+
+
+#define lg_lock_init(name)     name##_lock_init()
+#define lg_local_lock(name)    name##_local_lock()
+#define lg_local_unlock(name)  name##_local_unlock()
+#define lg_local_lock_cpu(name, cpu)   name##_local_lock_cpu(cpu)
+#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
+#define lg_global_lock(name)   name##_global_lock()
+#define lg_global_unlock(name) name##_global_unlock()
+#define lg_global_lock_online(name) name##_global_lock_online()
+#define lg_global_unlock_online(name) name##_global_unlock_online()
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define LOCKDEP_INIT_MAP lockdep_init_map
+
+#define DEFINE_LGLOCK_LOCKDEP(name)                                    \
+ struct lock_class_key name##_lock_key;                                        \
+ struct lockdep_map name##_lock_dep_map;                               \
+ EXPORT_SYMBOL(name##_lock_dep_map)
+
+#else
+#define LOCKDEP_INIT_MAP(a, b, c, d)
+
+#define DEFINE_LGLOCK_LOCKDEP(name)
+#endif
+
+
+#define DECLARE_LGLOCK(name)                                           \
+ extern void name##_lock_init(void);                                   \
+ extern void name##_local_lock(void);                                  \
+ extern void name##_local_unlock(void);                                        \
+ extern void name##_local_lock_cpu(int cpu);                           \
+ extern void name##_local_unlock_cpu(int cpu);                         \
+ extern void name##_global_lock(void);                                 \
+ extern void name##_global_unlock(void);                               \
+ extern void name##_global_lock_online(void);                          \
+ extern void name##_global_unlock_online(void);                                \
+
+#define DEFINE_LGLOCK(name)                                            \
+                                                                       \
+ DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
+ DEFINE_LGLOCK_LOCKDEP(name);                                          \
+                                                                       \
+ void name##_lock_init(void) {                                         \
+       int i;                                                          \
+       LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+       for_each_possible_cpu(i) {                                      \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
+       }                                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_lock_init);                                      \
+                                                                       \
+ void name##_local_lock(void) {                                                \
+       arch_spinlock_t *lock;                                          \
+       preempt_disable();                                              \
+       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
+       lock = &__get_cpu_var(name##_lock);                             \
+       arch_spin_lock(lock);                                           \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_lock);                                     \
+                                                                       \
+ void name##_local_unlock(void) {                                      \
+       arch_spinlock_t *lock;                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
+       lock = &__get_cpu_var(name##_lock);                             \
+       arch_spin_unlock(lock);                                         \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_unlock);                                   \
+                                                                       \
+ void name##_local_lock_cpu(int cpu) {                                 \
+       arch_spinlock_t *lock;                                          \
+       preempt_disable();                                              \
+       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
+       lock = &per_cpu(name##_lock, cpu);                              \
+       arch_spin_lock(lock);                                           \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_lock_cpu);                                 \
+                                                                       \
+ void name##_local_unlock_cpu(int cpu) {                               \
+       arch_spinlock_t *lock;                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
+       lock = &per_cpu(name##_lock, cpu);                              \
+       arch_spin_unlock(lock);                                         \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_unlock_cpu);                               \
+                                                                       \
+ void name##_global_lock_online(void) {                                        \
+       int i;                                                          \
+       preempt_disable();                                              \
+       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_lock(lock);                                   \
+       }                                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_lock_online);                             \
+                                                                       \
+ void name##_global_unlock_online(void) {                              \
+       int i;                                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_unlock(lock);                                 \
+       }                                                               \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_unlock_online);                           \
+                                                                       \
+ void name##_global_lock(void) {                                       \
+       int i;                                                          \
+       preempt_disable();                                              \
+       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_lock(lock);                                   \
+       }                                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_lock);                                    \
+                                                                       \
+ void name##_global_unlock(void) {                                     \
+       int i;                                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_unlock(lock);                                 \
+       }                                                               \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif
index b8bb9a6a1f37b71cae90c33ff84e75d2d780226a..ee7e258627f9f5b9999e4c3033f7fa78c5927b48 100644 (file)
@@ -134,7 +134,7 @@ struct vm_area_struct {
                                           within vm_mm. */
 
        /* linked list of VM areas per task, sorted by address */
-       struct vm_area_struct *vm_next;
+       struct vm_area_struct *vm_next, *vm_prev;
 
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */
index ae0a5286f558f334859a6d5a2163c18bcc974c3e..92e52a1e6af3fd8478bb451f04d34a3c63b1625f 100644 (file)
@@ -213,6 +213,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @dma_alignment: SPI controller constraint on DMA buffers alignment.
  * @mode_bits: flags understood by this controller driver
  * @flags: other constraints relevant to this driver
+ * @bus_lock_spinlock: spinlock for SPI bus locking
+ * @bus_lock_mutex: mutex for SPI bus locking
+ * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
  * @setup: updates the device mode and clocking records used by a
  *     device's SPI controller; protocol code may call this.  This
  *     must fail if an unrecognized or unsupported mode is requested.
index 1437da3ddc629b7dc285e7cb4ce11e2b5400de7a..67d64e6efe7a8c95d775b6418d2c1c8d728d667d 100644 (file)
@@ -329,6 +329,13 @@ struct tty_struct {
        struct tty_port *port;
 };
 
+/* Each of a tty's open files has private_data pointing to tty_file_private */
+struct tty_file_private {
+       struct tty_struct *tty;
+       struct file *file;
+       struct list_head list;
+};
+
 /* tty magic number */
 #define TTY_MAGIC              0x5401
 
@@ -458,6 +465,7 @@ extern void proc_clear_tty(struct task_struct *p);
 extern struct tty_struct *get_current_tty(void);
 extern void tty_default_fops(struct file_operations *fops);
 extern struct tty_struct *alloc_tty_struct(void);
+extern void tty_add_file(struct tty_struct *tty, struct file *file);
 extern void free_tty_struct(struct tty_struct *tty);
 extern void initialize_tty_struct(struct tty_struct *tty,
                struct tty_driver *driver, int idx);
@@ -470,6 +478,7 @@ extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
 extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
 
 extern struct mutex tty_mutex;
+extern spinlock_t tty_files_lock;
 
 extern void tty_write_unlock(struct tty_struct *tty);
 extern int tty_write_lock(struct tty_struct *tty, int ndelay);
index 6a664c3f7c1e426dd0b74129ce9cf81cb07fefa0..7dc97d12253c1bdc494b939e5e856b1b1fa6fc2c 100644 (file)
@@ -1707,6 +1707,7 @@ struct snd_emu10k1 {
        unsigned int card_type;                 /* EMU10K1_CARD_* */
        unsigned int ecard_ctrl;                /* ecard control bits */
        unsigned long dma_mask;                 /* PCI DMA mask */
+       unsigned int delay_pcm_irq;             /* in samples */
        int max_cache_pages;                    /* max memory size / PAGE_SIZE */
        struct snd_dma_buffer silent_page;      /* silent page */
        struct snd_dma_buffer ptb_pages;        /* page table pages */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644 (file)
index 0000000..49682d7
--- /dev/null
@@ -0,0 +1,62 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
+ * @work:      pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_start,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+               __field( void *,        function)
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+               __entry->function       = work->func;
+       ),
+
+       TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+);
+
+/**
+ * workqueue_execute_end - called immediately before the workqueue callback
+ * @work:      pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_end,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+       ),
+
+       TP_printk("work struct %p", __entry->work)
+);
+
+
+#endif /*  _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 98b450876f93878b437bbbc6efe6abd1c4028b29..b7e9d60a675d3a08a1096ce725380229217d9fa3 100644 (file)
@@ -300,7 +300,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, **pprev;
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        if (retval)
                goto out;
 
+       prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
 
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
-               tmp->vm_next = NULL;
+               tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
+               tmp->vm_prev = prev;
+               prev = tmp;
 
                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
@@ -752,13 +755,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
        struct fs_struct *fs = current->fs;
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                if (fs->in_exec) {
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                        return -EAGAIN;
                }
                fs->users++;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
                return 0;
        }
        tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1679,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
                if (new_fs) {
                        fs = current->fs;
-                       write_lock(&fs->lock);
+                       spin_lock(&fs->lock);
                        current->fs = new_fs;
                        if (--fs->users)
                                new_fs = NULL;
                        else
                                new_fs = fs;
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                }
 
                if (new_mm) {
index 4502604ecadf0bc9e1d0d132bc17fc6f3a0ab9d9..6b5580c57644dc1804b02fd85648e7100d5d75dd 100644 (file)
@@ -503,6 +503,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
 }
 EXPORT_SYMBOL(__kfifo_out_r);
 
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+       unsigned int n;
+
+       n = __kfifo_peek_n(fifo, recsize);
+       fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
 int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
        unsigned long len, unsigned int *copied, size_t recsize)
 {
index 3632ce87674f88dfd6c4ce5c8ed09eb184ace1a3..19cccc3c302871beae5fd39ad937b0791a2e785d 100644 (file)
@@ -3846,6 +3846,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                        rpos = reader->read;
                        pos += size;
 
+                       if (rpos >= commit)
+                               break;
+
                        event = rb_reader_event(cpu_buffer);
                        size = rb_event_length(event);
                } while (len > size);
index ba14a22be4cc4aca4af73121a09910dcbebaf050..9ec59f541156625b5c4b0aea9267086c928ae07a 100644 (file)
@@ -3463,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
 {
        char *buf;
+       size_t written;
 
        if (tracing_disabled)
                return -EINVAL;
@@ -3484,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        } else
                buf[cnt] = '\0';
 
-       cnt = mark_printk("%s", buf);
+       written = mark_printk("%s", buf);
        kfree(buf);
-       *fpos += cnt;
+       *fpos += written;
 
-       return cnt;
+       /* don't tell userspace we wrote more - it might confuse them */
+       if (written > cnt)
+               written = cnt;
+
+       return written;
 }
 
 static int tracing_clock_show(struct seq_file *m, void *v)
index 09b4fa6e4d3be8b83758c48a529d01a6f18010da..4c758f146328f18ce82a318fb60a0413006aca8f 100644 (file)
@@ -598,88 +598,165 @@ out:
        return ret;
 }
 
-static void print_event_fields(struct trace_seq *s, struct list_head *head)
+enum {
+       FORMAT_HEADER           = 1,
+       FORMAT_PRINTFMT         = 2,
+};
+
+static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
+       struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
+       struct list_head *head;
 
-       list_for_each_entry_reverse(field, head, link) {
-               /*
-                * Smartly shows the array type(except dynamic array).
-                * Normal:
-                *      field:TYPE VAR
-                * If TYPE := TYPE[LEN], it is shown:
-                *      field:TYPE VAR[LEN]
-                */
-               const char *array_descriptor = strchr(field->type, '[');
+       (*pos)++;
 
-               if (!strncmp(field->type, "__data_loc", 10))
-                       array_descriptor = NULL;
+       switch ((unsigned long)v) {
+       case FORMAT_HEADER:
+               head = &ftrace_common_fields;
 
-               if (!array_descriptor) {
-                       trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-                                       "\tsize:%u;\tsigned:%d;\n",
-                                       field->type, field->name, field->offset,
-                                       field->size, !!field->is_signed);
-               } else {
-                       trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-                                       "\tsize:%u;\tsigned:%d;\n",
-                                       (int)(array_descriptor - field->type),
-                                       field->type, field->name,
-                                       array_descriptor, field->offset,
-                                       field->size, !!field->is_signed);
-               }
+               if (unlikely(list_empty(head)))
+                       return NULL;
+
+               field = list_entry(head->prev, struct ftrace_event_field, link);
+               return field;
+
+       case FORMAT_PRINTFMT:
+               /* all done */
+               return NULL;
+       }
+
+       head = trace_get_fields(call);
+
+       /*
+        * To separate common fields from event fields, the
+        * LSB is set on the first event field. Clear it in case.
+        */
+       v = (void *)((unsigned long)v & ~1L);
+
+       field = v;
+       /*
+        * If this is a common field, and at the end of the list, then
+        * continue with main list.
+        */
+       if (field->link.prev == &ftrace_common_fields) {
+               if (unlikely(list_empty(head)))
+                       return NULL;
+               field = list_entry(head->prev, struct ftrace_event_field, link);
+               /* Set the LSB to notify f_show to print an extra newline */
+               field = (struct ftrace_event_field *)
+                       ((unsigned long)field | 1);
+               return field;
        }
+
+       /* If we are done tell f_show to print the format */
+       if (field->link.prev == head)
+               return (void *)FORMAT_PRINTFMT;
+
+       field = list_entry(field->link.prev, struct ftrace_event_field, link);
+
+       return field;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-                 loff_t *ppos)
+static void *f_start(struct seq_file *m, loff_t *pos)
 {
-       struct ftrace_event_call *call = filp->private_data;
-       struct list_head *head;
-       struct trace_seq *s;
-       char *buf;
-       int r;
+       loff_t l = 0;
+       void *p;
 
-       if (*ppos)
+       /* Start by showing the header */
+       if (!*pos)
+               return (void *)FORMAT_HEADER;
+
+       p = (void *)FORMAT_HEADER;
+       do {
+               p = f_next(m, p, &l);
+       } while (p && l < *pos);
+
+       return p;
+}
+
+static int f_show(struct seq_file *m, void *v)
+{
+       struct ftrace_event_call *call = m->private;
+       struct ftrace_event_field *field;
+       const char *array_descriptor;
+
+       switch ((unsigned long)v) {
+       case FORMAT_HEADER:
+               seq_printf(m, "name: %s\n", call->name);
+               seq_printf(m, "ID: %d\n", call->event.type);
+               seq_printf(m, "format:\n");
                return 0;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
-       if (!s)
-               return -ENOMEM;
+       case FORMAT_PRINTFMT:
+               seq_printf(m, "\nprint fmt: %s\n",
+                          call->print_fmt);
+               return 0;
+       }
 
-       trace_seq_init(s);
+       /*
+        * To separate common fields from event fields, the
+        * LSB is set on the first event field. Clear it and
+        * print a newline if it is set.
+        */
+       if ((unsigned long)v & 1) {
+               seq_putc(m, '\n');
+               v = (void *)((unsigned long)v & ~1L);
+       }
 
-       trace_seq_printf(s, "name: %s\n", call->name);
-       trace_seq_printf(s, "ID: %d\n", call->event.type);
-       trace_seq_printf(s, "format:\n");
+       field = v;
 
-       /* print common fields */
-       print_event_fields(s, &ftrace_common_fields);
+       /*
+        * Smartly shows the array type(except dynamic array).
+        * Normal:
+        *      field:TYPE VAR
+        * If TYPE := TYPE[LEN], it is shown:
+        *      field:TYPE VAR[LEN]
+        */
+       array_descriptor = strchr(field->type, '[');
 
-       trace_seq_putc(s, '\n');
+       if (!strncmp(field->type, "__data_loc", 10))
+               array_descriptor = NULL;
 
-       /* print event specific fields */
-       head = trace_get_fields(call);
-       print_event_fields(s, head);
+       if (!array_descriptor)
+               seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          field->type, field->name, field->offset,
+                          field->size, !!field->is_signed);
+       else
+               seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          (int)(array_descriptor - field->type),
+                          field->type, field->name,
+                          array_descriptor, field->offset,
+                          field->size, !!field->is_signed);
 
-       r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
+       return 0;
+}
 
-       if (!r) {
-               /*
-                * ug!  The format output is bigger than a PAGE!!
-                */
-               buf = "FORMAT TOO BIG\n";
-               r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                             buf, strlen(buf));
-               goto out;
-       }
+static void f_stop(struct seq_file *m, void *p)
+{
+}
 
-       r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                   s->buffer, s->len);
- out:
-       kfree(s);
-       return r;
+static const struct seq_operations trace_format_seq_ops = {
+       .start          = f_start,
+       .next           = f_next,
+       .stop           = f_stop,
+       .show           = f_show,
+};
+
+static int trace_format_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_event_call *call = inode->i_private;
+       struct seq_file *m;
+       int ret;
+
+       ret = seq_open(file, &trace_format_seq_ops);
+       if (ret < 0)
+               return ret;
+
+       m = file->private_data;
+       m->private = call;
+
+       return 0;
 }
 
 static ssize_t
@@ -877,8 +954,10 @@ static const struct file_operations ftrace_enable_fops = {
 };
 
 static const struct file_operations ftrace_event_format_fops = {
-       .open = tracing_open_generic,
-       .read = event_format_read,
+       .open = trace_format_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
 };
 
 static const struct file_operations ftrace_event_id_fops = {
index 6bff2362578115f4a087195362a5257c3b838d42..6f233698518ede15cc9302e889de9f108aa0f1cb 100644 (file)
@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
                         * if the output fails.
                         */
                        data->ent = *curr;
-                       data->ret = *next;
+                       /*
+                        * If the next event is not a return type, then
+                        * we only care about what type it is. Otherwise we can
+                        * safely copy the entire event.
+                        */
+                       if (next->ent.type == TRACE_GRAPH_RET)
+                               data->ret = *next;
+                       else
+                               data->ret.ent.type = next->ent.type;
                }
        }
 
index 2994a0e3a61cc2cfdeb382936394456875a4c01c..8bd600c020e5cdf5f2454681bc2e89fcc99c47d0 100644 (file)
@@ -35,6 +35,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
@@ -1790,7 +1793,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        work_clear_pending(work);
        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
+       trace_workqueue_execute_start(work);
        f(work);
+       /*
+        * While we must be careful to not use "work" after this, the trace
+        * point will only record its address.
+        */
+       trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
 
index 9e06b7f5ecf15b6b24ec50ea832a5720145ae8f1..1b4afd2e6ca089de0babdacc5781426ef118da5c 100644 (file)
@@ -994,13 +994,16 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 
 config LATENCYTOP
        bool "Latency measuring infrastructure"
+       depends on HAVE_LATENCYTOP_SUPPORT
+       depends on DEBUG_KERNEL
+       depends on STACKTRACE_SUPPORT
+       depends on PROC_FS
        select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
        select KALLSYMS
        select KALLSYMS_ALL
        select STACKTRACE
        select SCHEDSTATS
        select SCHED_DEBUG
-       depends on HAVE_LATENCYTOP_SUPPORT
        help
          Enable this option if you want to use the LatencyTOP tool
          to find out which userspace is blocking on what kernel operations.
index e907858498a6a3184f57de0d60fe25098ec37006..5b7d4623f0b70aee189deda3bc8318a590476160 100644 (file)
@@ -625,6 +625,8 @@ EXPORT_SYMBOL(radix_tree_tag_get);
  *
  * The function returns number of leaves where the tag was set and sets
  * *first_indexp to the first unscanned index.
+ * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
+ * be prepared to handle that.
  */
 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
                unsigned long *first_indexp, unsigned long last_index,
@@ -675,7 +677,8 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 next:
                /* Go to next item at level determined by 'shift' */
                index = ((index >> shift) + 1) << shift;
-               if (index > last_index)
+               /* Overflow can happen when last_index is ~0UL... */
+               if (index > last_index || !index)
                        break;
                if (tagged >= nr_to_tag)
                        break;
index b6e5fd23cc5a48e13f49b0d824ca3cc9713bd3ab..2ed2267439df0f894011ab62adef6d7efe68db9a 100644 (file)
@@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 {
        address &= PAGE_MASK;
        if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-               address -= PAGE_SIZE;
-               if (find_vma(vma->vm_mm, address) != vma)
-                       return -ENOMEM;
+               struct vm_area_struct *prev = vma->vm_prev;
+
+               /*
+                * Is there a mapping abutting this one below?
+                *
+                * That's only ok if it's the same stack mapping
+                * that has gotten split..
+                */
+               if (prev && prev->vm_end == address)
+                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_stack(vma, address);
+               expand_stack(vma, address - PAGE_SIZE);
        }
        return 0;
 }
index 49e5e4cb82328dea4e45ee9d81e51d5599df54ff..cbae7c5b95680a1bfca1df7e11a215bfce15b57c 100644 (file)
@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
        }
 }
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSDOWN) &&
+               (vma->vm_start == addr) &&
+               !vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                gup_flags |= FOLL_WRITE;
 
        /* We don't try to access the guard page of a stack vma */
-       if (vma->vm_flags & VM_GROWSDOWN) {
-               if (start == vma->vm_start) {
-                       start += PAGE_SIZE;
-                       nr_pages--;
-               }
+       if (stack_guard_page(vma, start)) {
+               addr += PAGE_SIZE;
+               nr_pages--;
        }
 
        while (nr_pages > 0) {
index 31003338b978b78ef66d4579033cdd1d3ebd1a16..331e51af38c9c950c8295fa125c67d00ba540025 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+       struct vm_area_struct *next;
+
+       vma->vm_prev = prev;
        if (prev) {
-               vma->vm_next = prev->vm_next;
+               next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent,
+                       next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
-                       vma->vm_next = NULL;
+                       next = NULL;
        }
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -483,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       prev->vm_next = vma->vm_next;
+       struct vm_area_struct *next = vma->vm_next;
+
+       prev->vm_next = next;
+       if (next)
+               next->vm_prev = prev;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
@@ -1915,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
@@ -1922,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
+       if (vma)
+               vma->vm_prev = prev;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
index efa9a380335eed137c30d5c762c5a5cdae469b9c..88ff091eb07a23dcaee1e8e6d0cc70927183b89b 100644 (file)
@@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma, **pp;
+       struct vm_area_struct *pvma, **pp, *next;
        struct address_space *mapping;
        struct rb_node **p, *parent;
 
@@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
                        break;
        }
 
-       vma->vm_next = *pp;
+       next = *pp;
        *pp = vma;
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
 /*
index 5014e50644d1b7528a06fca742677cf1a77cc04f..fc81cb22869ef54e6871daf39f51b32e3377aa98 100644 (file)
@@ -372,7 +372,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
                }
 
                pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
-                       task->pid, __task_cred(task)->uid, task->tgid,
+                       task->pid, task_uid(task), task->tgid,
                        task->mm->total_vm, get_mm_rss(task->mm),
                        task_cpu(task), task->signal->oom_adj,
                        task->signal->oom_score_adj, task->comm);
@@ -401,10 +401,9 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
        p = find_lock_task_mm(p);
-       if (!p) {
-               task_unlock(p);
+       if (!p)
                return 1;
-       }
+
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
                task_pid_nr(p), p->comm, K(p->mm->total_vm),
                K(get_mm_counter(p->mm, MM_ANONPAGES)),
@@ -647,6 +646,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        unsigned long freed = 0;
        unsigned int points;
        enum oom_constraint constraint = CONSTRAINT_NONE;
+       int killed = 0;
 
        blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
        if (freed > 0)
@@ -684,7 +684,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
                                NULL, nodemask,
                                "Out of memory (oom_kill_allocating_task)"))
-                       return;
+                       goto out;
        }
 
 retry:
@@ -692,7 +692,7 @@ retry:
                        constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
                                                                 NULL);
        if (PTR_ERR(p) == -1UL)
-               return;
+               goto out;
 
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
@@ -704,13 +704,15 @@ retry:
        if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
                                nodemask, "Out of memory"))
                goto retry;
+       killed = 1;
+out:
        read_unlock(&tasklist_lock);
 
        /*
         * Give "p" a good chance of killing itself before we
         * retry to allocate memory unless "p" is current
         */
-       if (!test_thread_flag(TIF_MEMDIE))
+       if (killed && !test_thread_flag(TIF_MEMDIE))
                schedule_timeout_uninterruptible(1);
 }
 
index 7262aacea8a201c073bd7ee5522f2e4b59fec937..c09ef5219cbe36f267a37f55d6fc670815082522 100644 (file)
@@ -836,7 +836,8 @@ void tag_pages_for_writeback(struct address_space *mapping,
                spin_unlock_irq(&mapping->tree_lock);
                WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
                cond_resched();
-       } while (tagged >= WRITEBACK_TAG_BATCH);
+               /* We check 'start' to handle wrapping when end == ~0UL */
+       } while (tagged >= WRITEBACK_TAG_BATCH && start);
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
index 443c161eb8bdd0dd4c988a9bab0d6d387fe2f0e3..3376d7657185324adf2afef79984369bd16984ff 100644 (file)
@@ -18,10 +18,11 @@ config SUNRPC_XPRT_RDMA
          If unsure, say N.
 
 config RPCSEC_GSS_KRB5
-       tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
-       depends on SUNRPC && EXPERIMENTAL
+       tristate
+       depends on SUNRPC && CRYPTO
+       prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4)
+       default y
        select SUNRPC_GSS
-       select CRYPTO
        select CRYPTO_MD5
        select CRYPTO_DES
        select CRYPTO_CBC
@@ -34,7 +35,7 @@ config RPCSEC_GSS_KRB5
          available from http://linux-nfs.org/.  In addition, user-space
          Kerberos support should be installed.
 
-         If unsure, say N.
+         If unsure, say Y.
 
 config RPCSEC_GSS_SPKM3
        tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
index e5e28d1946a41a852a558caa83ca49c68265bed7..2ac3f6e8adffaf74a7480961997bb9fb97201e98 100644 (file)
@@ -249,6 +249,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
        req->rl_nchunks = nchunks;
 
        BUG_ON(nchunks == 0);
+       BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
+              && (nchunks > 3));
 
        /*
         * finish off header. If write, marshal discrim and nchunks.
index 27015c6d8eb58311ed88264e86313fc3879ad0c2..5f4c7b3bc7114c597703c2739a427815c3be741c 100644 (file)
@@ -650,10 +650,22 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        ep->rep_attr.cap.max_send_wr = cdata->max_requests;
        switch (ia->ri_memreg_strategy) {
        case RPCRDMA_FRMR:
-               /* Add room for frmr register and invalidate WRs */
-               ep->rep_attr.cap.max_send_wr *= 3;
-               if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
-                       return -EINVAL;
+               /* Add room for frmr register and invalidate WRs.
+                * 1. FRMR reg WR for head
+                * 2. FRMR invalidate WR for head
+                * 3. FRMR reg WR for pagelist
+                * 4. FRMR invalidate WR for pagelist
+                * 5. FRMR reg WR for tail
+                * 6. FRMR invalidate WR for tail
+                * 7. The RDMA_SEND WR
+                */
+               ep->rep_attr.cap.max_send_wr *= 7;
+               if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
+                       cdata->max_requests = devattr.max_qp_wr / 7;
+                       if (!cdata->max_requests)
+                               return -EINVAL;
+                       ep->rep_attr.cap.max_send_wr = cdata->max_requests * 7;
+               }
                break;
        case RPCRDMA_MEMWINDOWS_ASYNC:
        case RPCRDMA_MEMWINDOWS:
@@ -1490,7 +1502,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
        memset(&frmr_wr, 0, sizeof frmr_wr);
        frmr_wr.opcode = IB_WR_FAST_REG_MR;
        frmr_wr.send_flags = 0;                 /* unsignaled */
-       frmr_wr.wr.fast_reg.iova_start = (unsigned long)seg1->mr_dma;
+       frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
        frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
        frmr_wr.wr.fast_reg.page_list_len = i;
        frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
index 49a62f0c4b87dc7f5a92223a7ecf67699e246e37..b6309db5622689deaf1c76a6acb1f6a09a02602c 100644 (file)
@@ -1305,10 +1305,11 @@ static void xs_tcp_state_change(struct sock *sk)
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
-       dprintk("RPC:       state %x conn %d dead %d zapped %d\n",
+       dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
                        sk->sk_state, xprt_connected(xprt),
                        sock_flag(sk, SOCK_DEAD),
-                       sock_flag(sk, SOCK_ZAPPED));
+                       sock_flag(sk, SOCK_ZAPPED),
+                       sk->sk_shutdown);
 
        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
@@ -1779,10 +1780,25 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra
 {
        unsigned int state = transport->inet->sk_state;
 
-       if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
-               return;
-       if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
-               return;
+       if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
+               /* we don't need to abort the connection if the socket
+                * hasn't undergone a shutdown
+                */
+               if (transport->inet->sk_shutdown == 0)
+                       return;
+               dprintk("RPC:       %s: TCP_CLOSEd and sk_shutdown set to %d\n",
+                               __func__, transport->inet->sk_shutdown);
+       }
+       if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
+               /* we don't need to abort the connection if the socket
+                * hasn't undergone a shutdown
+                */
+               if (transport->inet->sk_shutdown == 0)
+                       return;
+               dprintk("RPC:       %s: ESTABLISHED/SYN_SENT "
+                               "sk_shutdown set to %d\n",
+                               __func__, transport->inet->sk_shutdown);
+       }
        xs_abort_connection(xprt, transport);
 }
 
index 642eef3f633624413dccda3ab85b1eb6e2bcc578..178061e87ffe47d39ac97a247071d896ff080e8a 100644 (file)
@@ -44,10 +44,17 @@ static struct kfifo test;
 static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE);
 #endif
 
+static const unsigned char expected_result[FIFO_SIZE] = {
+        3,  4,  5,  6,  7,  8,  9,  0,
+        1, 20, 21, 22, 23, 24, 25, 26,
+       27, 28, 29, 30, 31, 32, 33, 34,
+       35, 36, 37, 38, 39, 40, 41, 42,
+};
+
 static int __init testfunc(void)
 {
        unsigned char   buf[6];
-       unsigned char   i;
+       unsigned char   i, j;
        unsigned int    ret;
 
        printk(KERN_INFO "byte stream fifo test start\n");
@@ -73,16 +80,34 @@ static int __init testfunc(void)
        ret = kfifo_in(&test, buf, ret);
        printk(KERN_INFO "ret: %d\n", ret);
 
+       /* skip first element of the fifo */
+       printk(KERN_INFO "skip 1st element\n");
+       kfifo_skip(&test);
+
        /* put values into the fifo until is full */
        for (i = 20; kfifo_put(&test, &i); i++)
                ;
 
        printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
 
-       /* print out all values in the fifo */
-       while (kfifo_get(&test, &i))
-               printk("%d ", i);
-       printk("\n");
+       /* show the first value without removing from the fifo */
+       if (kfifo_peek(&test, &i))
+               printk(KERN_INFO "%d\n", i);
+
+       /* check the correctness of all values in the fifo */
+       j = 0;
+       while (kfifo_get(&test, &i)) {
+               printk(KERN_INFO "item = %d\n", i);
+               if (i != expected_result[j++]) {
+                       printk(KERN_WARNING "value mismatch: test failed\n");
+                       return -EIO;
+               }
+       }
+       if (j != ARRAY_SIZE(expected_result)) {
+               printk(KERN_WARNING "size mismatch: test failed\n");
+               return -EIO;
+       }
+       printk(KERN_INFO "test passed\n");
 
        return 0;
 }
@@ -138,7 +163,12 @@ static int __init example_init(void)
 #else
        INIT_KFIFO(test);
 #endif
-       testfunc();
+       if (testfunc() < 0) {
+#ifdef DYNAMIC
+               kfifo_free(&test);
+#endif
+               return -EIO;
+       }
 
        if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
 #ifdef DYNAMIC
index b9482c28b41aa0a54cb24c483a84499fe0135d9a..ee03a4f0b64f4361af8c850b3123b8d457bec0bf 100644 (file)
@@ -29,8 +29,8 @@ static int __init example_init(void)
        printk(KERN_INFO "DMA fifo test start\n");
 
        if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
-               printk(KERN_ERR "error kfifo_alloc\n");
-               return 1;
+               printk(KERN_WARNING "error kfifo_alloc\n");
+               return -ENOMEM;
        }
 
        printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));
@@ -41,72 +41,99 @@ static int __init example_init(void)
                kfifo_put(&fifo, &i);
 
        /* kick away first byte */
-       ret = kfifo_get(&fifo, &i);
+       kfifo_skip(&fifo);
 
        printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
 
+       /*
+        * Configure the kfifo buffer to receive data from DMA input.
+        *
+        *  .--------------------------------------.
+        *  | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
+        *  |---|------------------|---------------|
+        *   \_/ \________________/ \_____________/
+        *    \          \                  \
+        *     \          \_allocated data   \
+        *      \_*free space*                \_*free space*
+        *
+        * We need two different SG entries: one for the free space area at the
+        * end of the kfifo buffer (19 bytes) and another for the first free
+        * byte at the beginning, after the kfifo_skip().
+        */
+       sg_init_table(sg, ARRAY_SIZE(sg));
        ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
        printk(KERN_INFO "DMA sgl entries: %d\n", ret);
+       if (!ret) {
+               /* fifo is full and no sgl was created */
+               printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
+               return -EIO;
+       }
 
-       /* if 0 was returned, fifo is full and no sgl was created */
-       if (ret) {
-               printk(KERN_INFO "scatterlist for receive:\n");
-               for (i = 0; i < ARRAY_SIZE(sg); i++) {
-                       printk(KERN_INFO
-                       "sg[%d] -> "
-                       "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
-                               i, sg[i].page_link, sg[i].offset, sg[i].length);
+       /* receive data */
+       printk(KERN_INFO "scatterlist for receive:\n");
+       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+               printk(KERN_INFO
+               "sg[%d] -> "
+               "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+                       i, sg[i].page_link, sg[i].offset, sg[i].length);
 
-                       if (sg_is_last(&sg[i]))
-                               break;
-               }
+               if (sg_is_last(&sg[i]))
+                       break;
+       }
 
-               /* but here your code to setup and exectute the dma operation */
-               /* ... */
+       /* put here your code to setup and exectute the dma operation */
+       /* ... */
 
-               /* example: zero bytes received */
-               ret = 0;
+       /* example: zero bytes received */
+       ret = 0;
 
-               /* finish the dma operation and update the received data */
-               kfifo_dma_in_finish(&fifo, ret);
-       }
+       /* finish the dma operation and update the received data */
+       kfifo_dma_in_finish(&fifo, ret);
 
+       /* Prepare to transmit data, example: 8 bytes */
        ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
        printk(KERN_INFO "DMA sgl entries: %d\n", ret);
+       if (!ret) {
+               /* no data was available and no sgl was created */
+               printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
+               return -EIO;
+       }
 
-       /* if 0 was returned, no data was available and no sgl was created */
-       if (ret) {
-               printk(KERN_INFO "scatterlist for transmit:\n");
-               for (i = 0; i < ARRAY_SIZE(sg); i++) {
-                       printk(KERN_INFO
-                       "sg[%d] -> "
-                       "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
-                               i, sg[i].page_link, sg[i].offset, sg[i].length);
+       printk(KERN_INFO "scatterlist for transmit:\n");
+       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+               printk(KERN_INFO
+               "sg[%d] -> "
+               "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+                       i, sg[i].page_link, sg[i].offset, sg[i].length);
 
-                       if (sg_is_last(&sg[i]))
-                               break;
-               }
+               if (sg_is_last(&sg[i]))
+                       break;
+       }
 
-               /* but here your code to setup and exectute the dma operation */
-               /* ... */
+       /* put here your code to setup and exectute the dma operation */
+       /* ... */
 
-               /* example: 5 bytes transmitted */
-               ret = 5;
+       /* example: 5 bytes transmitted */
+       ret = 5;
 
-               /* finish the dma operation and update the transmitted data */
-               kfifo_dma_out_finish(&fifo, ret);
-       }
+       /* finish the dma operation and update the transmitted data */
+       kfifo_dma_out_finish(&fifo, ret);
 
+       ret = kfifo_len(&fifo);
        printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
 
+       if (ret != 7) {
+               printk(KERN_WARNING "size mismatch: test failed");
+               return -EIO;
+       }
+       printk(KERN_INFO "test passed\n");
+
        return 0;
 }
 
 static void __exit example_exit(void)
 {
-#ifdef DYNAMIC
-       kfifo_free(&test);
-#endif
+       kfifo_free(&fifo);
 }
 
 module_init(example_init);
index d6c5b7d9df64ada598c500bc2e1293200fffe006..71b2aabca96aa4d41af62d391a5b6f620fbd6d51 100644 (file)
@@ -44,10 +44,17 @@ static DECLARE_KFIFO_PTR(test, int);
 static DEFINE_KFIFO(test, int, FIFO_SIZE);
 #endif
 
+static const int expected_result[FIFO_SIZE] = {
+        3,  4,  5,  6,  7,  8,  9,  0,
+        1, 20, 21, 22, 23, 24, 25, 26,
+       27, 28, 29, 30, 31, 32, 33, 34,
+       35, 36, 37, 38, 39, 40, 41, 42,
+};
+
 static int __init testfunc(void)
 {
        int             buf[6];
-       int             i;
+       int             i, j;
        unsigned int    ret;
 
        printk(KERN_INFO "int fifo test start\n");
@@ -66,8 +73,13 @@ static int __init testfunc(void)
        ret = kfifo_in(&test, buf, ret);
        printk(KERN_INFO "ret: %d\n", ret);
 
-       for (i = 20; i != 30; i++)
-               kfifo_put(&test, &i);
+       /* skip first element of the fifo */
+       printk(KERN_INFO "skip 1st element\n");
+       kfifo_skip(&test);
+
+       /* put values into the fifo until is full */
+       for (i = 20; kfifo_put(&test, &i); i++)
+               ;
 
        printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
 
@@ -75,10 +87,20 @@ static int __init testfunc(void)
        if (kfifo_peek(&test, &i))
                printk(KERN_INFO "%d\n", i);
 
-       /* print out all values in the fifo */
-       while (kfifo_get(&test, &i))
-               printk("%d ", i);
-       printk("\n");
+       /* check the correctness of all values in the fifo */
+       j = 0;
+       while (kfifo_get(&test, &i)) {
+               printk(KERN_INFO "item = %d\n", i);
+               if (i != expected_result[j++]) {
+                       printk(KERN_WARNING "value mismatch: test failed\n");
+                       return -EIO;
+               }
+       }
+       if (j != ARRAY_SIZE(expected_result)) {
+               printk(KERN_WARNING "size mismatch: test failed\n");
+               return -EIO;
+       }
+       printk(KERN_INFO "test passed\n");
 
        return 0;
 }
@@ -132,7 +154,12 @@ static int __init example_init(void)
                return ret;
        }
 #endif
-       testfunc();
+       if (testfunc() < 0) {
+#ifdef DYNAMIC
+               kfifo_free(&test);
+#endif
+               return -EIO;
+       }
 
        if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
 #ifdef DYNAMIC
index 32c6e0bda7448b7971ce8b9f921373f1f9f06caa..e68bd16a5da43d010f315264931c40132e84632a 100644 (file)
@@ -55,6 +55,19 @@ typedef STRUCT_KFIFO_REC_1(FIFO_SIZE) mytest;
 static mytest test;
 #endif
 
+static const char *expected_result[] = {
+       "a",
+       "bb",
+       "ccc",
+       "dddd",
+       "eeeee",
+       "ffffff",
+       "ggggggg",
+       "hhhhhhhh",
+       "iiiiiiiii",
+       "jjjjjjjjjj",
+};
+
 static int __init testfunc(void)
 {
        char            buf[100];
@@ -75,6 +88,10 @@ static int __init testfunc(void)
                kfifo_in(&test, buf, i + 1);
        }
 
+       /* skip first element of the fifo */
+       printk(KERN_INFO "skip 1st element\n");
+       kfifo_skip(&test);
+
        printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
 
        /* show the first record without removing from the fifo */
@@ -82,11 +99,22 @@ static int __init testfunc(void)
        if (ret)
                printk(KERN_INFO "%.*s\n", ret, buf);
 
-       /* print out all records in the fifo */
+       /* check the correctness of all values in the fifo */
+       i = 0;
        while (!kfifo_is_empty(&test)) {
                ret = kfifo_out(&test, buf, sizeof(buf));
-               printk(KERN_INFO "%.*s\n", ret, buf);
+               buf[ret] = '\0';
+               printk(KERN_INFO "item = %.*s\n", ret, buf);
+               if (strcmp(buf, expected_result[i++])) {
+                       printk(KERN_WARNING "value mismatch: test failed\n");
+                       return -EIO;
+               }
+       }
+       if (i != ARRAY_SIZE(expected_result)) {
+               printk(KERN_WARNING "size mismatch: test failed\n");
+               return -EIO;
        }
+       printk(KERN_INFO "test passed\n");
 
        return 0;
 }
@@ -142,7 +170,12 @@ static int __init example_init(void)
 #else
        INIT_KFIFO(test);
 #endif
-       testfunc();
+       if (testfunc() < 0) {
+#ifdef DYNAMIC
+               kfifo_free(&test);
+#endif
+               return -EIO;
+       }
 
        if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
 #ifdef DYNAMIC
index 0171060b5fd654e66434bc9f3ef3ee9064296d35..e67f054860877d676fc4359ef2f670b5d7531479 100755 (executable)
@@ -159,6 +159,7 @@ my $section_regex;  # Find the start of a section
 my $function_regex;    # Find the name of a function
                        #    (return offset and func name)
 my $mcount_regex;      # Find the call site to mcount (return offset)
+my $mcount_adjust;     # Address adjustment to mcount offset
 my $alignment;         # The .align value to use for $mcount_section
 my $section_type;      # Section header plus possible alignment command
 my $can_use_local = 0;         # If we can use local function references
@@ -213,6 +214,7 @@ $section_regex = "Disassembly of section\\s+(\\S+):";
 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
 $section_type = '@progbits';
+$mcount_adjust = 0;
 $type = ".long";
 
 if ($arch eq "x86_64") {
@@ -351,6 +353,9 @@ if ($arch eq "x86_64") {
 } elsif ($arch eq "microblaze") {
     # Microblaze calls '_mcount' instead of plain 'mcount'.
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+} elsif ($arch eq "blackfin") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
+    $mcount_adjust = -4;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
@@ -511,7 +516,7 @@ while (<IN>) {
     }
     # is this a call site to mcount? If so, record it to print later
     if ($text_found && /$mcount_regex/) {
-       push(@offsets, hex $1);
+       push(@offsets, (hex $1) + $mcount_adjust);
     }
 }
 
index 96bab9469d487fb8bc66c4724f14c51bf86b295a..19358dc14605bae1422ae00226291751695ba44c 100644 (file)
@@ -62,19 +62,14 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        int deleted, connected;
        int error = 0;
 
-       /* Get the root we want to resolve too */
+       /* Get the root we want to resolve too, released below */
        if (flags & PATH_CHROOT_REL) {
                /* resolve paths relative to chroot */
-               read_lock(&current->fs->lock);
-               root = current->fs->root;
-               /* released below */
-               path_get(&root);
-               read_unlock(&current->fs->lock);
+               get_fs_root(current->fs, &root);
        } else {
                /* resolve paths relative to namespace */
                root.mnt = current->nsproxy->mnt_ns->root;
                root.dentry = root.mnt->mnt_root;
-               /* released below */
                path_get(&root);
        }
 
index 42043f96e54f69d0d2cb2ab1f063a7e638d0786f..4796ddd4e721ae454a02563d713aa235870ece02 100644 (file)
@@ -2170,8 +2170,9 @@ static inline void flush_unauthorized_files(const struct cred *cred,
 
        tty = get_current_tty();
        if (tty) {
-               file_list_lock();
+               spin_lock(&tty_files_lock);
                if (!list_empty(&tty->tty_files)) {
+                       struct tty_file_private *file_priv;
                        struct inode *inode;
 
                        /* Revalidate access to controlling tty.
@@ -2179,14 +2180,16 @@ static inline void flush_unauthorized_files(const struct cred *cred,
                           than using file_has_perm, as this particular open
                           file may belong to another process and we are only
                           interested in the inode-based check here. */
-                       file = list_first_entry(&tty->tty_files, struct file, f_u.fu_list);
+                       file_priv = list_first_entry(&tty->tty_files,
+                                               struct tty_file_private, list);
+                       file = file_priv->file;
                        inode = file->f_path.dentry->d_inode;
                        if (inode_has_perm(cred, inode,
                                           FILE__READ | FILE__WRITE, NULL)) {
                                drop_tty = 1;
                        }
                }
-               file_list_unlock();
+               spin_unlock(&tty_files_lock);
                tty_kref_put(tty);
        }
        /* Reset controlling tty. */
index a3b2a6479246deca30152bbb7f84e049634f2662..134fc6c2e08dc01eeda84a730545b0532f0588fe 100644 (file)
@@ -978,6 +978,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
 {
        if (substream->runtime->trigger_master != substream)
                return 0;
+       /* some drivers might use hw_ptr to recover from the pause -
+          update the hw_ptr now */
+       if (push)
+               snd_pcm_update_hw_ptr(substream);
        /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
         * a delta betwen the current jiffies, this gives a large enough
         * delta, effectively to skip the check once.
index 4203782d7cb79bec7e7bf47157b32ee04de1efee..aff8387c45cf2e2955d71ee0ca3446808402feaa 100644 (file)
@@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
 static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
 static int enable_ir[SNDRV_CARDS];
 static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
+static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
@@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444);
 MODULE_PARM_DESC(enable_ir, "Enable IR.");
 module_param_array(subsystem, uint, NULL, 0444);
 MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
+module_param_array(delay_pcm_irq, uint, NULL, 0444);
+MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 2).");
 /*
  * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value  Model:SB0400
  */
@@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
                                      &emu)) < 0)
                goto error;
        card->private_data = emu;
+       emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
        if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
                goto error;
        if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
index 55b83ef73c630e83b2d098a45cce1f570923df7f..622bace148e3c4e5e4de3efa1936b56245004c69 100644 (file)
@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
                evoice->epcm->ccca_start_addr = start_addr + ccis;
                if (extra) {
                        start_addr += ccis;
-                       end_addr += ccis;
+                       end_addr += ccis + emu->delay_pcm_irq;
                }
                if (stereo && !extra) {
                        snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
@@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
        /* Assumption that PT is already 0 so no harm overwriting */
        snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
        snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
-       snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
+       snd_emu10k1_ptr_write(emu, PSST, voice,
+                       (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
+                       (send_amount[2] << 24));
        if (emu->card_capabilities->emu_model)
                pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
        else 
@@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_
        snd_emu10k1_ptr_write(emu, IP, voice, 0);
 }
 
+static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
+               struct snd_emu10k1_pcm *epcm,
+               struct snd_pcm_substream *substream,
+               struct snd_pcm_runtime *runtime)
+{
+       unsigned int ptr, period_pos;
+
+       /* try to synchronize the current position for the interrupt
+          source voice */
+       period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
+       period_pos %= runtime->period_size;
+       ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
+       ptr &= ~0x00ffffff;
+       ptr |= epcm->ccca_start_addr + period_pos;
+       snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
+}
+
 static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
                                        int cmd)
 {
@@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
                /* follow thru */
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        case SNDRV_PCM_TRIGGER_RESUME:
+               if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
+                       snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
                mix = &emu->pcm_mixer[substream->number];
                snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
                snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
@@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream *
 #endif
        /*
        printk(KERN_DEBUG
-              "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
-              ptr, runtime->buffer_size, runtime->period_size);
+              "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
+              (long)ptr, (long)runtime->buffer_size,
+              (long)runtime->period_size);
        */
        return ptr;
 }
index ffb1ddb8dc28ea1d3fbdf574d0c4efbef9772c98..957a311514c8ed9fe34586b518aa463ec279dddd 100644 (file)
@@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
        if (snd_BUG_ON(!hdr))
                return NULL;
 
+       idx = runtime->period_size >= runtime->buffer_size ?
+                                       (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
-       blk = search_empty(emu, runtime->dma_bytes);
+       blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
index 31b5d9eeba68655db32a631160bcf95049b4f1d0..c424952a734e0e48546518fe6b47ccbf959e9aee 100644 (file)
@@ -3049,6 +3049,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
                      CXT5066_DELL_LAPTOP),
        SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
+       SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
index 2cd1ae809e4677a84834654bcdafa3e07ddaf06a..a4dd04524e4391ce7d47f76196b6eeca5e6e9f85 100644 (file)
@@ -19030,6 +19030,7 @@ static int patch_alc888(struct hda_codec *codec)
 /*
  * ALC680 support
  */
+#define ALC680_DIGIN_NID       ALC880_DIGIN_NID
 #define ALC680_DIGOUT_NID      ALC880_DIGOUT_NID
 #define alc680_modes           alc260_modes
 
@@ -19044,23 +19045,93 @@ static hda_nid_t alc680_adc_nids[3] = {
        0x07, 0x08, 0x09
 };
 
+/*
+ * Analog capture ADC change
+ */
+static int alc680_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
+                                     struct hda_codec *codec,
+                                     unsigned int stream_tag,
+                                     unsigned int format,
+                                     struct snd_pcm_substream *substream)
+{
+       struct alc_spec *spec = codec->spec;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
+       unsigned int pre_mic, pre_line;
+
+       pre_mic  = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
+       pre_line = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_LINE]);
+
+       spec->cur_adc_stream_tag = stream_tag;
+       spec->cur_adc_format = format;
+
+       if (pre_mic || pre_line) {
+               if (pre_mic)
+                       snd_hda_codec_setup_stream(codec, 0x08, stream_tag, 0,
+                                                                       format);
+               else
+                       snd_hda_codec_setup_stream(codec, 0x09, stream_tag, 0,
+                                                                       format);
+       } else
+               snd_hda_codec_setup_stream(codec, 0x07, stream_tag, 0, format);
+       return 0;
+}
+
+static int alc680_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
+                                     struct hda_codec *codec,
+                                     struct snd_pcm_substream *substream)
+{
+       snd_hda_codec_cleanup_stream(codec, 0x07);
+       snd_hda_codec_cleanup_stream(codec, 0x08);
+       snd_hda_codec_cleanup_stream(codec, 0x09);
+       return 0;
+}
+
+static struct hda_pcm_stream alc680_pcm_analog_auto_capture = {
+       .substreams = 1, /* can be overridden */
+       .channels_min = 2,
+       .channels_max = 2,
+       /* NID is set in alc_build_pcms */
+       .ops = {
+               .prepare = alc680_capture_pcm_prepare,
+               .cleanup = alc680_capture_pcm_cleanup
+       },
+};
+
 static struct snd_kcontrol_new alc680_base_mixer[] = {
        /* output mixer control */
        HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
        HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
        HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT),
        HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT),
        HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+       HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT),
        { }
 };
 
-static struct snd_kcontrol_new alc680_capture_mixer[] = {
-       HDA_CODEC_VOLUME("Capture Volume", 0x07, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x07, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x08, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x08, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 2, 0x09, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 2, 0x09, 0x0, HDA_INPUT),
+static struct hda_bind_ctls alc680_bind_cap_vol = {
+       .ops = &snd_hda_bind_vol,
+       .values = {
+               HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+               0
+       },
+};
+
+static struct hda_bind_ctls alc680_bind_cap_switch = {
+       .ops = &snd_hda_bind_sw,
+       .values = {
+               HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+               0
+       },
+};
+
+static struct snd_kcontrol_new alc680_master_capture_mixer[] = {
+       HDA_BIND_VOL("Capture Volume", &alc680_bind_cap_vol),
+       HDA_BIND_SW("Capture Switch", &alc680_bind_cap_switch),
        { } /* end */
 };
 
@@ -19068,25 +19139,73 @@ static struct snd_kcontrol_new alc680_capture_mixer[] = {
  * generic initialization of ADC, input mixers and output mixers
  */
 static struct hda_verb alc680_init_verbs[] = {
-       /* Unmute DAC0-1 and set vol = 0 */
-       {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+       {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0},
-       {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
-       {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20},
+       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+       {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+       {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
 
        {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+
+       {0x16, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT   | AC_USRSP_EN},
+       {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT  | AC_USRSP_EN},
+
        { }
 };
 
+/* toggle speaker-output according to the hp-jack state */
+static void alc680_base_setup(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+
+       spec->autocfg.hp_pins[0] = 0x16;
+       spec->autocfg.speaker_pins[0] = 0x14;
+       spec->autocfg.speaker_pins[1] = 0x15;
+       spec->autocfg.input_pins[AUTO_PIN_MIC] = 0x18;
+       spec->autocfg.input_pins[AUTO_PIN_LINE] = 0x19;
+}
+
+static void alc680_rec_autoswitch(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
+       unsigned int present;
+       hda_nid_t new_adc;
+
+       present = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
+
+       new_adc = present ? 0x8 : 0x7;
+       __snd_hda_codec_cleanup_stream(codec, !present ? 0x8 : 0x7, 1);
+       snd_hda_codec_setup_stream(codec, new_adc,
+                                  spec->cur_adc_stream_tag, 0,
+                                  spec->cur_adc_format);
+
+}
+
+static void alc680_unsol_event(struct hda_codec *codec,
+                                          unsigned int res)
+{
+       if ((res >> 26) == ALC880_HP_EVENT)
+               alc_automute_amp(codec);
+       if ((res >> 26) == ALC880_MIC_EVENT)
+               alc680_rec_autoswitch(codec);
+}
+
+static void alc680_inithook(struct hda_codec *codec)
+{
+       alc_automute_amp(codec);
+       alc680_rec_autoswitch(codec);
+}
+
 /* create input playback/capture controls for the given pin */
 static int alc680_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
                                    const char *ctlname, int idx)
@@ -19197,13 +19316,7 @@ static void alc680_auto_init_hp_out(struct hda_codec *codec)
 #define alc680_pcm_analog_capture      alc880_pcm_analog_capture
 #define alc680_pcm_analog_alt_capture  alc880_pcm_analog_alt_capture
 #define alc680_pcm_digital_playback    alc880_pcm_digital_playback
-
-static struct hda_input_mux alc680_capture_source = {
-       .num_items = 1,
-       .items = {
-               { "Mic", 0x0 },
-       },
-};
+#define alc680_pcm_digital_capture     alc880_pcm_digital_capture
 
 /*
  * BIOS auto configuration
@@ -19218,6 +19331,7 @@ static int alc680_parse_auto_config(struct hda_codec *codec)
                                           alc680_ignore);
        if (err < 0)
                return err;
+
        if (!spec->autocfg.line_outs) {
                if (spec->autocfg.dig_outs || spec->autocfg.dig_in_pin) {
                        spec->multiout.max_channels = 2;
@@ -19239,8 +19353,6 @@ static int alc680_parse_auto_config(struct hda_codec *codec)
                add_mixer(spec, spec->kctls.list);
 
        add_verb(spec, alc680_init_verbs);
-       spec->num_mux_defs = 1;
-       spec->input_mux = &alc680_capture_source;
 
        err = alc_auto_add_mic_boost(codec);
        if (err < 0)
@@ -19279,17 +19391,17 @@ static struct snd_pci_quirk alc680_cfg_tbl[] = {
 static struct alc_config_preset alc680_presets[] = {
        [ALC680_BASE] = {
                .mixers = { alc680_base_mixer },
-               .cap_mixer =  alc680_capture_mixer,
+               .cap_mixer =  alc680_master_capture_mixer,
                .init_verbs = { alc680_init_verbs },
                .num_dacs = ARRAY_SIZE(alc680_dac_nids),
                .dac_nids = alc680_dac_nids,
-               .num_adc_nids = ARRAY_SIZE(alc680_adc_nids),
-               .adc_nids = alc680_adc_nids,
-               .hp_nid = 0x04,
                .dig_out_nid = ALC680_DIGOUT_NID,
                .num_channel_mode = ARRAY_SIZE(alc680_modes),
                .channel_mode = alc680_modes,
-               .input_mux = &alc680_capture_source,
+               .unsol_event = alc680_unsol_event,
+               .setup = alc680_base_setup,
+               .init_hook = alc680_inithook,
+
        },
 };
 
@@ -19333,9 +19445,9 @@ static int patch_alc680(struct hda_codec *codec)
                setup_preset(codec, &alc680_presets[board_config]);
 
        spec->stream_analog_playback = &alc680_pcm_analog_playback;
-       spec->stream_analog_capture = &alc680_pcm_analog_capture;
-       spec->stream_analog_alt_capture = &alc680_pcm_analog_alt_capture;
+       spec->stream_analog_capture = &alc680_pcm_analog_auto_capture;
        spec->stream_digital_playback = &alc680_pcm_digital_playback;
+       spec->stream_digital_capture = &alc680_pcm_digital_capture;
 
        if (!spec->adc_nids) {
                spec->adc_nids = alc680_adc_nids;
index f64fb7d988cb57f63876bbc92f0b94d93beb9cfe..ad5202efd7a9e270523cc517e487c6e1997027e3 100644 (file)
@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
                    firmware.firmware.ASIC, firmware.firmware.CODEC,
                    firmware.firmware.AUXDSP, firmware.firmware.PROG);
 
+       if (!chip)
+               return 1;
+
        for (i = 0; i < FIRMWARE_VERSIONS; i++) {
                if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
-                       break;
-       }
-       if (i >= FIRMWARE_VERSIONS)
-               return 0; /* no match */
+                       return 1; /* OK */
 
-       if (!chip)
-               return 1; /* OK */
+       }
 
        snd_printdd("Writing Firmware\n");
        if (!chip->fw_entry) {
index 4e212ed62ea609be9e1b3d45a5ac3fb6efea1a38..f8154e661524c64e1098f318d447ba7bfcda3616 100644 (file)
@@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_LEFT_J:
                iface |= 0x0001;
                break;
-               /* FIXME: CHECK A/B */
-       case SND_SOC_DAIFMT_DSP_A:
-               iface |= 0x0003;
-               break;
-       case SND_SOC_DAIFMT_DSP_B:
-               iface |= 0x0007;
-               break;
        default:
                return -EINVAL;
        }
index 41abb90df50d1fdf97a062e5be387613c7131ee6..4f1fa77c1feb0b7a854ab8a85bd21682cbc66377 100644 (file)
@@ -5,6 +5,12 @@ endif
 # The default target of this Makefile is...
 all::
 
+ifneq ($(OUTPUT),)
+# check that the output directory actually exists
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+endif
+
 # Define V=1 to have a more verbose compile.
 # Define V=2 to have an even more verbose compile.
 #
@@ -157,10 +163,6 @@ all::
 #
 # Define NO_DWARF if you do not want debug-info analysis feature at all.
 
-$(shell sh -c 'mkdir -p $(OUTPUT)scripts/{perl,python}/Perf-Trace-Util/' 2> /dev/null)
-$(shell sh -c 'mkdir -p $(OUTPUT)util/{ui/browsers,scripting-engines}/' 2> /dev/null)
-$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null)
-
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
 -include $(OUTPUT)PERF-VERSION-FILE
@@ -186,8 +188,6 @@ ifeq ($(ARCH),x86_64)
         ARCH := x86
 endif
 
-$(shell sh -c 'mkdir -p $(OUTPUT)arch/$(ARCH)/util/' 2> /dev/null)
-
 # CFLAGS and LDFLAGS are for the users to override from the command line.
 
 #
@@ -268,6 +268,7 @@ export prefix bindir sharedir sysconfdir
 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
 RM = rm -f
+MKDIR = mkdir
 TAR = tar
 FIND = find
 INSTALL = install
@@ -838,6 +839,7 @@ ifndef V
        QUIET_CC       = @echo '   ' CC $@;
        QUIET_AR       = @echo '   ' AR $@;
        QUIET_LINK     = @echo '   ' LINK $@;
+       QUIET_MKDIR    = @echo '   ' MKDIR $@;
        QUIET_BUILT_IN = @echo '   ' BUILTIN $@;
        QUIET_GEN      = @echo '   ' GEN $@;
        QUIET_SUBDIR0  = +@subdir=
@@ -935,15 +937,15 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
        $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
 
 $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
-       $(QUIET_GEN)$(RM) $$@+ && \
+       $(QUIET_GEN)$(RM) $(OUTPUT)$@ $(OUTPUT)$@+ && \
        sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
            -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \
            -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \
            -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
            -e 's/@@NO_CURL@@/$(NO_CURL)/g' \
-           $@.sh >$@+ && \
-       chmod +x $@+ && \
-       mv $@+ $(OUTPUT)$@
+           $@.sh > $(OUTPUT)$@+ && \
+       chmod +x $(OUTPUT)$@+ && \
+       mv $(OUTPUT)$@+ $(OUTPUT)$@
 
 configure: configure.ac
        $(QUIET_GEN)$(RM) $@ $<+ && \
@@ -1012,6 +1014,14 @@ $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
 $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
 builtin-revert.o wt-status.o: wt-status.h
 
+# we compile into subdirectories. If the target directory is not the source directory, they might not exist. So
+# we depend the various files onto their directories.
+DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
+$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+# In the second step, we make a rule to actually create these directories
+$(sort $(dir $(DIRECTORY_DEPS))):
+       $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
+
 $(LIB_FILE): $(LIB_OBJS)
        $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
 
index ddb68e601f0ec2f67558afef92d3506f7661cd82..7a7b6085905382c791834b0c0f35ed5f39658e26 100644 (file)
@@ -113,7 +113,7 @@ endef
 # try-cc
 # Usage: option = $(call try-cc, source-to-build, cc-options)
 try-cc = $(shell sh -c                                           \
-       'TMP="$(TMPOUT).$$$$";                                    \
+       'TMP="$(OUTPUT)$(TMPOUT).$$$$";                           \
         echo "$(1)" |                                            \
         $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \
         rm -f "$$TMP"')
index 55ff792459accf7a054a01b7d7b3dba55cc64281..a90273e63f4fb6939ea64e074513e1afabb1f289 100644 (file)
@@ -146,6 +146,7 @@ static int annotate_browser__run(struct annotate_browser *self,
                return -1;
 
        newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
+       newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
 
        nd = self->curr_hot;
        if (nd) {
@@ -178,7 +179,7 @@ static int annotate_browser__run(struct annotate_browser *self,
        }
 out:
        ui_browser__hide(&self->b);
-       return 0;
+       return es->u.key;
 }
 
 int hist_entry__tui_annotate(struct hist_entry *self)