git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'i2c/for-current' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 Sep 2013 20:44:09 +0000 (13:44 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 Sep 2013 20:44:09 +0000 (13:44 -0700)
Pull i2c fixes from Wolfram Sang:
 "Some driver bugfixes for the I2C subsystem"

* 'i2c/for-current' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux:
  i2c: ismt: initialize DMA buffer
  i2c: designware: 10-bit addressing mode enabling if I2C_DYNAMIC_TAR_UPDATE is set
  i2c: mv64xxx: Do not use writel_relaxed()
  i2c: mv64xxx: Fix some build warnings
  i2c: s3c2410: fix clk_disable/clk_unprepare WARNings

134 files changed:
CREDITS
Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt [moved from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt with 93% similarity]
Documentation/devicetree/bindings/pci/designware-pcie.txt
Documentation/kernel-parameters.txt
Documentation/sound/alsa/HD-Audio-Models.txt
MAINTAINERS
arch/Kconfig
arch/arm/Kconfig
arch/arm/crypto/aes-armv4.S
arch/arm/include/asm/uaccess.h
arch/arm/kernel/entry-common.S
arch/mips/include/asm/cpu-features.h
arch/mips/mm/dma-default.c
arch/openrisc/include/asm/prom.h
arch/powerpc/boot/Makefile
arch/powerpc/boot/epapr-wrapper.c [new file with mode: 0644]
arch/powerpc/boot/epapr.c
arch/powerpc/boot/of.c
arch/powerpc/boot/wrapper
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/processor.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/lib/sstep.c
arch/powerpc/platforms/pseries/smp.c
arch/s390/Kconfig
arch/s390/include/asm/mutex.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/spinlock.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/reboot.c
arch/x86/platform/efi/efi.c
arch/x86/xen/p2m.c
arch/x86/xen/spinlock.c
drivers/acpi/acpi_ipmi.c
drivers/acpi/scan.c
drivers/ata/sata_promise.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/char/tpm/xen-tpmfront.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/hwmon/applesmc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/btree.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/util.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/md/dm-io.c
drivers/md/dm-mpath.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm-stats.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/dm.h
drivers/pci/pci.c
drivers/video/mmp/hw/mmp_ctrl.c
drivers/video/mxsfb.c
drivers/video/neofb.c
drivers/video/of_display_timing.c
drivers/video/omap2/displays-new/Kconfig
drivers/video/omap2/displays-new/connector-analog-tv.c
drivers/video/omap2/displays-new/connector-dvi.c
drivers/video/omap2/displays-new/connector-hdmi.c
drivers/video/omap2/dss/dispc.c
drivers/video/s3fb.c
drivers/xen/balloon.c
fs/bio.c
fs/ocfs2/super.c
fs/reiserfs/journal.c
fs/udf/ialloc.c
fs/udf/super.c
fs/udf/udf_sb.h
include/linux/device-mapper.h
include/linux/memcontrol.h
include/linux/mutex.h
include/linux/of_irq.h
include/linux/smp.h
include/uapi/linux/perf_event.h
ipc/msg.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/audit.c
kernel/events/core.c
kernel/params.c
kernel/reboot.c
kernel/sched/fair.c
kernel/watchdog.c
lib/kobject.c
lib/lockref.c
mm/memcontrol.c
mm/mlock.c
mm/vmscan.c
scripts/checkpatch.pl
sound/core/compress_offload.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
tools/lib/lk/debugfs.c
tools/perf/arch/x86/util/tsc.c
tools/perf/builtin-inject.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/config/feature-tests.mak
tools/perf/util/annotate.c
tools/perf/util/dwarf-aux.c
tools/perf/util/dwarf-aux.h
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/probe-finder.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/symbol-elf.c
tools/perf/util/trace-event-parse.c

diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e6c4404c80ade376feff7b6df406a..0640e16504832e43c2d3b9bb82b6b5fe3f5bea48 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
index 6d1c0988cfc7c7a0466d9f0cc18cc7feacdb0d2b..c67b975c89063f51fa20ae563f601c7c6113fe08 100644 (file)
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
index 8a3d91d47b6af2e3e99d6182e25f21007b129f94..c559f3f36309e57f1eccda86e926771047960cbb 100644 (file)
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
similarity index 93%
rename from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
rename to Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f55f55e87f7647d2d8670c04e470b4..066a78b034ca8589e4de4d31183075768496f81a 100644 (file)
@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
 differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 * compatible: should be
-       - snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
+       - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
 * #address-cells: should be 1.
 * #size-cells: should be 0.
 
index eabcb4b5db6e6711b244ea9a35e7b4ff711c12ef..e216af356847c05ac4ab9f2e9a6ae887b1939f2a 100644 (file)
@@ -1,4 +1,4 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
index 1a036cd972fb0c205109ed66b968c3d771f66418..539a236319907739faded777280b9352a84a284f 100644 (file)
@@ -3485,6 +3485,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                the unplug protocol
                        never -- do not unplug even if version check succeeds
 
+       xen_nopvspin    [X86,XEN]
+                       Disables the ticketlock slowpath using Xen PV
+                       optimizations.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
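
A usage sketch for the parameter documented above: like any kernel command-line option, xen_nopvspin is appended to the boot line in the bootloader configuration. The stanza below is hypothetical (kernel version and root device are placeholders); only the xen_nopvspin token comes from this patch.

    # Hypothetical GRUB entry passing the new option on the kernel command line
    linux /boot/vmlinuz-3.12.0 root=/dev/sda1 ro xen_nopvspin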
index a46ddb85e83a0dcdf0f2a54fd9b745eadb31cb6f..f911e3656209f6c1bdc49a156f322b64dfd3a824 100644 (file)
@@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
   imac27       IMac 27 Inch
   auto         BIOS setup (default)
 
+Cirrus Logic CS4208
+===================
+  mba6         MacBook Air 6,1 and 6,2
+  gpio0                Enable GPIO 0 amp
+  auto         BIOS setup (default)
+
 VIA VT17xx/VT18xx/VT20xx
 ========================
   auto         BIOS setup (default)
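
As a usage sketch, model names from the table above are normally selected through the snd-hda-intel driver's model option; the config file path below is illustrative.

    # /etc/modprobe.d/sound.conf (illustrative path)
    options snd-hda-intel model=mba6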
index e61c2e83fc2b3b1f7570cff0cc6dc12d87aa8059..c53fe9559642cc3eb34477254af3729b3651c418 100644 (file)
@@ -1812,7 +1812,8 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:     Christian Daudt <csd@broadcom.com>
+M:     Christian Daudt <bcm@fixthebug.org>
+L:     bcm-kernel-feedback-list@broadcom.com
 T:     git git://git.github.com/broadcom/bcm11351
 S:     Maintained
 F:     arch/arm/mach-bcm/
@@ -6595,7 +6596,7 @@ S:        Obsolete
 F:     drivers/net/wireless/prism54/
 
 PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M:     Mikael Pettersson <mikpe@it.uu.se>
+M:     Mikael Pettersson <mikpelinux@gmail.com>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     drivers/ata/sata_promise.*
index 1feb169274fe613cc5f0360c797668bf84497c4b..af2cc6eabcc781c4e8f7ee2067de75844ed5874d 100644 (file)
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
 config HAVE_ARCH_JUMP_LABEL
        bool
 
-config HAVE_ARCH_MUTEX_CPU_RELAX
-       bool
-
 config HAVE_RCU_TABLE_FREE
        bool
 
index 3f7714d8d2d216bf3bbd7b4a5b227ea982997554..1ad6fb6c094db415ec76a72a28356e75bdfd7d17 100644 (file)
@@ -2217,8 +2217,7 @@ config NEON
 
 config KERNEL_MODE_NEON
        bool "Support for NEON in kernel mode"
-       default n
-       depends on NEON
+       depends on NEON && AEABI
        help
          Say Y to include support for NEON in kernel mode.
 
index 19d6cd6f29f98b95962cca5643dca61debdf7b8e..3a14ea8fe97e5cac183ad8c9148b816e473b4ae5 100644 (file)
@@ -148,7 +148,7 @@ AES_Te:
 @               const AES_KEY *key) {
 .align 5
 ENTRY(AES_encrypt)
-       sub     r3,pc,#8                @ AES_encrypt
+       adr     r3,AES_encrypt
        stmdb   sp!,{r1,r4-r12,lr}
        mov     r12,r0          @ inp
        mov     r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
 .align 5
 ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
-       sub     r3,pc,#8                @ AES_set_encrypt_key
+       adr     r3,_armv4_AES_set_encrypt_key
        teq     r0,#0
        moveq   r0,#-1
        beq     .Labrt
@@ -843,7 +843,7 @@ AES_Td:
 @               const AES_KEY *key) {
 .align 5
 ENTRY(AES_decrypt)
-       sub     r3,pc,#8                @ AES_decrypt
+       adr     r3,AES_decrypt
        stmdb   sp!,{r1,r4-r12,lr}
        mov     r12,r0          @ inp
        mov     r11,r2
index 7e1f76027f666e252c35bd320d4518e110548c47..72abdc541f38f6e892a24050d1992fc37098f5e3 100644 (file)
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
 
index 74ad15d1a065fba97aac1d4ac43ff33a9e994470..bc6bd9683ba4555d9713e1693b258e6204fc90a5 100644 (file)
@@ -442,10 +442,10 @@ local_restart:
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
 
        add     r1, sp, #S_OFF
-       cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2:     cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
-2:     mov     why, #0                         @ no longer a real syscall
+       mov     why, #0                         @ no longer a real syscall
        b       sys_ni_syscall                  @ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
index 51680d15ca8ec2b2971365573506b39f473170aa..d445d060e346ab689a177bf584cfebd5b9dfca51 100644 (file)
 
 /*
  * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
  * has CLO and CLZ but not DCLO nor DCLZ.  For 64-bit kernels
  * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
  */
index f25a7e9f8cbc56e320df0c6c51cc7bf8a45cb40e..5f8b955125801935f33370559476ca93f640c4df 100644 (file)
@@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 {
        int i;
 
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++) {
-               if (cpu_needs_post_dma_flush(dev))
+       if (cpu_needs_post_dma_flush(dev))
+               for (i = 0; i < nelems; i++, sg++)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
-       }
 }
 
 static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 {
        int i;
 
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++) {
-               if (!plat_device_is_coherent(dev))
+       if (!plat_device_is_coherent(dev))
+               for (i = 0; i < nelems; i++, sg++)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
-       }
 }
 
 int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
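
Both hunks above apply the same loop-invariant hoist: the device-level test gives the same answer for every scatterlist element, so it is lifted out of the loop. A minimal C sketch of the pattern, with hypothetical helper names (needs_sync, sync_one):

static void sync_all(struct device *dev, struct scatterlist *sg, int nelems)
{
        int i;

        if (needs_sync(dev))                    /* invariant: test once */
                for (i = 0; i < nelems; i++, sg++)
                        sync_one(sg);           /* hypothetical per-entry work */
}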
index eb59bfe23e8510c7c812d43496860bcee37974b7..93c9980e1b6b61a93c81dc93a4c0902ee59a451c 100644 (file)
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-
-#include <linux/of.h>  /* linux/of.h gets to determine #include ordering */
-
 #ifndef _ASM_OPENRISC_PROM_H
 #define _ASM_OPENRISC_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
 
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/atomic.h>
-#include <linux/of_irq.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/proc_fs.h>
-#include <linux/platform_device.h>
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-/* Other Prototypes */
-extern int early_uartlite_console(void);
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-               unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev:      the device whose interrupt is to be resolved
- * @out_irq:   structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* _ASM_OPENRISC_PROM_H */
index 6a15c968d21453230ab469b5921fea28b790dcc5..15ca2255f43853945789c1fdc58f4a5bc5a82dbf 100644 (file)
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
 src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
 
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
                                treeboot-walnut.c cuboot-acadia.c \
                                cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
                                        prpmc2800.c
 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644 (file)
index 0000000..c101910
--- /dev/null
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+                               unsigned long r5, unsigned long r6,
+                               unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                  unsigned long r6, unsigned long r7)
+{
+       epapr_platform_init(r3, r4, r5, r6, r7);
+}
index 06c1961bd124a06966da1c5c6a85a6d4452e951a..02e91aa2194a57a66766852444835607143fa29a 100644 (file)
@@ -48,8 +48,8 @@ static void platform_fixups(void)
                       fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
 }
 
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
-                  unsigned long r6, unsigned long r7)
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                        unsigned long r6, unsigned long r7)
 {
        epapr_magic = r6;
        ima_size = r7;
index 61d9899aa0d09d371c99ec70b7959693f8bfab36..62e2f43ec1df1144d3a790e1f3cf3e3263fdca47 100644 (file)
@@ -26,6 +26,9 @@
 
 static unsigned long claim_base;
 
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                        unsigned long r6, unsigned long r7);
+
 static void *of_try_claim(unsigned long size)
 {
        unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
        }
 }
 
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
        platform_ops.image_hdr = of_image_hdr;
        platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
                loader_info.initrd_size = a2;
        }
 }
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                  unsigned long r6, unsigned long r7)
+{
+       /* Detect OF vs. ePAPR boot */
+       if (r5)
+               of_platform_init(r3, r4, (void *)r5);
+       else
+               epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
index 6761c746048df389812888b8430aca4f442e191c..cd7af841ba051f8725e8ab33dffae1a23201fc60 100755 (executable)
@@ -148,18 +148,18 @@ make_space=y
 
 case "$platform" in
 pseries)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x4000000'
     ;;
 maple)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x400000'
     ;;
 pmac|chrp)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     ;;
 coff)
-    platformo="$object/crt0.o $object/of.o"
+    platformo="$object/crt0.o $object/of.o $object/epapr.o"
     lds=$object/zImage.coff.lds
     link_address='0x500000'
     pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
 epapr)
+    platformo="$object/epapr.o $object/epapr-wrapper.o"
     link_address='0x20000000'
     pie=-pie
     ;;
index 0e40843a1c6ed58273c1a0e2eb22841e9007e977..41f13cec8a8fcd01bbd2e02f3cfc50083b3addba 100644 (file)
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-                          struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
index e378cccfca55bb20db094f12a2009ad3ba7bf1a4..ce4de5aed7b5c302b292bd38c69f21bd2f059038 100644 (file)
@@ -149,8 +149,6 @@ typedef struct {
 
 struct thread_struct {
        unsigned long   ksp;            /* Kernel stack pointer */
-       unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
-
 #ifdef CONFIG_PPC64
        unsigned long   ksp_vsid;
 #endif
@@ -162,6 +160,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC32
        void            *pgdir;         /* root of page-table tree */
+       unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
        /*
@@ -321,7 +320,6 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
        .ksp = INIT_SP, \
-       .ksp_limit = INIT_SP_LIMIT, \
        .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
        .fs = KERNEL_DS, \
        .fpr = {{0}}, \
index d8958be5f31a18b7c0bae16509749067aae75265..502c7a4e73f70dc1008754b7f55e5ae164f25a76 100644 (file)
@@ -80,10 +80,11 @@ int main(void)
        DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
 #else
        DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+       DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+       DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
-       DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
        DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
index c69440cef7af43413987f3cd4b2c936cd0e8f788..57d286a78f86f6ff1231c695b9a10a591af30710 100644 (file)
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-       struct thread_info *curtp, *irqtp;
-       unsigned long saved_sp_limit;
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       if (!desc)
-               return;
-
-       /* Switch to the irq stack to handle this */
-       curtp = current_thread_info();
-       irqtp = hardirq_ctx[smp_processor_id()];
-
-       if (curtp == irqtp) {
-               /* We're already on the irq stack, just handle it */
-               desc->handle_irq(irq, desc);
-               return;
-       }
-
-       saved_sp_limit = current->thread.ksp_limit;
-
-       irqtp->task = curtp->task;
-       irqtp->flags = 0;
-
-       /* Copy the softirq bits in preempt_count so that the
-        * softirq checks work in the hardirq context. */
-       irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-                              (curtp->preempt_count & SOFTIRQ_MASK);
-
-       current->thread.ksp_limit = (unsigned long)irqtp +
-               _ALIGN_UP(sizeof(struct thread_info), 16);
-
-       call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-       current->thread.ksp_limit = saved_sp_limit;
-       irqtp->task = NULL;
-
-       /* Set any flag that may have been set on the
-        * alternate stack
-        */
-       if (irqtp->flags)
-               set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
+       struct irq_desc *desc;
        unsigned int irq;
 
        irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
         */
        irq = ppc_md.get_irq();
 
-       /* We can hard enable interrupts now */
+       /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();
 
        /* And finally process it */
-       if (irq != NO_IRQ)
-               handle_one_irq(irq);
-       else
+       if (unlikely(irq == NO_IRQ))
                __get_cpu_var(irq_stat).spurious_irqs++;
+       else {
+               desc = irq_to_desc(irq);
+               if (likely(desc))
+                       desc->handle_irq(irq, desc);
+       }
 
        trace_irq_exit(regs);
 
        irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       struct thread_info *curtp, *irqtp;
+
+       /* Switch to the irq stack to handle this */
+       curtp = current_thread_info();
+       irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+       /* Already there ? */
+       if (unlikely(curtp == irqtp)) {
+               __do_irq(regs);
+               set_irq_regs(old_regs);
+               return;
+       }
+
+       /* Prepare the thread_info in the irq stack */
+       irqtp->task = curtp->task;
+       irqtp->flags = 0;
+
+       /* Copy the preempt_count so that the [soft]irq checks work. */
+       irqtp->preempt_count = curtp->preempt_count;
+
+       /* Switch stack and call */
+       call_do_irq(regs, irqtp);
+
+       /* Restore stack limit */
+       irqtp->task = NULL;
+
+       /* Copy back updates to the thread_info */
+       if (irqtp->flags)
+               set_bits(irqtp->flags, &curtp->flags);
+
        set_irq_regs(old_regs);
 }
 
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
-               tp->preempt_count = 0;
 
                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
-               tp->preempt_count = HARDIRQ_OFFSET;
        }
 }
 
 static inline void do_softirq_onstack(void)
 {
        struct thread_info *curtp, *irqtp;
-       unsigned long saved_sp_limit = current->thread.ksp_limit;
 
        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
-       current->thread.ksp_limit = (unsigned long)irqtp +
-                                   _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
-       current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
 
        /* Set any flag that may have been set on the
index 777d999f563bb377bff3217358aacdc7667f1fd4..2b0ad984536333d7a15a3b39e6c9234c11b4b669 100644 (file)
 
        .text
 
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
 _GLOBAL(call_do_softirq)
        mflr    r0
        stw     r0,4(r1)
+       lwz     r10,THREAD+KSP_LIMIT(r2)
+       addi    r11,r3,THREAD_INFO_GAP
        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
        mr      r1,r3
+       stw     r10,8(r1)
+       stw     r11,THREAD+KSP_LIMIT(r2)
        bl      __do_softirq
+       lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
+       stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
        mflr    r0
        stw     r0,4(r1)
-       mtctr   r6
-       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-       mr      r1,r5
-       bctrl
+       lwz     r10,THREAD+KSP_LIMIT(r2)
+       addi    r11,r3,THREAD_INFO_GAP
+       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+       mr      r1,r4
+       stw     r10,8(r1)
+       stw     r11,THREAD+KSP_LIMIT(r2)
+       bl      __do_irq
+       lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
+       stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr
 
index 971d7e78aff20e1ca801dd923dfc77337e834ad5..e59caf874d05ed60e500579b06bfcdf38bd85125 100644 (file)
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
        mtlr    r0
        blr
 
-_GLOBAL(call_handle_irq)
-       ld      r8,0(r6)
+_GLOBAL(call_do_irq)
        mflr    r0
        std     r0,16(r1)
-       mtctr   r8
-       stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-       mr      r1,r5
-       bctrl
+       stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+       mr      r1,r4
+       bl      .__do_irq
        ld      r1,0(r1)
        ld      r0,16(r1)
        mtlr    r0
index 6f428da53e2085b877334270286069e2ba54da37..96d2fdf3aa9ebe3bba547fd567c5a232be20ec9a 100644 (file)
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        p->thread.ptrace_bps[0] = NULL;
 #endif
index 12e656ffe60ea86a00a531578efd55ef98e3faef..5fe2842e8bab7cc4013c987c74791c00005c51ee 100644 (file)
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
        prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
                     &val, sizeof(val));
 
+       /* Check if it supports "query-cpu-stopped-state" */
+       if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+                        &val, sizeof(val)) != PROM_ERROR)
+               rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
        /* PowerVN takeover hack */
        prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
                = (void *) LOW_ADDR(__secondary_hold_acknowledge);
        unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+       /*
+        * On pseries, if RTAS supports "query-cpu-stopped-state",
+        * we skip this stage, the CPUs will be started by the
+        * kernel using RTAS.
+        */
+       if ((of_platform == PLATFORM_PSERIES ||
+            of_platform == PLATFORM_PSERIES_LPAR) &&
+           rtas_has_query_cpu_stopped) {
+               prom_printf("prom_hold_cpus: skipped\n");
+               return;
+       }
+
        prom_debug("prom_hold_cpus: start...\n");
        prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
        prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         * On non-powermacs, put all CPUs in spin-loops.
         *
         * PowerMacs use a different mechanism to spin CPUs
+        *
+        * (This must be done after instanciating RTAS)
         */
        if (of_platform != PLATFORM_POWERMAC &&
            of_platform != PLATFORM_OPAL)
index a7ee978fb860c9ffd237d9fbafb716b724e3afec..b1faa1593c9067e68995e1cdc54dbb8465dce587 100644 (file)
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                 */
                if ((ra == 1) && !(regs->msr & MSR_PR) \
                        && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
                        /*
                         * Check if we will touch kernel sack overflow
                         */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                                err = -EINVAL;
                                break;
                        }
-
+#endif /* CONFIG_PPC32 */
                        /*
                         * Check if we already set since that means we'll
                         * lose the previous value.
index 1c1771a402501d00328c450cfaeb5942f360d3f4..24f58cb0a543ece1cb92242b36ec8e18c145f5be 100644 (file)
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
 
        alloc_bootmem_cpumask_var(&of_spin_mask);
 
-       /* Mark threads which are still spinning in hold loops. */
-       if (cpu_has_feature(CPU_FTR_SMT)) {
-               for_each_present_cpu(i) { 
-                       if (cpu_thread_in_core(i) == 0)
-                               cpumask_set_cpu(i, of_spin_mask);
-               }
-       } else {
-               cpumask_copy(of_spin_mask, cpu_present_mask);
+       /*
+        * Mark threads which are still spinning in hold loops
+        *
+        * We know prom_init will not have started them if RTAS supports
+        * query-cpu-stopped-state.
+        */
+       if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+               if (cpu_has_feature(CPU_FTR_SMT)) {
+                       for_each_present_cpu(i) {
+                               if (cpu_thread_in_core(i) == 0)
+                                       cpumask_set_cpu(i, of_spin_mask);
+                       }
+               } else
+                       cpumask_copy(of_spin_mask, cpu_present_mask);
+
+               cpumask_clear_cpu(boot_cpuid, of_spin_mask);
        }
 
-       cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
        /* Non-lpar has additional take/give timebase */
        if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
                smp_ops->give_timebase = rtas_give_timebase;
index dcc6ac2d802637ab6e5fb9c004eaa69ba5e01479..7143793859fadf0cbc66c1b2119c5cbaee6368c8 100644 (file)
@@ -93,6 +93,7 @@ config S390
        select ARCH_INLINE_WRITE_UNLOCK_IRQ
        select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
        select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+       select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
        select GENERIC_TIME_VSYSCALL_OLD
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-       select HAVE_ARCH_MUTEX_CPU_RELAX
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
index 688271f5f2e452b9951599550f33ed0ddcfe0a7c..458c1f7fbc1808d48982aa0c5fe89bfe3df2098c 100644 (file)
@@ -7,5 +7,3 @@
  */
 
 #include <asm-generic/mutex-dec.h>
-
-#define arch_mutex_cpu_relax() barrier()
index 0eb37505cab11c71f083ed508f02c98a72127e95..ca7821f07260301f26c2ff9314432b193ad8bebf 100644 (file)
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
        barrier();
 }
 
+#define arch_mutex_cpu_relax()  barrier()
+
 static inline void psw_set_key(unsigned int key)
 {
        asm volatile("spka 0(%0)" : : "d" (key));
index 701fe8c59e1f0e9efc302e711eef5bcfe3db479a..83e5d216105e86a2df2ab1367bcfc3fd1590542a 100644 (file)
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 extern int arch_spin_trylock_retry(arch_spinlock_t *);
 extern void arch_spin_relax(arch_spinlock_t *lock);
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+       return lock.owner_cpu == 0;
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
        int old;
index 6aef9fbc09b7a48ec82c13e04b1f8c761dc34661..b913915e8e631f9c9ec2996fde3862bd33e2edce 100644 (file)
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
        return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
        unsigned long pfn;
-       int ret = 0;
+       int ret;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;
 
-       if (unlikely(mfn >= machine_to_phys_nr)) {
-               pfn = ~0;
-               goto try_override;
-       }
-       pfn = 0;
+       if (unlikely(mfn >= machine_to_phys_nr))
+               return ~0;
+
        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-       /* ret might be < 0 if there are no entries in the m2p for mfn */
        if (ret < 0)
-               pfn = ~0;
-       else if (get_phys_to_machine(pfn) != mfn)
+               return ~0;
+
+       return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+       unsigned long pfn;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return mfn;
+
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) != mfn) {
                /*
                 * If this appears to be a foreign mfn (because the pfn
                 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
                 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
                 */
                pfn = m2p_find_override_pfn(mfn, ~0);
+       }
 
        /* 
         * pfn is ~0 if there are no entries in the m2p for mfn or if the
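
The new mfn_to_pfn_no_overrides() helper leans on __get_user() as a fault-tolerant load, as the comment above describes; a minimal sketch of that idiom (table and idx are illustrative):

unsigned long val;

/* __get_user() returns -EFAULT instead of oopsing when the address
 * is unmapped, so a bad table slot degrades to a sentinel value. */
if (__get_user(val, &table[idx]) < 0)
        val = ~0UL;     /* "no mapping" sentinel, as mfn_to_pfn uses */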
index 8355c84b9729df767945c5f4c8a5f7c5db15fab8..a9c606bb494529d985abf9243237d365da194105 100644 (file)
@@ -1883,9 +1883,9 @@ static struct pmu pmu = {
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
-       userpg->cap_usr_time = 0;
-       userpg->cap_usr_time_zero = 0;
-       userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+       userpg->cap_user_time = 0;
+       userpg->cap_user_time_zero = 0;
+       userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
        userpg->pmc_width = x86_pmu.cntval_bits;
 
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                return;
 
-       userpg->cap_usr_time = 1;
+       userpg->cap_user_time = 1;
        userpg->time_mult = this_cpu_read(cyc2ns);
        userpg->time_shift = CYC2NS_SCALE_FACTOR;
        userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
        if (sched_clock_stable && !check_tsc_disabled()) {
-               userpg->cap_usr_time_zero = 1;
+               userpg->cap_user_time_zero = 1;
                userpg->time_zero = this_cpu_read(cyc2ns_offset);
        }
 }
index 9db76c31b3c311bf1f78633f87bef6853cb3db6b..f31a1655d1ff5bd602239e211b14bdd28d95a79a 100644 (file)
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 55: /* Atom 22nm "Silvermont" */
+       case 77: /* Avoton "Silvermont" */
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
index 8ed44589b0e486eda5a45efa74f3f4d3836a730f..4118f9f683151e99402740f1882b205a261a1465 100644 (file)
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
        box->hrtimer.function = uncore_pmu_hrtimer;
 }
 
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
 {
        struct intel_uncore_box *box;
        int i, size;
 
        size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
 
-       box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+       box = kzalloc_node(size, GFP_KERNEL, node);
        if (!box)
                return NULL;
 
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;
 
-       fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+       fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
        if (!fake_box)
                return -ENOMEM;
 
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        }
 
        type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-       box = uncore_alloc_box(type, 0);
+       box = uncore_alloc_box(type, NUMA_NO_NODE);
        if (!box)
                return -ENOMEM;
 
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
                        if (pmu->func_id < 0)
                                pmu->func_id = j;
 
-                       box = uncore_alloc_box(type, cpu);
+                       box = uncore_alloc_box(type, cpu_to_node(cpu));
                        if (!box)
                                return -ENOMEM;
 
index 563ed91e6faa3a2adac62ddeb8caff7e5ad50f24..e643e744e4d8bf855ce1889bdae58b51001aab45 100644 (file)
@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
        },
        {       /* Handle problems with rebooting on the Precision M6600. */
                .callback = set_pci_reboot,
-               .ident = "Dell OptiPlex 990",
+               .ident = "Dell Precision M6600",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
                },
        },
+       {       /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+               .callback = set_pci_reboot,
+               .ident = "Dell PowerEdge C6100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+               },
+       },
+       {       /* Some C6100 machines were shipped with vendor being 'Dell'. */
+               .callback = set_pci_reboot,
+               .ident = "Dell PowerEdge C6100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+               },
+       },
        { }
 };
 
index 90f6ed127096566ab06c0a9e6f25a355ccdfc048..c7e22ab29a5a2eb6ce3dd75f0fd3e0b26be2d61b 100644 (file)
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_CODE &&
-                   md->type != EFI_BOOT_SERVICES_DATA)
-                       continue;
+               if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+                       if (md->type != EFI_BOOT_SERVICES_CODE &&
+                           md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+                               continue;
+               }
 
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
index 8b901e8d782dadf00091ac88a6fc859c54c4d1de..a61c7d5811beac47e2549cd6fd44118bc5bf7b28 100644 (file)
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
         * frontend pages while they are being shared with the backend,
         * because mfn_to_pfn (that ends up being called by GUPF) will
         * return the backend pfn rather than the frontend pfn. */
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == mfn)
                set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
        return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
         * the original pfn causes mfn_to_pfn(mfn) to return the frontend
         * pfn again. */
        mfn &= ~FOREIGN_FRAME_BIT;
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
                        m2p_find_override(mfn) == NULL)
                set_phys_to_machine(pfn, mfn);
 
index 253f63fceea104978c6e5d1f54ba81e1e3581b20..be6b8607895748304c860c61603d24a29bacfa34 100644 (file)
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
 }
 
 
+/*
+ * Our init of PV spinlocks is split in two init functions due to us
+ * using paravirt patching and jump labels patching and having to do
+ * all of this before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
 void __init xen_init_spinlocks(void)
 {
 
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
                return;
        }
 
-       static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
 
+/*
+ * While the jump_label init code needs to happend _after_ the jump labels are
+ * enabled and before SMP is started. Hence we use pre-SMP initcall level
+ * init. We cannot do it in xen_init_spinlocks as that is done before
+ * jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+       if (!xen_pvspin)
+               return 0;
+
+       static_key_slow_inc(&paravirt_ticketlocks_enabled);
+       return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
 static __init int xen_parse_nopvspin(char *arg)
 {
        xen_pvspin = false;
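
A hedged sketch of the idiom the two comments above describe, with a hypothetical function name: work that pokes jump labels is deferred from early paravirt setup to a pre-SMP initcall.

/* Runs after jump labels are initialized but before secondary CPUs
 * are started -- the window the comments above rely on. */
static __init int defer_to_pre_smp(void)
{
        return 0;
}
early_initcall(defer_to_pre_smp);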
index f40acef80269fa9e6a38435b1c328a7c80552b34..a6977e12d5745ab5f2015e2f2879174bddd64bc0 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/ipmi.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
+#include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
        struct list_head head;
        /* the IPMI request message list */
        struct list_head tx_msg_list;
-       struct mutex    tx_msg_lock;
+       spinlock_t      tx_msg_lock;
        acpi_handle handle;
        struct pnp_dev *pnp_dev;
        ipmi_user_t     user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
        struct acpi_ipmi_device *device;
+       unsigned long flags;
 
        msg = &tx_msg->tx_message;
        /*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 
        /* Get the msgid */
        device = tx_msg->device;
-       mutex_lock(&device->tx_msg_lock);
+       spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
-       mutex_unlock(&device->tx_msg_lock);
+       spin_unlock_irqrestore(&device->tx_msg_lock, flags);
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
        int msg_found = 0;
        struct acpi_ipmi_msg *tx_msg;
        struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+       unsigned long flags;
 
        if (msg->user != ipmi_device->user_interface) {
                dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
                ipmi_free_recv_msg(msg);
                return;
        }
-       mutex_lock(&ipmi_device->tx_msg_lock);
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
                        msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
                }
        }
 
-       mutex_unlock(&ipmi_device->tx_msg_lock);
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        if (!msg_found) {
                dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
                        "returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        struct acpi_ipmi_device *ipmi_device = handler_context;
        int err, rem_time;
        acpi_status status;
+       unsigned long flags;
        /*
         * IPMI opregion message.
         * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
                return AE_NO_MEMORY;
 
        acpi_format_ipmi_msg(tx_msg, address, value);
-       mutex_lock(&ipmi_device->tx_msg_lock);
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
-       mutex_unlock(&ipmi_device->tx_msg_lock);
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        err = ipmi_request_settime(ipmi_device->user_interface,
                                        &tx_msg->addr,
                                        tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        status = AE_OK;
 
 end_label:
-       mutex_lock(&ipmi_device->tx_msg_lock);
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_del(&tx_msg->head);
-       mutex_unlock(&ipmi_device->tx_msg_lock);
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        kfree(tx_msg);
        return status;
 }
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
 
        INIT_LIST_HEAD(&ipmi_device->head);
 
-       mutex_init(&ipmi_device->tx_msg_lock);
+       spin_lock_init(&ipmi_device->tx_msg_lock);
        INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
        ipmi_install_space_handler(ipmi_device);
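
The conversion above replaces the tx_msg_list mutex with an irqsave spinlock, the likely motivation being that the IPMI message handler runs in a context that must not sleep (a mutex can schedule; a spinlock cannot). The resulting pattern, sketched with illustrative names:

static LIST_HEAD(tx_msg_list);
static DEFINE_SPINLOCK(tx_msg_lock);            /* replaces the mutex */

static void queue_tx(struct acpi_ipmi_msg *msg) /* illustrative helper */
{
        unsigned long flags;

        spin_lock_irqsave(&tx_msg_lock, flags); /* never sleeps */
        list_add_tail(&msg->head, &tx_msg_list);
        spin_unlock_irqrestore(&tx_msg_lock, flags);
}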
 
index fbdb82e70d10623c0bbf94aa73723a40fd32e77d..611ce9061dc54c409160538df58fc3eb2c6c4676 100644 (file)
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
 EXPORT_SYMBOL(acpi_bus_register_driver);
 
 /**
- * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
  * @driver: driver to unregister
  *
  * Unregisters a driver with the ACPI bus.  Searches the namespace for all
index 958ba2a420c34b0e5411b116d639214e700f5c47..97f4acb54ad626795972ffb8e35572101d05d08d 100644 (file)
@@ -2,7 +2,7 @@
  *  sata_promise.c - Promise SATA
  *
  *  Maintained by:  Tejun Heo <tj@kernel.org>
- *                 Mikael Pettersson <mikpe@it.uu.se>
+ *                 Mikael Pettersson
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index d2d95ff5353b08a4f49d1c3da4179090751851bf..edfa2515bc8613f952c448194bfcebf7835d75c1 100644 (file)
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
        int err;
        u32 cp;
 
+       memset(&arg64, 0, sizeof(arg64));
        err = 0;
        err |=
            copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
index 639d26b90b9117a56c69f991663f603847cc206c..2b944038453681ef15ba61f41e1cfa3a9e885fbe 100644 (file)
@@ -1193,6 +1193,7 @@ out_passthru:
                ida_pci_info_struct pciinfo;
 
                if (!arg) return -EINVAL;
+               memset(&pciinfo, 0, sizeof(pciinfo));
                pciinfo.bus = host->pci_dev->bus->number;
                pciinfo.dev_fn = host->pci_dev->devfn;
                pciinfo.board_id = host->board_id;
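
The two memset() additions above (cciss and cpqarray) are the standard fix for a kernel information leak: a stack structure that is only partially populated before being handed to user space would otherwise expose stale stack bytes through padding and unwritten fields. A generic sketch of the class of bug, with illustrative names:

struct reply r;                 /* illustrative ioctl reply struct */

memset(&r, 0, sizeof(r));       /* zero padding and unset fields first */
r.status = 0;                   /* then fill only the known fields */
if (copy_to_user(ubuf, &r, sizeof(r)))
        return -EFAULT;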
index 7a7929ba26588dbf07d014bf8bb3cf8f55da509a..06189e55b4e5a0479d2eaab0a31610ef91bf472f 100644 (file)
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        return length;
 }
 
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct tpm_chip *chip = dev_get_drvdata(dev);
-       struct tpm_private *priv = TPM_VPRIV(chip);
-       u8 locality = priv->shr->locality;
-
-       return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t len)
-{
-       struct tpm_chip *chip = dev_get_drvdata(dev);
-       struct tpm_private *priv = TPM_VPRIV(chip);
-       u8 val;
-
-       int rv = kstrtou8(buf, 0, &val);
-       if (rv)
-               return rv;
-
-       priv->shr->locality = val;
-
-       return len;
-}
-
 static const struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
-               tpm_store_locality);
 
 static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
-       &dev_attr_locality.attr,
        NULL,
 };
 
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
        .attrs = vtpm_attrs,
 };
 
-#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)
-
 static const struct tpm_vendor_specific tpm_vtpm = {
        .status = vtpm_status,
        .recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
        .miscdev = {
                .fops = &vtpm_ops,
        },
-       .duration = {
-               TPM_LONG_TIMEOUT,
-               TPM_LONG_TIMEOUT,
-               TPM_LONG_TIMEOUT,
-       },
 };
 
 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
index a1260b4549db647336192d24b3471b6b627897af..d2c3253e015ee23f107d2d34333c6afa533ab7cf 100644 (file)
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
 {
        int ret;
 
+       /* don't keep reloading if cpufreq_driver exists */
+       if (cpufreq_get_current_driver())
+               return 0;
+
        if (acpi_disabled)
                return 0;
 
index 89b3c52cd5c3a61a7961358dea59009c3ce2de02..04548f7023af68dee6b36fc590ec59f02c0216f7 100644 (file)
@@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
 {
        unsigned int ret_freq = 0;
 
+       if (cpufreq_disabled() || !cpufreq_driver)
+               return -ENOENT;
+
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
 
index d514c152fd1a43041e8ff570fca7f9cc2b28f58d..be5380ecdcd43f95c4aa88a62389c184597f3971 100644 (file)
@@ -457,7 +457,7 @@ err_free_table:
        opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 err_put_node:
        of_node_put(np);
-       dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
+       dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
        return ret;
 }
 
index b1f8fc69023fd97f9ee2ac28b3aaf35760aa3323..60e84043aa348fe3ec4293507edeb6b9477e3dc2 100644 (file)
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
                reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
                break;
        case DRM_MODE_DPMS_OFF:
-               /* disable audio and video ports */
-               reg_write(encoder, REG_ENA_AP, 0x00);
+               /* disable video ports */
                reg_write(encoder, REG_ENA_VP_0, 0x00);
                reg_write(encoder, REG_ENA_VP_1, 0x00);
                reg_write(encoder, REG_ENA_VP_2, 0x00);
index 62c2e32e25ef6823290380074d50829af30e5751..98814d12a6040e00a9a56d80686d9392c78b3765 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void)
 {
        struct applesmc_registers *s = &smcreg;
        bool left_light_sensor, right_light_sensor;
+       unsigned int count;
        u8 tmp[1];
        int ret;
 
        if (s->init_complete)
                return 0;
 
-       ret = read_register_count(&s->key_count);
+       ret = read_register_count(&count);
        if (ret)
                return ret;
 
+       if (s->cache && s->key_count != count) {
+               pr_warn("key count changed from %d to %d\n",
+                       s->key_count, count);
+               kfree(s->cache);
+               s->cache = NULL;
+       }
+       s->key_count = count;
+
        if (!s->cache)
                s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
        if (!s->cache)
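
The change above copes with the SMC key count changing between initializations: if a cache already exists but the freshly read count differs, the stale cache is freed before being reallocated. A minimal, self-contained sketch of this invalidate-on-resize pattern (user-space C, names hypothetical):

    #include <stdlib.h>

    struct cache_entry { int value; };

    static struct cache_entry *cache;
    static unsigned int cached_count;

    /* Drop the cache if the backing count changed, then (re)allocate. */
    static int cache_sync(unsigned int count)
    {
            if (cache && cached_count != count) {
                    free(cache);            /* indices no longer line up */
                    cache = NULL;
            }
            cached_count = count;

            if (!cache)
                    cache = calloc(count, sizeof(*cache));
            return cache ? 0 : -1;
    }
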
index b39f6f0b45f27b89f29e580844e41d8d315d9c18..0f12382aa35d6c939b53967b205639134e181eae 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -498,7 +498,7 @@ struct cached_dev {
         */
        atomic_t                has_dirty;
 
-       struct ratelimit        writeback_rate;
+       struct bch_ratelimit    writeback_rate;
        struct delayed_work     writeback_rate_update;
 
        /*
@@ -507,10 +507,9 @@ struct cached_dev {
         */
        sector_t                last_read;
 
-       /* Number of writeback bios in flight */
-       atomic_t                in_flight;
+       /* Limit number of writeback bios in flight */
+       struct semaphore        in_flight;
        struct closure_with_timer writeback;
-       struct closure_waitlist writeback_wait;
 
        struct keybuf           writeback_keys;
 
index 8010eed06a51c8320786a1c49463c98c8aa70555..22d1ae72c2826a53b67656a59ea717a62a3b95a4 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
 
 /* Mergesort */
 
+static void sort_key_next(struct btree_iter *iter,
+                         struct btree_iter_set *i)
+{
+       i->k = bkey_next(i->k);
+
+       if (i->k == i->end)
+               *i = iter->data[--iter->used];
+}
+
 static void btree_sort_fixup(struct btree_iter *iter)
 {
        while (iter->used > 1) {
                struct btree_iter_set *top = iter->data, *i = top + 1;
-               struct bkey *k;
 
                if (iter->used > 2 &&
                    btree_iter_cmp(i[0], i[1]))
                        i++;
 
-               for (k = i->k;
-                    k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
-                    k = bkey_next(k))
-                       if (top->k > i->k)
-                               __bch_cut_front(top->k, k);
-                       else if (KEY_SIZE(k))
-                               bch_cut_back(&START_KEY(k), top->k);
-
-               if (top->k < i->k || k == i->k)
+               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
                        break;
 
-               heap_sift(iter, i - top, btree_iter_cmp);
+               if (!KEY_SIZE(i->k)) {
+                       sort_key_next(iter, i);
+                       heap_sift(iter, i - top, btree_iter_cmp);
+                       continue;
+               }
+
+               if (top->k > i->k) {
+                       if (bkey_cmp(top->k, i->k) >= 0)
+                               sort_key_next(iter, i);
+                       else
+                               bch_cut_front(top->k, i->k);
+
+                       heap_sift(iter, i - top, btree_iter_cmp);
+               } else {
+                       /* can't happen because of comparison func */
+                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+                       bch_cut_back(&START_KEY(i->k), top->k);
+               }
        }
 }
 
index f9764e61978b5749487862b5ab88eb73b13f18ba..f42fc7ed9cd63b14fd4cf54a879dfd9d046584d1 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
 
        return;
 err:
-       bch_cache_set_error(b->c, "io error reading bucket %lu",
+       bch_cache_set_error(b->c, "io error reading bucket %zu",
                            PTR_BUCKET_NR(b->c, &b->key, 0));
 }
 
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
                return SHRINK_STOP;
 
        /* Return -1 if we can't do anything right now */
-       if (sc->gfp_mask & __GFP_WAIT)
+       if (sc->gfp_mask & __GFP_IO)
                mutex_lock(&c->bucket_lock);
        else if (!mutex_trylock(&c->bucket_lock))
                return -1;
index ba95ab84b2be5a5a32292625d229bb29e051a53d..8435f81e5d858012e8aca6be8204e923a34b1d01 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);
 
-               /* Read journal buckets ordered by golden ratio hash to quickly
+               /*
+                * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
                                goto bsearch;
                }
 
-               /* If that fails, check all the buckets we haven't checked
+               /*
+                * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");
 
-               for (l = 0; l < ca->sb.njournal_buckets; l++) {
-                       if (test_bit(l, bitmap))
-                               continue;
-
+               for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
+                    l < ca->sb.njournal_buckets;
+                    l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
                        if (read_bucket(l))
                                goto bsearch;
-               }
+
+               if (list_empty(list))
+                       continue;
 bsearch:
                /* Binary search */
                m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
                                r = m;
                }
 
-               /* Read buckets in reverse order until we stop finding more
+               /*
+                * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
-               pr_debug("finishing up");
+               pr_debug("finishing up: m %u njournal_buckets %u",
+                        m, ca->sb.njournal_buckets);
                l = m;
 
                while (1) {
@@ -228,9 +233,10 @@ bsearch:
                        }
        }
 
-       c->journal.seq = list_entry(list->prev,
-                                   struct journal_replay,
-                                   list)->j.seq;
+       if (!list_empty(list))
+               c->journal.seq = list_entry(list->prev,
+                                           struct journal_replay,
+                                           list)->j.seq;
 
        return 0;
 #undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
                return;
        }
 
-       switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) {
+       switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;
 
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
                if (cl)
                        BUG_ON(!closure_wait(&w->wait, cl));
 
+               closure_flush(&c->journal.io);
                __journal_try_write(c, true);
        }
 }
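
The fallback-search hunk above swaps an open-coded test_bit() loop for find_first_zero_bit()/find_next_zero_bit(), which skip whole words of already-visited buckets at a time. The idiom in isolation (a sketch for kernel context):

    #include <linux/bitops.h>
    #include <linux/printk.h>

    /* Visit every index whose bit is still clear in @bitmap. */
    static void walk_unread(const unsigned long *bitmap, unsigned long nbits)
    {
            unsigned long l;

            for (l = find_first_zero_bit(bitmap, nbits);
                 l < nbits;
                 l = find_next_zero_bit(bitmap, nbits, l + 1))
                    pr_debug("bucket %lu not read yet\n", l);
    }
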
index 786a1a4f74d853fafe3ab2ae263d2ecf780134aa..71eb233b9ace7a23f143170e8f4cc924a2a1de9a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s)
        } else {
                bch_writeback_add(dc);
 
-               if (s->op.flush_journal) {
+               if (bio->bi_rw & REQ_FLUSH) {
                        /* Also need to send a flush to the backing device */
-                       s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-                                                          dc->disk.bio_split);
+                       struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
+                                                            dc->disk.bio_split);
 
-                       bio->bi_size = 0;
-                       bio->bi_vcnt = 0;
-                       closure_bio_submit(bio, cl, s->d);
+                       flush->bi_rw    = WRITE_FLUSH;
+                       flush->bi_bdev  = bio->bi_bdev;
+                       flush->bi_end_io = request_endio;
+                       flush->bi_private = cl;
+
+                       closure_bio_submit(flush, cl, s->d);
                } else {
                        s->op.cache_bio = bio;
                }
index 4fe6ab2fbe2ede59644441521aa4cc2f27f5d312..924dcfdae11102256e1ce193eefc01d82cc173cc 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -223,8 +223,13 @@ STORE(__cached_dev)
        }
 
        if (attr == &sysfs_label) {
-               /* note: endlines are preserved */
-               memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
+               if (size > SB_LABEL_SIZE)
+                       return -EINVAL;
+               memcpy(dc->sb.label, buf, size);
+               if (size < SB_LABEL_SIZE)
+                       dc->sb.label[size] = '\0';
+               if (size && dc->sb.label[size - 1] == '\n')
+                       dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
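
The label fix above replaces an unconditional SB_LABEL_SIZE memcpy(), which could read past the end of a short sysfs write, with a bounded copy that also strips a single trailing newline (as echo appends one). The same logic as a standalone helper (a sketch; copy_label() is hypothetical):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int copy_label(char *dst, size_t dst_size, const char *buf, size_t size)
    {
            if (size > dst_size)
                    return -EINVAL;         /* input would not fit */
            memcpy(dst, buf, size);
            if (size < dst_size)
                    dst[size] = '\0';       /* terminate short labels */
            if (size && dst[size - 1] == '\n')
                    dst[size - 1] = '\0';   /* drop the trailing newline */
            return 0;
    }
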
index 98eb81159a22ba9f9e88fec53127e2e338ae3d4d..420dad545c7d8a01e8b5c18d26db4b25677fd334 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
        stats->last = now ?: 1;
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
+/**
+ * bch_next_delay() - increment @d by the amount of work done, and return how
+ * long to delay until the next time to do some work.
+ *
+ * @d: the struct bch_ratelimit to update
+ * @done: the amount of work done, in arbitrary units
+ *
+ * Returns the amount of time to delay by, in jiffies
+ */
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
        uint64_t now = local_clock();
 
index 1ae2a73ad85f5628b5292b769d79b0f28252f4ca..ea345c6896f47777942b64f88810c99d4cbc278e 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
        (ewma) >> factor;                                               \
 })
 
-struct ratelimit {
+struct bch_ratelimit {
+       /* Next time we want to do some work, in nanoseconds */
        uint64_t                next;
+
+       /*
+        * Rate at which we want to do work, in units per nanosecond
+        * The units here correspond to the units passed to bch_next_delay()
+        */
        unsigned                rate;
 };
 
-static inline void ratelimit_reset(struct ratelimit *d)
+static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
 {
        d->next = local_clock();
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
 
 #define __DIV_SAFE(n, d, zero)                                         \
 ({                                                                     \
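
The comments above pin down the bch_ratelimit contract: next is a virtual deadline in nanoseconds and rate converts work into time, so each unit of completed work pushes the deadline 1/rate ns into the future. The resulting delay calculation, sketched with the return value left in nanoseconds (the real bch_next_delay() converts to jiffies):

    #include <linux/math64.h>
    #include <linux/sched.h>        /* local_clock() in 3.12-era trees */

    struct ratelimit_sketch {
            uint64_t next;          /* deadline, ns */
            unsigned rate;          /* work units per ns */
    };

    static uint64_t next_delay_ns(struct ratelimit_sketch *d, uint64_t done)
    {
            uint64_t now = local_clock();

            d->next += div_u64(done, d->rate);

            /* Ahead of schedule: sleep off the surplus; behind: no delay. */
            return d->next > now ? d->next - now : 0;
    }
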
index 22cbff551628f3c9cff87ccc35563c09974f03b3..ba3ee48320f2a38509adb2603f766c55e67f1da1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
+       uint64_t ret;
+
        if (atomic_read(&dc->disk.detaching) ||
            !dc->writeback_percent)
                return 0;
 
-       return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+       ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+
+       return min_t(uint64_t, ret, HZ);
 }
 
 /* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
 
        up_write(&dc->writeback_lock);
 
-       ratelimit_reset(&dc->writeback_rate);
+       bch_ratelimit_reset(&dc->writeback_rate);
 
        /* Punt to workqueue only so we don't recurse and blow the stack */
        continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
        }
 
        bch_keybuf_del(&dc->writeback_keys, w);
-       atomic_dec_bug(&dc->in_flight);
-
-       closure_wake_up(&dc->writeback_wait);
+       up(&dc->in_flight);
 
        closure_return_with_destructor(cl, dirty_io_destructor);
 }
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
 
        closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-       continue_at(cl, write_dirty_finish, dirty_wq);
+       continue_at(cl, write_dirty_finish, system_wq);
 }
 
 static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
 
        closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-       continue_at(cl, write_dirty, dirty_wq);
+       continue_at(cl, write_dirty, system_wq);
 }
 
 static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
 
                if (delay > 0 &&
                    (KEY_START(&w->key) != dc->last_read ||
-                    jiffies_to_msecs(delay) > 50)) {
-                       w->private = NULL;
-
-                       closure_delay(&dc->writeback, delay);
-                       continue_at(cl, read_dirty, dirty_wq);
-               }
+                    jiffies_to_msecs(delay) > 50))
+                       delay = schedule_timeout_uninterruptible(delay);
 
                dc->last_read   = KEY_OFFSET(&w->key);
 
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
 
                trace_bcache_writeback(&w->key);
 
-               closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
+               down(&dc->in_flight);
+               closure_call(&io->cl, read_dirty_submit, NULL, cl);
 
                delay = writeback_delay(dc, KEY_SIZE(&w->key));
-
-               atomic_inc(&dc->in_flight);
-
-               if (!closure_wait_event(&dc->writeback_wait, cl,
-                                       atomic_read(&dc->in_flight) < 64))
-                       continue_at(cl, read_dirty, dirty_wq);
        }
 
        if (0) {
@@ -442,7 +435,11 @@ err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }
 
-       refill_dirty(cl);
+       /*
+        * Wait for outstanding writeback IOs to finish (and keybuf slots to be
+        * freed) before refilling again
+        */
+       continue_at(cl, refill_dirty, dirty_wq);
 }
 
 /* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
+       sema_init(&dc->in_flight, 64);
        closure_init_unlocked(&dc->writeback);
        init_rwsem(&dc->writeback_lock);
 
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
 
 int __init bch_writeback_init(void)
 {
-       dirty_wq = create_singlethread_workqueue("bcache_writeback");
+       dirty_wq = create_workqueue("bcache_writeback");
        if (!dirty_wq)
                return -ENOMEM;
 
index ea49834377c8e17b6e8b221e4bd58389a8d7c32e..2a20986a2fec9701cd25e443c990f2b7a8479f9f 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,8 +19,6 @@
 #define DM_MSG_PREFIX "io"
 
 #define DM_IO_MAX_REGIONS      BITS_PER_LONG
-#define MIN_IOS                16
-#define MIN_BIOS       16
 
 struct dm_io_client {
        mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
 struct dm_io_client *dm_io_client_create(void)
 {
        struct dm_io_client *client;
+       unsigned min_ios = dm_get_reserved_bio_based_ios();
 
        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);
 
-       client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
+       client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
        if (!client->pool)
                goto bad;
 
-       client->bios = bioset_create(MIN_BIOS, 0);
+       client->bios = bioset_create(min_ios, 0);
        if (!client->bios)
                goto bad;
 
index b759a127f9c3718bbfffe2d16ca258fe4afe15e2..de570a55876451a0326d071da4cf68fd772eec2e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,6 +7,7 @@
 
 #include <linux/device-mapper.h>
 
+#include "dm.h"
 #include "dm-path-selector.h"
 #include "dm-uevent.h"
 
@@ -116,8 +117,6 @@ struct dm_mpath_io {
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
-#define MIN_IOS 256    /* Mempool size */
-
 static struct kmem_cache *_mpio_cache;
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
 static struct multipath *alloc_multipath(struct dm_target *ti)
 {
        struct multipath *m;
+       unsigned min_ios = dm_get_reserved_rq_based_ios();
 
        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                INIT_WORK(&m->trigger_event, trigger_event);
                init_waitqueue_head(&m->pg_init_wait);
                mutex_init(&m->work_mutex);
-               m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
+               m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
                        return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
        case -EREMOTEIO:
        case -EILSEQ:
        case -ENODATA:
+       case -ENOSPC:
                return 1;
        }
 
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
        if (!error && !clone->errors)
                return 0;       /* I/O complete */
 
-       if (noretry_error(error))
+       if (noretry_error(error)) {
+               if ((clone->cmd_flags & REQ_WRITE_SAME) &&
+                   !clone->q->limits.max_write_same_sectors) {
+                       struct queue_limits *limits;
+
+                       /* device doesn't really support WRITE SAME, disable it */
+                       limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
+                       limits->max_write_same_sectors = 0;
+               }
                return error;
+       }
 
        if (mpio->pgpath)
                fail_path(mpio->pgpath);
index 3ac415675b6c778b5dd22aaaf4ee6c2dc4ca48eb..4caa8e6d59d7968584e23a9187bc07f8c81df321 100644 (file)
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
-       flush_work(&req.work);
+       flush_workqueue(ps->metadata_wq);
 
        return req.result;
 }
index c434e5aab2dfc9e6a63ca7700e5ac1c1deacd025..aec57d76db5d616c8e692fa95cee58a8f62a0573 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
  */
 static int init_hash_tables(struct dm_snapshot *s)
 {
-       sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+       sector_t hash_size, cow_dev_size, max_buckets;
 
        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
-       origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();
 
-       hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
+       hash_size = cow_dev_size >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);
 
        if (hash_size < 64)
index 8ae31e8d3d64964652fd1bd8d31d35d3ed49e879..3d404c1371ed2d7e6f4fa052fd4379fbc89ec388 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
        struct dm_stat_percpu *p;
 
        /*
-        * For strict correctness we should use local_irq_disable/enable
+        * For strict correctness we should use local_irq_save/restore
         * instead of preempt_disable/enable.
         *
-        * This is racy if the driver finishes bios from non-interrupt
-        * context as well as from interrupt context or from more different
-        * interrupts.
+        * preempt_disable/enable is racy if the driver finishes bios
+        * from non-interrupt context as well as from interrupt context
+        * or from several different interrupts.
         *
-        * However, the race only results in not counting some events,
-        * so it is acceptable.
+        * On 64-bit architectures the race only results in not counting some
+        * events, so it is acceptable.  On 32-bit architectures the race could
+        * cause the counter going off by 2^32, so we need to do proper locking
+        * there.
         *
         * part_stat_lock()/part_stat_unlock() have this race too.
         */
+#if BITS_PER_LONG == 32
+       unsigned long flags;
+       local_irq_save(flags);
+#else
        preempt_disable();
+#endif
        p = &s->stat_percpu[smp_processor_id()][entry];
 
        if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
                p->ticks[idx] += duration;
        }
 
+#if BITS_PER_LONG == 32
+       local_irq_restore(flags);
+#else
        preempt_enable();
+#endif
 }
 
 static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
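
The rewritten comment above is the heart of the fix: on 32-bit architectures a 64-bit counter update compiles to two stores, so an update from interrupt context racing with one from task context can leave the counter off by 2^32, not merely short one event; interrupts are therefore disabled on 32-bit, while 64-bit keeps the cheaper preempt_disable(). The pattern in isolation (a sketch):

    #include <linux/irqflags.h>
    #include <linux/preempt.h>
    #include <linux/types.h>

    static void counter_add(u64 *counter, u64 delta)
    {
    #if BITS_PER_LONG == 32
            unsigned long flags;

            local_irq_save(flags);  /* a torn update could be off by 2^32 */
            *counter += delta;
            local_irq_restore(flags);
    #else
            preempt_disable();      /* a lost race only drops one event */
            *counter += delta;
            preempt_enable();
    #endif
    }
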
index ed063427d676f64b53f3d9569449fc0daaf174b0..2c0cf511ec2385fa5a558b5d2e1e1ed0c874c9f6 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         * them down to the data device.  The thin device's discard
         * processing will cause mappings to be removed from the btree.
         */
+       ti->discard_zeroes_data_unsupported = true;
        if (pf.discard_enabled && pf.discard_passdown) {
                ti->num_discard_bios = 1;
 
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                 * thin devices' discard limits consistent).
                 */
                ti->discards_supported = true;
-               ti->discard_zeroes_data_unsupported = true;
        }
        ti->private = pt;
 
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
         * They get transferred to the live pool in bind_control_target()
         * called from pool_preresume().
         */
-       if (!pt->adjusted_pf.discard_enabled)
+       if (!pt->adjusted_pf.discard_enabled) {
+               /*
+                * Must explicitly disallow stacking discard limits, otherwise the
+                * block layer will stack them if pool's data device has support.
+                * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
+                * user to see that, so make sure to set all discard limits to 0.
+                */
+               limits->discard_granularity = 0;
                return;
+       }
 
        disable_passdown_if_not_supported(pt);
 
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
        /* In case the pool supports discards, pass them on. */
+       ti->discard_zeroes_data_unsupported = true;
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
                ti->num_discard_bios = 1;
-               ti->discard_zeroes_data_unsupported = true;
                /* Discard bios must be split on a block boundary */
                ti->split_discard_bios = true;
        }
index 6a5e9ed2fcc3eb775268e2e5fc401efa55d6da3d..b3e26c7d141771c74d24726f8b5a56ea2134b156 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -211,10 +211,55 @@ struct dm_md_mempools {
        struct bio_set *bs;
 };
 
-#define MIN_IOS 256
+#define RESERVED_BIO_BASED_IOS         16
+#define RESERVED_REQUEST_BASED_IOS     256
+#define RESERVED_MAX_IOS               1024
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_rq_tio_cache;
 
+/*
+ * Bio-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+
+/*
+ * Request-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+
+static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
+                                     unsigned def, unsigned max)
+{
+       unsigned ios = ACCESS_ONCE(*reserved_ios);
+       unsigned modified_ios = 0;
+
+       if (!ios)
+               modified_ios = def;
+       else if (ios > max)
+               modified_ios = max;
+
+       if (modified_ios) {
+               (void)cmpxchg(reserved_ios, ios, modified_ios);
+               ios = modified_ios;
+       }
+
+       return ios;
+}
+
+unsigned dm_get_reserved_bio_based_ios(void)
+{
+       return __dm_get_reserved_ios(&reserved_bio_based_ios,
+                                    RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
+
+unsigned dm_get_reserved_rq_based_ios(void)
+{
+       return __dm_get_reserved_ios(&reserved_rq_based_ios,
+                                    RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+
 static int __init local_init(void)
 {
        int r = -ENOMEM;
@@ -2277,6 +2322,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
        return md->immutable_target_type;
 }
 
+/*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'.
+ */
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+       BUG_ON(!atomic_read(&md->holders));
+       return &md->queue->limits;
+}
+EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+
 /*
  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
  */
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
 
        if (type == DM_TYPE_BIO_BASED) {
                cachep = _io_cache;
-               pool_size = 16;
+               pool_size = dm_get_reserved_bio_based_ios();
                front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
        } else if (type == DM_TYPE_REQUEST_BASED) {
                cachep = _rq_tio_cache;
-               pool_size = MIN_IOS;
+               pool_size = dm_get_reserved_rq_based_ios();
                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
                /* per_bio_data_size is not used. See __bind_mempools(). */
                WARN_ON(per_bio_data_size != 0);
        } else
                goto out;
 
-       pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
+       pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
        if (!pools->io_pool)
                goto out;
 
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
 
 module_param(major, uint, 0);
 MODULE_PARM_DESC(major, "The major number of the device mapper");
+
+module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+
+module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
index 5e604cc7b4aa26c41db96c84c1fa0f306bcb698e..1d1ad7b7e527e671d1265007b25e2e04217509ab 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
 /*
  * Helpers that are used by DM core
  */
+unsigned dm_get_reserved_bio_based_ios(void);
+unsigned dm_get_reserved_rq_based_ios(void);
+
 static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
 {
        return !maxlen || strlen(result) + 1 >= maxlen;
index e8ccf6c0f08a3d4d5a9b03aec428efddcba77c6f..bdd64b1b4817f7223fa31db19a118db5a9caaaa5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
 
        pci_enable_bridge(dev->bus->self);
 
-       if (pci_is_enabled(dev))
+       if (pci_is_enabled(dev)) {
+               if (!dev->is_busmaster) {
+                       dev_warn(&dev->dev, "driver skipped pci_set_master, fixing it\n");
+                       pci_set_master(dev);
+               }
                return;
+       }
+
        retval = pci_enable_device(dev);
        if (retval)
                dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
index 75dca19bf2149a968f733ed1ff8a0cd468baa745..6ac755270ab46d1a3ef830c89338c0daab519664 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
        if (IS_ERR(ctrl->clk)) {
                dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
                ret = -ENOENT;
-               goto failed_get_clk;
+               goto failed;
        }
        clk_prepare_enable(ctrl->clk);
 
@@ -551,21 +551,8 @@ failed_path_init:
                path_deinit(path_plat);
        }
 
-       if (ctrl->clk) {
-               devm_clk_put(ctrl->dev, ctrl->clk);
-               clk_disable_unprepare(ctrl->clk);
-       }
-failed_get_clk:
-       devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
+       clk_disable_unprepare(ctrl->clk);
 failed:
-       if (ctrl) {
-               if (ctrl->reg_base)
-                       devm_iounmap(ctrl->dev, ctrl->reg_base);
-               devm_release_mem_region(ctrl->dev, res->start,
-                               resource_size(res));
-               devm_kfree(ctrl->dev, ctrl);
-       }
-
        dev_err(&pdev->dev, "device init failed\n");
 
        return ret;
index d250ed0f806d3bf66d858e9f633852f3baa58f4d..27197a8048c0a7aa9ffcbd3c8a2280ae12866dcc 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
                break;
        case 3:
                bits_per_pixel = 32;
+               break;
        case 1:
        default:
                return -EINVAL;
index 7ef079c146e7242c9edee33cc43bc0cd3c850ce1..c172a5281f9e6c9369b0cec236e3bbf2abb875ac 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
                        info->monspecs.modedb, 16)) {
                printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
+               err = -EINVAL;
                goto err_map_video;
        }
 
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
               info->fix.smem_len >> 10, info->var.xres,
               info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
 
-       if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+       err = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (err < 0)
                goto err_map_video;
 
        err = register_framebuffer(info);
index 171821ddd78de381a866e9912bece023a8fcaca5..ba5b40f581f6fdfe1849a3d4d62c8f38ea65dab1 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
                return -EINVAL;
        }
 
-       timing_np = of_find_node_by_name(np, name);
+       timing_np = of_get_child_by_name(np, name);
        if (!timing_np) {
                pr_err("%s: could not find node '%s'\n",
                        of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
        struct display_timings *disp;
 
        if (!np) {
-               pr_err("%s: no devicenode given\n", of_node_full_name(np));
+               pr_err("%s: no device node given\n", of_node_full_name(np));
                return NULL;
        }
 
-       timings_np = of_find_node_by_name(np, "display-timings");
+       timings_np = of_get_child_by_name(np, "display-timings");
        if (!timings_np) {
                pr_err("%s: could not find display-timings node\n",
                        of_node_full_name(np));
index 6c90885b094020f33b74d591db6a93edcbd4aeb7..10b25e7cd878c6e7a5657fd8c1a4865b0935c004 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
 
 config DISPLAY_PANEL_DSI_CM
        tristate "Generic DSI Command Mode Panel"
+       depends on BACKLIGHT_CLASS_DEVICE
        help
          Driver for generic DSI command mode panels.
 
index 1b60698f141ed983e2661743f2a5c79f471a02a6..ccd9073f706f6d59242a5b1866d74dcd2c9bffa6 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index bc5f8ceda371b1b26308db7c644c6913445c8b18..63d88ee6dfe410469215455795bd4b63b031a040 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index c5826716d6abbb8b28e85ccd1d70aaa8c0058b44..9abe2c039ae9c44f0ea2cb2a3362c021e9273df4 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index 02a7340111dfbf4b856122aae670bdbff2c34150..477975009eee87e89c34cc773e2d5a8818f918d6 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(&pdev->dev);
+       pm_runtime_irq_safe(&pdev->dev);
 
        r = dispc_runtime_get();
        if (r)
index 47ca86c5c6c0b3d1d609a001210e5eb132616076..d838ba829459400acc86388cbeda1bab77f57b24 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        (info->var.bits_per_pixel * info->var.xres_virtual);
        if (info->var.yres_virtual < info->var.yres) {
                dev_err(info->device, "virtual vertical size smaller than real\n");
-               goto err_find_mode;
-       }
-
-       /* maximize virtual vertical size for fast scrolling */
-       info->var.yres_virtual = info->fix.smem_len * 8 /
-                       (info->var.bits_per_pixel * info->var.xres_virtual);
-       if (info->var.yres_virtual < info->var.yres) {
-               dev_err(info->device, "virtual vertical size smaller than real\n");
+               rc = -EINVAL;
                goto err_find_mode;
        }
 
index a50c6e3a7cc4824db07f43e84732ee64a58b65cb..b232908a61925724bb61bc0f8a09ecbca6b753e6 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);
 
-       scratch_page = get_balloon_scratch_page();
-
        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
                scrub_page(page);
 
+               /*
+                * Ballooned out frames are effectively replaced with
+                * a scratch frame.  Ensure direct mappings and the
+                * p2m are consistent.
+                */
+               scratch_page = get_balloon_scratch_page();
 #ifdef CONFIG_XEN_HAVE_PVMMU
                if (xen_pv_domain() && !PageHighMem(page)) {
                        ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                        BUG_ON(ret);
                }
 #endif
-       }
-
-       /* Ensure that ballooned highmem pages don't have kmaps. */
-       kmap_flush_unused();
-       flush_tlb_all();
-
-       /* No more mappings: invalidate P2M and add to balloon. */
-       for (i = 0; i < nr_pages; i++) {
-               pfn = mfn_to_pfn(frame_list[i]);
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        unsigned long p;
                        p = page_to_pfn(scratch_page);
                        __set_phys_to_machine(pfn, pfn_to_mfn(p));
                }
+               put_balloon_scratch_page();
+
                balloon_append(pfn_to_page(pfn));
        }
 
-       put_balloon_scratch_page();
+       /* Ensure that ballooned highmem pages don't have kmaps. */
+       kmap_flush_unused();
+       flush_tlb_all();
 
        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents   = nr_pages;
index b3b20ed9510e5ccc285195cce7aa7e3f524ed063..ea5035da4d9a0cd9fd6f5f657bb01272c63175c3 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
                src_p = kmap_atomic(src_bv->bv_page);
                dst_p = kmap_atomic(dst_bv->bv_page);
 
-               memcpy(dst_p + dst_bv->bv_offset,
-                      src_p + src_bv->bv_offset,
+               memcpy(dst_p + dst_offset,
+                      src_p + src_offset,
                       bytes);
 
                kunmap_atomic(dst_p);
index 121da2dc3be841e579dd64fdea6bcfb89f5c121c..d4e81e4a9b0489de2eb66899eb23b9d87ea81ae5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 {
        int tmp, hangup_needed = 0;
        struct ocfs2_super *osb = NULL;
-       char nodestr[8];
+       char nodestr[12];
 
        trace_ocfs2_dismount_volume(sb);
 
index 73feacc49b2ef3bc2b088f7d74ed05d4195a45ac..fd777032c2ba7551dd10fbf9572289b36d09ad27 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
        return NULL;
 }
 
-static int newer_jl_done(struct reiserfs_journal_cnode *cn)
-{
-       struct super_block *sb = cn->sb;
-       b_blocknr_t blocknr = cn->blocknr;
-
-       cn = cn->hprev;
-       while (cn) {
-               if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
-                   atomic_read(&cn->jlist->j_commit_left) != 0)
-                                   return 0;
-               cn = cn->hprev;
-       }
-       return 1;
-}
-
 static void remove_journal_hash(struct super_block *,
                                struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
                reiserfs_warning(s, "clm-2048", "called with wcount %d",
                                 atomic_read(&journal->j_wcount));
        }
-       BUG_ON(jl->j_trans_id == 0);
 
        /* if flushall == 0, the lock is already held */
        if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
        return err;
 }
 
-static int test_transaction(struct super_block *s,
-                            struct reiserfs_journal_list *jl)
-{
-       struct reiserfs_journal_cnode *cn;
-
-       if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
-               return 1;
-
-       cn = jl->j_realblock;
-       while (cn) {
-               /* if the blocknr == 0, this has been cleared from the hash,
-                ** skip it
-                */
-               if (cn->blocknr == 0) {
-                       goto next;
-               }
-               if (cn->bh && !newer_jl_done(cn))
-                       return 0;
-             next:
-               cn = cn->next;
-               cond_resched();
-       }
-       return 0;
-}
-
 static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
                        break;
                tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
        }
+       get_journal_list(jl);
+       get_journal_list(flush_jl);
        /* try to find a group of blocks we can flush across all the
         ** transactions, but only bother if we've actually spanned
         ** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
                ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
        }
        flush_journal_list(s, flush_jl, 1);
+       put_journal_list(s, flush_jl);
+       put_journal_list(s, jl);
        return 0;
 }
 
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
        return 1;
 }
 
-static void flush_old_journal_lists(struct super_block *s)
-{
-       struct reiserfs_journal *journal = SB_JOURNAL(s);
-       struct reiserfs_journal_list *jl;
-       struct list_head *entry;
-       time_t now = get_seconds();
-
-       while (!list_empty(&journal->j_journal_list)) {
-               entry = journal->j_journal_list.next;
-               jl = JOURNAL_LIST_ENTRY(entry);
-               /* this check should always be run, to send old lists to disk */
-               if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
-                   atomic_read(&jl->j_commit_left) == 0 &&
-                   test_transaction(s, jl)) {
-                       flush_used_journal_lists(s, jl);
-               } else {
-                       break;
-               }
-       }
-}
-
 /*
 ** long and ugly.  If flush, will not return until all commit
 ** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
                        }
                }
        }
-       flush_old_journal_lists(sb);
 
        journal->j_current_jl->j_list_bitmap =
            get_list_bitmap(sb, journal->j_current_jl);
index 7e5aae4bf46fd1c1e65da56615238ffe4944fd3b..6eaf5edf1ea1577e88cafc60184963e1b18df5a5 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
 {
        struct super_block *sb = inode->i_sb;
        struct udf_sb_info *sbi = UDF_SB(sb);
+       struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
 
-       mutex_lock(&sbi->s_alloc_mutex);
-       if (sbi->s_lvid_bh) {
-               struct logicalVolIntegrityDescImpUse *lvidiu =
-                                                       udf_sb_lvidiu(sbi);
+       if (lvidiu) {
+               mutex_lock(&sbi->s_alloc_mutex);
                if (S_ISDIR(inode->i_mode))
                        le32_add_cpu(&lvidiu->numDirs, -1);
                else
                        le32_add_cpu(&lvidiu->numFiles, -1);
                udf_updated_lvid(sb);
+               mutex_unlock(&sbi->s_alloc_mutex);
        }
-       mutex_unlock(&sbi->s_alloc_mutex);
 
        udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
 }
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
        struct udf_inode_info *iinfo;
        struct udf_inode_info *dinfo = UDF_I(dir);
+       struct logicalVolIntegrityDescImpUse *lvidiu;
 
        inode = new_inode(sb);
 
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
                return NULL;
        }
 
-       if (sbi->s_lvid_bh) {
-               struct logicalVolIntegrityDescImpUse *lvidiu;
-
+       lvidiu = udf_sb_lvidiu(sb);
+       if (lvidiu) {
                iinfo->i_unique = lvid_get_unique_id(sb);
                mutex_lock(&sbi->s_alloc_mutex);
-               lvidiu = udf_sb_lvidiu(sbi);
                if (S_ISDIR(mode))
                        le32_add_cpu(&lvidiu->numDirs, 1);
                else
index 839a2bad7f45b693db4ed478598b997c42077712..91219385691d8f80d1db9aed3973183bb931a48d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
 static int udf_statfs(struct dentry *, struct kstatfs *);
 static int udf_show_options(struct seq_file *, struct dentry *);
 
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
 {
-       struct logicalVolIntegrityDesc *lvid =
-               (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
-       __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
-       __u32 offset = number_of_partitions * 2 *
-                               sizeof(uint32_t)/sizeof(uint8_t);
+       struct logicalVolIntegrityDesc *lvid;
+       unsigned int partnum;
+       unsigned int offset;
+
+       if (!UDF_SB(sb)->s_lvid_bh)
+               return NULL;
+       lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
+       partnum = le32_to_cpu(lvid->numOfPartitions);
+       if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
+            offsetof(struct logicalVolIntegrityDesc, impUse)) /
+            (2 * sizeof(uint32_t)) < partnum) {
+               udf_err(sb, "Logical volume integrity descriptor corrupted "
+                       "(numOfPartitions = %u)!\n", partnum);
+               return NULL;
+       }
+       /* The offset is to skip freeSpaceTable and sizeTable arrays */
+       offset = partnum * 2 * sizeof(uint32_t);
        return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
 }
 
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        struct udf_options uopt;
        struct udf_sb_info *sbi = UDF_SB(sb);
        int error = 0;
+       struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
 
-       if (sbi->s_lvid_bh) {
-               int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+       if (lvidiu) {
+               int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
                if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
                        return -EACCES;
        }
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
 
        if (!bh)
                return;
-
-       mutex_lock(&sbi->s_alloc_mutex);
        lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
-       lvidiu = udf_sb_lvidiu(sbi);
+       lvidiu = udf_sb_lvidiu(sb);
+       if (!lvidiu)
+               return;
 
+       mutex_lock(&sbi->s_alloc_mutex);
        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
        udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
 
        if (!bh)
                return;
+       lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+       lvidiu = udf_sb_lvidiu(sb);
+       if (!lvidiu)
+               return;
 
        mutex_lock(&sbi->s_alloc_mutex);
-       lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
-       lvidiu = udf_sb_lvidiu(sbi);
        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
        udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 
        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDescImpUse *lvidiu =
-                                                       udf_sb_lvidiu(sbi);
-               uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
-               uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
-               /* uint16_t maxUDFWriteRev =
-                               le16_to_cpu(lvidiu->maxUDFWriteRev); */
+                                                       udf_sb_lvidiu(sb);
+               uint16_t minUDFReadRev;
+               uint16_t minUDFWriteRev;
 
+               if (!lvidiu) {
+                       ret = -EINVAL;
+                       goto error_out;
+               }
+               minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
+               minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
                if (minUDFReadRev > UDF_MAX_READ_VERSION) {
                        udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
-                               le16_to_cpu(lvidiu->minUDFReadRev),
+                               minUDFReadRev,
                                UDF_MAX_READ_VERSION);
                        ret = -EINVAL;
                        goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct logicalVolIntegrityDescImpUse *lvidiu;
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
-       if (sbi->s_lvid_bh != NULL)
-               lvidiu = udf_sb_lvidiu(sbi);
-       else
-               lvidiu = NULL;
-
+       lvidiu = udf_sb_lvidiu(sb);
        buf->f_type = UDF_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
index ed401e94aa8c956dd8685ab33f496fc4ea52eec7..1f32c7bd9f57f21fb2859413cacde08b2949fd3a 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
 
 int udf_compute_nr_groups(struct super_block *sb, u32 partition);
 
index 653073de09e379ef1c8b04c1a96d0ef2c948f5a3..ed419c62dde1876f4561179b6b4bc3052a50ed14 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
 union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
 /*
  * Geometry functions.
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
 
-
 /*-----------------------------------------------------------------
  * Functions for manipulating device-mapper tables.
  *---------------------------------------------------------------*/
index 60e95872da2983365300f1abb193b6e81974b5de..ecc82b37c4ccf00fb863ecf6981de19bbccec52a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
        unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-       VISIT,          /* visit current node */
-       SKIP,           /* skip the current node and continue traversal */
-       SKIP_TREE,      /* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-                                  struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim,
-                                  mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-                                  struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim)
-{
-       return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+                                  struct mem_cgroup *,
+                                  struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
        mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                               gfp_t gfp_mask,
+                                               unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-               struct mem_cgroup *prev,
-               struct mem_cgroup_reclaim_cookie *reclaim,
-               mem_cgroup_iter_filter cond)
-{
-       /* first call must return non-NULL, second return NULL */
-       return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
 {
-       return VISIT;
+       return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)
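
The hunks above restore the plain mem_cgroup_iter() hierarchy walker in place of the iter_cond()/filter variant: a NULL prev starts the walk at the root and a NULL return terminates it. The standard traversal loop this enables (a sketch):

    #include <linux/memcontrol.h>

    /* Visit @root and each descendant memory cgroup exactly once. */
    static void for_each_memcg(struct mem_cgroup *root)
    {
            struct mem_cgroup *iter;

            for (iter = mem_cgroup_iter(root, NULL, NULL);
                 iter;
                 iter = mem_cgroup_iter(root, iter, NULL))
                    /* ... inspect iter ... */ ;
    }
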
index ccd4260834c57da2ea30fc5b20714ef15afe076f..bab49da8a0f0b1bd2e01d516fdd0e39a07162326 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -15,8 +15,8 @@
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
-
 #include <linux/atomic.h>
+#include <asm/processor.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
-#define arch_mutex_cpu_relax() cpu_relax()
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
 #endif
 
 #endif
index 535cecf1e02f7823a1ec3f416c3b06382ea6d8fa..fcd63baee5f28b3361625cc1877b9a275d221204 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
 #ifndef __OF_IRQ_H
 #define __OF_IRQ_H
 
-#if defined(CONFIG_OF)
-struct of_irq;
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
 #include <linux/ioport.h>
 #include <linux/of.h>
 
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently.  However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
-#if defined(CONFIG_OF_IRQ)
 /**
  * of_irq - container for device_node/irq_specifier pair for an irq controller
  * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
 extern int of_irq_count(struct device_node *dev);
 extern int of_irq_to_resource_table(struct device_node *dev,
                struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 extern void of_irq_init(const struct of_device_id *matches);
 
-#endif /* CONFIG_OF_IRQ */
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently.  However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
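
After this reshuffle, irq_of_parse_and_map() and of_irq_find_parent() are declared under CONFIG_OF rather than CONFIG_OF_IRQ, with static inline stubs in the !CONFIG_OF branch, so drivers can call them unconditionally. The usual consumer pattern, as a sketch (np is an assumed struct device_node pointer):

	unsigned int irq = irq_of_parse_and_map(np, 0);
	if (!irq)		/* 0 means no mapping could be created */
		return -EINVAL;
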
index cfb7ca094b384522d2378c2a952bbe788812f1c6..731f5237d5f4e0f87dd817e15983f509d3b4d483 100644 (file)
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) {  }
 
+static inline void __smp_call_function_single(int cpuid,
+               struct call_single_data *data, int wait)
+{
+       on_each_cpu(data->func, data->info, wait);
+}
+
 #endif /* !SMP */
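
On !SMP kernels the new stub simply runs the callback locally via on_each_cpu(), so callers such as the watchdog code further below need no UP special-casing. A hedged usage sketch (my_func and my_arg are hypothetical):

	struct call_single_data csd = { .func = my_func, .info = &my_arg };

	__smp_call_function_single(0, &csd, 1);	/* wait=1: returns after my_func ran */
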
 
 /*
index 40a1fb8073961425249d50110a6de04f856feac9..009a655a5d354c51e20fb34cb12ca653e94f9b71 100644 (file)
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
        union {
                __u64   capabilities;
                struct {
-                       __u64   cap_usr_time            : 1,
-                               cap_usr_rdpmc           : 1,
-                               cap_usr_time_zero       : 1,
-                               cap_____res             : 61;
+                       __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+                               cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */
+
+                               cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
+                               cap_user_time           : 1, /* The time_* fields are used */
+                               cap_user_time_zero      : 1, /* The time_zero field is used */
+                               cap_____res             : 59;
                };
        };
 
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;
+       __u32   size;                   /* Header size up to __reserved[] fields. */
 
                /*
                 * Hole for extension of the self monitor capabilities
                 */
 
-       __u64   __reserved[119];        /* align to 1k */
+       __u8    __reserved[118*8+4];    /* align to 1k. */
 
        /*
         * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP                        = 1,
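
Taken together, these perf_event.h hunks let userspace detect the repaired capability layout at run time: bit 0 is now always 0, bit 1 is always 1, and the header advertises its own size. A hedged detection sketch (pc is an assumed pointer to the mmap'ed page; use_rdpmc is hypothetical):

	int use_rdpmc = 0;

	if (pc->cap_bit0_is_deprecated) {
		/* new kernel: the individual cap_user_* bits are authoritative */
		use_rdpmc = pc->cap_user_rdpmc;
	} else {
		/* old kernel: bit 0 carried several meanings at once; trust none */
	}
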
index b0d541d426771caec217961b031e3de191135ced..9e4310c546ae93fa85f3932675bcd53ae33ece7a 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
        ipc_rmid(&msg_ids(ns), &s->q_perm);
 }
 
+static void msg_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct msg_queue *msq = ipc_rcu_to_struct(p);
+
+       security_msg_queue_free(msq);
+       ipc_rcu_free(head);
+}
+
 /**
  * newque - Create a new msg queue
  * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, ipc_rcu_free);
                return retval;
        }
 
        /* ipc_addid() locks msq upon success. */
        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
        if (id < 0) {
-               security_msg_queue_free(msq);
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, msg_rcu_free);
                return id;
        }
 
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &ns->msg_bytes);
-       security_msg_queue_free(msq);
-       ipc_rcu_putref(msq);
+       ipc_rcu_putref(msq, msg_rcu_free);
 }
 
 /*
@@ -717,7 +724,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                rcu_read_lock();
                ipc_lock_object(&msq->q_perm);
 
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, ipc_rcu_free);
                if (msq->q_perm.deleted) {
                        err = -EIDRM;
                        goto out_unlock0;
index 69b6a21f38441aa437a8f79cb53ae4c4a6a520a1..19c8b980d1feff430d82297a53be528cea801c42 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,6 +243,15 @@ static void merge_queues(struct sem_array *sma)
        }
 }
 
+static void sem_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct sem_array *sma = ipc_rcu_to_struct(p);
+
+       security_sem_free(sma);
+       ipc_rcu_free(head);
+}
+
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
@@ -374,12 +383,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
        sem_lock(sma, NULL, -1);
-       ipc_rcu_putref(sma);
-}
-
-static inline void sem_putref(struct sem_array *sma)
-{
-       ipc_rcu_putref(sma);
+       ipc_rcu_putref(sma, ipc_rcu_free);
 }
 
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +462,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(sma);
        if (retval) {
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return retval;
        }
 
        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (id < 0) {
-               security_sem_free(sma);
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, sem_rcu_free);
                return id;
        }
        ns->used_sems += nsems;
@@ -1047,8 +1050,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
        wake_up_sem_queue_do(&tasks);
        ns->used_sems -= sma->sem_nsems;
-       security_sem_free(sma);
-       ipc_rcu_putref(sma);
+       ipc_rcu_putref(sma, sem_rcu_free);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1294,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                        rcu_read_unlock();
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
 
@@ -1328,20 +1330,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                if(nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
                }
 
                if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
-                       sem_putref(sma);
+                       ipc_rcu_putref(sma, ipc_rcu_free);
                        err = -EFAULT;
                        goto out_free;
                }
 
                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                err = -ERANGE;
                                goto out_free;
                        }
@@ -1629,7 +1631,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
        /* step 2: allocate new undo structure */
        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
-               sem_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return ERR_PTR(-ENOMEM);
        }
 
index 2821cdf93adb39ac83f8a604e6f487590bfe01f7..d69739610fd4384323004c46782116d54db6bbd5 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
        ipc_lock_object(&ipcp->shm_perm);
 }
 
+static void shm_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+       security_shm_free(shp);
+       ipc_rcu_free(head);
+}
+
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
                user_shm_unlock(file_inode(shp->shm_file)->i_size,
                                                shp->mlock_user);
        fput (shp->shm_file);
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
 }
 
 /*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
-               ipc_rcu_putref(shp);
+               ipc_rcu_putref(shp, ipc_rcu_free);
                return error;
        }
 
@@ -566,8 +574,7 @@ no_id:
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
 no_file:
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
        return error;
 }
 
index e829da9ed01f3dbe2973fd232a4f2b889f401c9c..fdb8ae7407755f9b1c3725e2a7fd38a8a31c7f54 100644 (file)
@@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size)
                kfree(ptr);
 }
 
-struct ipc_rcu {
-       struct rcu_head rcu;
-       atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
 /**
  *     ipc_rcu_alloc   -       allocate ipc and rcu space 
  *     @size: size desired
@@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr)
        return atomic_inc_not_zero(&p->refcount);
 }
 
-/**
- * ipc_schedule_free - free ipc + rcu space
- * @head: RCU callback structure for queued work
- */
-static void ipc_schedule_free(struct rcu_head *head)
-{
-       vfree(container_of(head, struct ipc_rcu, rcu));
-}
-
-void ipc_rcu_putref(void *ptr)
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
 {
        struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
 
        if (!atomic_dec_and_test(&p->refcount))
                return;
 
-       if (is_vmalloc_addr(ptr)) {
-               call_rcu(&p->rcu, ipc_schedule_free);
-       } else {
-               kfree_rcu(p, rcu);
-       }
+       call_rcu(&p->rcu, func);
+}
+
+void ipc_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+
+       if (is_vmalloc_addr(p))
+               vfree(p);
+       else
+               kfree(p);
 }
 
 /**
index c5f3338ba1fa7913967c1bc7e9845e0561eeb027..f2f5036f2eeda9794bc545ac8045db3c56f7d425 100644 (file)
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
 static inline void shm_exit_ns(struct ipc_namespace *ns) { }
 #endif
 
+struct ipc_rcu {
+       struct rcu_head rcu;
+       atomic_t refcount;
+} ____cacheline_aligned_in_smp;
+
+#define ipc_rcu_to_struct(p)  ((void *)(p+1))
+
 /*
  * Structure that holds the parameters needed by the ipc operations
  * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
  */
 void* ipc_rcu_alloc(int size);
 int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr);
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
+void ipc_rcu_free(struct rcu_head *head);
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
index 91e53d04b6a9e8841e697dcb290f1206468da21a..7b0e23a740ce345987c33f9e012302c24de0f4db 100644 (file)
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                        sleep_time = timeout_start + audit_backlog_wait_time -
                                        jiffies;
-                       if ((long)sleep_time > 0)
+                       if ((long)sleep_time > 0) {
                                wait_for_auditd(sleep_time);
-                       continue;
+                               continue;
+                       }
                }
                if (audit_rate_check() && printk_ratelimit())
                        printk(KERN_WARNING
index dd236b66ca3a8ef894386e7dcb8c7f02dbba34ae..cb4238e85b38e37886b27dd76351d2590a835924 100644 (file)
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
        *running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+       struct perf_event_mmap_page *userpg;
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       userpg = rb->user_page;
+
+       /* Allow new userspace to detect that bit 0 is deprecated */
+       userpg->cap_bit0_is_deprecated = 1;
+       userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+       rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
        ring_buffer_attach(event, rb);
        rcu_assign_pointer(event->rb, rb);
 
+       perf_event_init_userpage(event);
        perf_event_update_userpage(event);
 
 unlock:
index 81c4e78c8f4cc0b79b89086c3255934079082380..c00d5b502aa487de0f09117c21dfa12eb8923e0c 100644 (file)
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
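
Only the signed types switch parsers here: kstrtoul() rejects a leading minus sign, so with the old table a negative module parameter such as myint=-1 failed with -EINVAL. A quick illustration of the difference:

	long sval;
	unsigned long uval;

	kstrtol("-1", 0, &sval);	/* returns 0, sval == -1 */
	kstrtoul("-1", 0, &uval);	/* returns -EINVAL: no sign accepted */
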
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
index 269ed9384cc4284e9cf6043bd00667ea582c4369..f813b3474646c5b320a19d9a8997349bdd14d68e 100644 (file)
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line).  This is needed so that we can
+ * suppress DMI scanning for reboot quirks.  Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
index 11cd13667359862c58872a9ce6091391a1d40b86..7c70201fbc61aef012d5b2536600d898602bab0a 100644 (file)
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
        }
 
        if (!se) {
-               cfs_rq->h_load = rq->avg.load_avg_contrib;
+               cfs_rq->h_load = cfs_rq->runnable_load_avg;
                cfs_rq->last_h_load_update = now;
        }
 
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
                (busiest->load_per_task * SCHED_POWER_SCALE) /
                busiest->group_power;
 
-       if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-           (scaled_busy_load_per_task * imbn)) {
+       if (busiest->avg_load + scaled_busy_load_per_task >=
+           local->avg_load + (scaled_busy_load_per_task * imbn)) {
                env->imbalance = busiest->load_per_task;
                return;
        }
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         * max load less than avg load(as we skip the groups at or below
         * its cpu_power, while calculating max_load..)
         */
-       if (busiest->avg_load < sds->avg_load) {
+       if (busiest->avg_load <= sds->avg_load ||
+           local->avg_load >= sds->avg_load) {
                env->imbalance = 0;
                return fix_small_imbalance(env, sds);
        }
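
The rewritten condition in fix_small_imbalance() is algebraically the same but avoids unsigned wrap-around: the avg_load values are unsigned, so the old form underflowed to a huge number whenever local->avg_load exceeded busiest->avg_load and took the branch spuriously. Schematically:

	/* old: b - l + x >= x * imbn		(wraps when l > b)		*/
	/* new: b + x >= l + x * imbn		(both sides stay non-negative)	*/
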
index 51c4f34d258ea397266e0dd1a96a16436415a38f..4431610f049ac77888adefe3a335d47d4d232939 100644 (file)
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
        .unpark                 = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+       struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+       int ret;
+
+       /*
+        * No need to cancel and restart hrtimer if it is currently executing
+        * because it will reprogram itself with the new period now.
+        * We should never see it unqueued here because we are running per-cpu
+        * with interrupts disabled.
+        */
+       ret = hrtimer_try_to_cancel(hrtimer);
+       if (ret == 1)
+               hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+                               HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+       struct call_single_data data = {.func = restart_watchdog_hrtimer};
+       /*
+        * Make sure that the perf event counter will adapt to the new
+        * sampling period. Updating the sampling period directly would
+        * be much nicer, but we do not have an API for that now, so
+        * let's use a big hammer.
+        * The hrtimer will pick up the new period on its next tick, but
+        * that might already be too late, so restart the timer as well.
+        */
+       watchdog_nmi_disable(cpu);
+       __smp_call_function_single(cpu, &data, 1);
+       watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+       int cpu;
+
+       get_online_cpus();
+       preempt_disable();
+       for_each_online_cpu(cpu)
+               update_timers(cpu);
+       preempt_enable();
+       put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
        int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
+       } else if (sample_period_changed) {
+               update_timers_all_cpus();
        }
 
        return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int err, old_thresh, old_enabled;
+       static DEFINE_MUTEX(watchdog_proc_mutex);
 
+       mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
-               return err;
+               goto out;
 
        set_sample_period();
        /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
         * watchdog_*_all_cpus() function takes care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
-               err = watchdog_enable_all_cpus();
+               err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
-
+out:
+       mutex_unlock(&watchdog_proc_mutex);
        return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
        set_sample_period();
 
        if (watchdog_user_enabled)
-               watchdog_enable_all_cpus();
+               watchdog_enable_all_cpus(false);
 }
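
Putting the watchdog pieces together, a sysctl write now takes this path, serialized by the new watchdog_proc_mutex (call-flow sketch):

	/*
	 * proc_dowatchdog()
	 *   set_sample_period()
	 *   watchdog_enable_all_cpus(old_thresh != watchdog_thresh)
	 *     update_timers_all_cpus()	(threads already running)
	 *       for each online cpu:
	 *         watchdog_nmi_disable(cpu)
	 *         __smp_call_function_single() -> restart_watchdog_hrtimer()
	 *         watchdog_nmi_enable(cpu)
	 */
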
index 962175134702dace0078edfd0ec7f092bc646536..669bf190d4fb91770f8912277278a6528c4065c3 100644 (file)
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 
 bool kobj_ns_current_may_mount(enum kobj_ns_type type)
 {
-       bool may_mount = false;
-
-       if (type == KOBJ_NS_TYPE_NONE)
-               return true;
+       bool may_mount = true;
 
        spin_lock(&kobj_ns_type_lock);
        if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
index 677d036cf3c70d6d72b9aa5f108470dd890752cd..6f9d434c1521eab9ca0b2821d10936af55f2b703 100644 (file)
@@ -3,6 +3,22 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
-               old.lock_count = cmpxchg64(&lockref->lock_count,                \
-                                          old.lock_count, new.lock_count);     \
+               old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
+                                                  old.lock_count,              \
+                                                  new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
-               cpu_relax();                                                    \
+               arch_mutex_cpu_relax();                                         \
        }                                                                       \
 } while (0)
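
The loop only needs the cmpxchg to be atomic, not to order memory: when it gives up, the callers in lib/lockref.c fall back to taking the spinlock, which supplies the barriers. An architecture opts in by defining cmpxchg64_relaxed before this point; a hedged sketch (the barrier-less primitive is hypothetical):

/* hypothetical asm/cmpxchg.h on a weakly-ordered architecture */
#define cmpxchg64_relaxed(ptr, o, n)	__cmpxchg64_no_barrier((ptr), (o), (n))
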
 
index d5ff3ce13029b2c99b4ed402898ae0c76a143fde..1c52ddbc839ba1f8f42e940c51bc321ba6b2abfe 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/limits.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
+#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
 
        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
+       struct rb_node          tree_node;      /* RB tree node */
+       unsigned long long      usage_in_excess;/* Set to the value by which */
+                                               /* the soft limit is exceeded*/
+       bool                    on_tree;
        struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
                                                /* use container_of        */
 };
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 };
 
+/*
+ * Cgroups above their limits are maintained in a RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+       struct rb_root rb_root;
+       spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+       struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+       struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
 struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        u64 threshold;
@@ -303,22 +328,6 @@ struct mem_cgroup {
        atomic_t        numainfo_events;
        atomic_t        numainfo_updating;
 #endif
-       /*
-        * Protects soft_contributed transitions.
-        * See mem_cgroup_update_soft_limit
-        */
-       spinlock_t soft_lock;
-
-       /*
-        * If true then this group has increased parents' children_in_excess
-        * when it got over the soft limit.
-        * When a group falls bellow the soft limit, parents' children_in_excess
-        * is decreased and soft_contributed changed to false.
-        */
-       bool soft_contributed;
-
-       /* Number of children that are in soft limit excess */
-       atomic_t children_in_excess;
 
        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
  * limit reclaim to prevent infinite loops, if they ever occur.
  */
 #define        MEM_CGROUP_MAX_RECLAIM_LOOPS            100
+#define        MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
 
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
        return mem_cgroup_zoneinfo(memcg, nid, zid);
 }
 
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz,
+                               unsigned long long new_usage_in_excess)
+{
+       struct rb_node **p = &mctz->rb_root.rb_node;
+       struct rb_node *parent = NULL;
+       struct mem_cgroup_per_zone *mz_node;
+
+       if (mz->on_tree)
+               return;
+
+       mz->usage_in_excess = new_usage_in_excess;
+       if (!mz->usage_in_excess)
+               return;
+       while (*p) {
+               parent = *p;
+               mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+                                       tree_node);
+               if (mz->usage_in_excess < mz_node->usage_in_excess)
+                       p = &(*p)->rb_left;
+               /*
+                * We can't avoid mem cgroups that are over their soft
+                * limit by the same amount
+                */
+               else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&mz->tree_node, parent, p);
+       rb_insert_color(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = true;
+}
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       if (!mz->on_tree)
+               return;
+       rb_erase(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = false;
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       spin_lock(&mctz->lock);
+       __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+       spin_unlock(&mctz->lock);
+}
+
+
+static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
+{
+       unsigned long long excess;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+       mctz = soft_limit_tree_from_page(page);
+
+       /*
+        * Necessary to update all ancestors when hierarchy is used,
+        * because their event counters are not touched.
+        */
+       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+               mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+               excess = res_counter_soft_limit_excess(&memcg->res);
+               /*
+                * We have to update the tree if mz is on RB-tree or
+                * mem is over its softlimit.
+                */
+               if (excess || mz->on_tree) {
+                       spin_lock(&mctz->lock);
+                       /* if on-tree, remove it */
+                       if (mz->on_tree)
+                               __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+                       /*
+                        * Insert again. mz->usage_in_excess will be updated.
+                        * If excess is 0, no tree ops.
+                        */
+                       __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
+                       spin_unlock(&mctz->lock);
+               }
+       }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
+{
+       int node, zone;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+
+       for_each_node(node) {
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       mz = mem_cgroup_zoneinfo(memcg, node, zone);
+                       mctz = soft_limit_tree_node_zone(node, zone);
+                       mem_cgroup_remove_exceeded(memcg, mz, mctz);
+               }
+       }
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct rb_node *rightmost = NULL;
+       struct mem_cgroup_per_zone *mz;
+
+retry:
+       mz = NULL;
+       rightmost = rb_last(&mctz->rb_root);
+       if (!rightmost)
+               goto done;              /* Nothing to reclaim from */
+
+       mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+       /*
+        * Remove the node now; someone else can add it back, and
+        * we will add it back at the end of reclaim to its correct
+        * position in the tree.
+        */
+       __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+       if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
+               !css_tryget(&mz->memcg->css))
+               goto retry;
+done:
+       return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct mem_cgroup_per_zone *mz;
+
+       spin_lock(&mctz->lock);
+       mz = __mem_cgroup_largest_soft_limit_node(mctz);
+       spin_unlock(&mctz->lock);
+       return mz;
+}
+
 /*
  * Implementation Note: reading percpu statistics for memcg.
  *
@@ -821,48 +989,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
        return false;
 }
 
-/*
- * Called from rate-limited memcg_check_events when enough
- * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
- * that all the parents up the hierarchy will be notified that this group
- * is in excess or that it is not in excess anymore. mmecg->soft_contributed
- * makes the transition a single action whenever the state flips from one to
- * the other.
- */
-static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
-{
-       unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
-       struct mem_cgroup *parent = memcg;
-       int delta = 0;
-
-       spin_lock(&memcg->soft_lock);
-       if (excess) {
-               if (!memcg->soft_contributed) {
-                       delta = 1;
-                       memcg->soft_contributed = true;
-               }
-       } else {
-               if (memcg->soft_contributed) {
-                       delta = -1;
-                       memcg->soft_contributed = false;
-               }
-       }
-
-       /*
-        * Necessary to update all ancestors when hierarchy is used
-        * because their event counter is not touched.
-        * We track children even outside the hierarchy for the root
-        * cgroup because tree walk starting at root should visit
-        * all cgroups and we want to prevent from pointless tree
-        * walk if no children is below the limit.
-        */
-       while (delta && (parent = parent_mem_cgroup(parent)))
-               atomic_add(delta, &parent->children_in_excess);
-       if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
-               atomic_add(delta, &root_mem_cgroup->children_in_excess);
-       spin_unlock(&memcg->soft_lock);
-}
-
 /*
  * Check events in order.
  *
@@ -886,7 +1012,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 
                mem_cgroup_threshold(memcg);
                if (unlikely(do_softlimit))
-                       mem_cgroup_update_soft_limit(memcg);
+                       mem_cgroup_update_tree(memcg, page);
 #if MAX_NUMNODES > 1
                if (unlikely(do_numainfo))
                        atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1055,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return memcg;
 }
 
-static enum mem_cgroup_filter_t
-mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
-               mem_cgroup_iter_filter cond)
-{
-       if (!cond)
-               return VISIT;
-       return cond(memcg, root);
-}
-
 /*
  * Returns a next (in a pre-order walk) alive memcg (with elevated css
  * ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1062,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
  * helper function to be used by mem_cgroup_iter
  */
 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
-               struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond)
+               struct mem_cgroup *last_visited)
 {
        struct cgroup_subsys_state *prev_css, *next_css;
 
@@ -963,31 +1080,11 @@ skip_node:
        if (next_css) {
                struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
 
-               switch (mem_cgroup_filter(mem, root, cond)) {
-               case SKIP:
+               if (css_tryget(&mem->css))
+                       return mem;
+               else {
                        prev_css = next_css;
                        goto skip_node;
-               case SKIP_TREE:
-                       if (mem == root)
-                               return NULL;
-                       /*
-                        * css_rightmost_descendant is not an optimal way to
-                        * skip through a subtree (especially for imbalanced
-                        * trees leaning to right) but that's what we have right
-                        * now. More effective solution would be traversing
-                        * right-up for first non-NULL without calling
-                        * css_next_descendant_pre afterwards.
-                        */
-                       prev_css = css_rightmost_descendant(next_css);
-                       goto skip_node;
-               case VISIT:
-                       if (css_tryget(&mem->css))
-                               return mem;
-                       else {
-                               prev_css = next_css;
-                               goto skip_node;
-                       }
-                       break;
                }
        }
 
@@ -1051,7 +1148,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * @root: hierarchy root
  * @prev: previously returned memcg, NULL on first invocation
  * @reclaim: cookie for shared reclaim walks, NULL for full walks
- * @cond: filter for visited nodes, NULL for no filter
  *
  * Returns references to children of the hierarchy below @root, or
  * @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1160,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * divide up the memcgs in the hierarchy among all concurrent
  * reclaimers operating on the same zone and priority.
  */
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim,
-                                  mem_cgroup_iter_filter cond)
+                                  struct mem_cgroup_reclaim_cookie *reclaim)
 {
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *last_visited = NULL;
 
-       if (mem_cgroup_disabled()) {
-               /* first call must return non-NULL, second return NULL */
-               return (struct mem_cgroup *)(unsigned long)!prev;
-       }
+       if (mem_cgroup_disabled())
+               return NULL;
 
        if (!root)
                root = root_mem_cgroup;
@@ -1086,9 +1179,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
        if (!root->use_hierarchy && root != root_mem_cgroup) {
                if (prev)
                        goto out_css_put;
-               if (mem_cgroup_filter(root, root, cond) == VISIT)
-                       return root;
-               return NULL;
+               return root;
        }
 
        rcu_read_lock();
@@ -1111,7 +1202,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
                        last_visited = mem_cgroup_iter_load(iter, root, &seq);
                }
 
-               memcg = __mem_cgroup_iter_next(root, last_visited, cond);
+               memcg = __mem_cgroup_iter_next(root, last_visited);
 
                if (reclaim) {
                        mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1213,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
                                reclaim->generation = iter->generation;
                }
 
-               /*
-                * We have finished the whole tree walk or no group has been
-                * visited because filter told us to skip the root node.
-                */
-               if (!memcg && (prev || (cond && !last_visited)))
+               if (prev && !memcg)
                        goto out_unlock;
        }
 out_unlock:
@@ -1767,7 +1854,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
        return total;
 }
 
-#if MAX_NUMNODES > 1
 /**
  * test_mem_cgroup_node_reclaimable
  * @memcg: the target memcg
@@ -1790,6 +1876,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
        return false;
 
 }
+#if MAX_NUMNODES > 1
 
 /*
  * Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1944,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
        return node;
 }
 
+/*
+ * Check all nodes for reclaimable pages.
+ * For a quick scan, we make use of scan_nodes. This allows us to skip
+ * unused nodes. But scan_nodes is lazily updated and may not contain
+ * enough fresh information, so we need to double-check.
+ */
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+{
+       int nid;
+
+       /*
+        * Quick check... making use of scan_nodes.
+        * We can skip unused nodes.
+        */
+       if (!nodes_empty(memcg->scan_nodes)) {
+               for (nid = first_node(memcg->scan_nodes);
+                    nid < MAX_NUMNODES;
+                    nid = next_node(nid, memcg->scan_nodes)) {
+
+                       if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+                               return true;
+               }
+       }
+       /*
+        * Check rest of nodes.
+        */
+       for_each_node_state(nid, N_MEMORY) {
+               if (node_isset(nid, memcg->scan_nodes))
+                       continue;
+               if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+                       return true;
+       }
+       return false;
+}
+
 #else
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 {
        return 0;
 }
 
-#endif
-
-/*
- * A group is eligible for the soft limit reclaim under the given root
- * hierarchy if
- *     a) it is over its soft limit
- *     b) any parent up the hierarchy is over its soft limit
- *
- * If the given group doesn't have any children over the limit then it
- * doesn't make any sense to iterate its subtree.
- */
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
-       struct mem_cgroup *parent;
-
-       if (!memcg)
-               memcg = root_mem_cgroup;
-       parent = memcg;
-
-       if (res_counter_soft_limit_excess(&memcg->res))
-               return VISIT;
+       return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
+}
+#endif
 
-       /*
-        * If any parent up to the root in the hierarchy is over its soft limit
-        * then we have to obey and reclaim from this group as well.
-        */
-       while ((parent = parent_mem_cgroup(parent))) {
-               if (res_counter_soft_limit_excess(&parent->res))
-                       return VISIT;
-               if (parent == root)
+static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
+                                  struct zone *zone,
+                                  gfp_t gfp_mask,
+                                  unsigned long *total_scanned)
+{
+       struct mem_cgroup *victim = NULL;
+       int total = 0;
+       int loop = 0;
+       unsigned long excess;
+       unsigned long nr_scanned;
+       struct mem_cgroup_reclaim_cookie reclaim = {
+               .zone = zone,
+               .priority = 0,
+       };
+
+       excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
+
+       while (1) {
+               victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
+               if (!victim) {
+                       loop++;
+                       if (loop >= 2) {
+                               /*
+                                * If we have not been able to reclaim
+                                * anything, it might because there are
+                                * no reclaimable pages under this hierarchy
+                                */
+                               if (!total)
+                                       break;
+                               /*
+                                * We want to do more targeted reclaim.
+                                * excess >> 2 is not too excessive, so that we
+                                * do not reclaim too much, nor too little, which
+                                * would keep us coming back to this cgroup.
+                                */
+                               if (total >= (excess >> 2) ||
+                                       (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
+                                       break;
+                       }
+                       continue;
+               }
+               if (!mem_cgroup_reclaimable(victim, false))
+                       continue;
+               total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
+                                                    zone, &nr_scanned);
+               *total_scanned += nr_scanned;
+               if (!res_counter_soft_limit_excess(&root_memcg->res))
                        break;
        }
-
-       if (!atomic_read(&memcg->children_in_excess))
-               return SKIP_TREE;
-       return SKIP;
+       mem_cgroup_iter_break(root_memcg, victim);
+       return total;
 }
 
 static DEFINE_SPINLOCK(memcg_oom_lock);
@@ -2812,7 +2953,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
        unlock_page_cgroup(pc);
 
        /*
-        * "charge_statistics" updated event counter.
+        * "charge_statistics" updated event counter. Then, check it.
+        * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
+        * if they exceeds softlimit.
         */
        memcg_check_events(memcg, page);
 }
@@ -4647,6 +4790,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
        return ret;
 }
 
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
+{
+       unsigned long nr_reclaimed = 0;
+       struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+       unsigned long reclaimed;
+       int loop = 0;
+       struct mem_cgroup_tree_per_zone *mctz;
+       unsigned long long excess;
+       unsigned long nr_scanned;
+
+       if (order > 0)
+               return 0;
+
+       mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
+       /*
+        * This loop can run for a while, especially if mem_cgroups
+        * continuously keep exceeding their soft limit and putting the
+        * system under pressure.
+        */
+       do {
+               if (next_mz)
+                       mz = next_mz;
+               else
+                       mz = mem_cgroup_largest_soft_limit_node(mctz);
+               if (!mz)
+                       break;
+
+               nr_scanned = 0;
+               reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
+                                                   gfp_mask, &nr_scanned);
+               nr_reclaimed += reclaimed;
+               *total_scanned += nr_scanned;
+               spin_lock(&mctz->lock);
+
+               /*
+                * If we failed to reclaim anything from this memory cgroup
+                * it is time to move on to the next cgroup
+                */
+               next_mz = NULL;
+               if (!reclaimed) {
+                       do {
+                               /*
+                                * Loop until we find yet another one.
+                                *
+                                * By the time we get the soft_limit lock
+                                * again, someone might have added the
+                                * group back on the RB tree. Iterate to
+                                * make sure we get a different mem.
+                                * mem_cgroup_largest_soft_limit_node returns
+                                * NULL if no other cgroup is present on
+                                * the tree
+                                */
+                               next_mz =
+                               __mem_cgroup_largest_soft_limit_node(mctz);
+                               if (next_mz == mz)
+                                       css_put(&next_mz->memcg->css);
+                               else /* next_mz == NULL or other memcg */
+                                       break;
+                       } while (1);
+               }
+               __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+               excess = res_counter_soft_limit_excess(&mz->memcg->res);
+               /*
+                * One school of thought says that we should not add
+                * back the node to the tree if reclaim returns 0.
+                * But our reclaim could return 0 simply because, due
+                * to priority, we are exposing a smaller subset of
+                * memory to reclaim from. Consider this as a longer
+                * term TODO.
+                */
+               /* If excess == 0, no tree ops */
+               __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
+               spin_unlock(&mctz->lock);
+               css_put(&mz->memcg->css);
+               loop++;
+               /*
+                * Could not reclaim anything and there are no more
+                * mem cgroups to try or we seem to be looping without
+                * reclaiming anything.
+                */
+               if (!nr_reclaimed &&
+                       (next_mz == NULL ||
+                       loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+                       break;
+       } while (!nr_reclaimed);
+       if (next_mz)
+               css_put(&next_mz->memcg->css);
+       return nr_reclaimed;
+}
+
 /**
  * mem_cgroup_force_empty_list - clears LRU of a group
  * @memcg: group to clear
@@ -5911,6 +6146,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                lruvec_init(&mz->lruvec);
+               mz->usage_in_excess = 0;
+               mz->on_tree = false;
                mz->memcg = memcg;
        }
        memcg->nodeinfo[node] = pn;
@@ -5966,6 +6203,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
        int node;
        size_t size = memcg_size();
 
+       mem_cgroup_remove_from_trees(memcg);
        free_css_id(&mem_cgroup_subsys, &memcg->css);
 
        for_each_node(node)
@@ -6002,6 +6240,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
+static void __init mem_cgroup_soft_limit_tree_init(void)
+{
+       struct mem_cgroup_tree_per_node *rtpn;
+       struct mem_cgroup_tree_per_zone *rtpz;
+       int tmp, node, zone;
+
+       for_each_node(node) {
+               tmp = node;
+               if (!node_state(node, N_NORMAL_MEMORY))
+                       tmp = -1;
+               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+               BUG_ON(!rtpn);
+
+               soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       rtpz = &rtpn->rb_tree_per_zone[zone];
+                       rtpz->rb_root = RB_ROOT;
+                       spin_lock_init(&rtpz->lock);
+               }
+       }
+}
+
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -6031,7 +6292,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
        vmpressure_init(&memcg->vmpressure);
-       spin_lock_init(&memcg->soft_lock);
 
        return &memcg->css;
 
@@ -6109,13 +6369,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
        mem_cgroup_invalidate_reclaim_iterators(memcg);
        mem_cgroup_reparent_charges(memcg);
-       if (memcg->soft_contributed) {
-               while ((memcg = parent_mem_cgroup(memcg)))
-                       atomic_dec(&memcg->children_in_excess);
-
-               if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
-                       atomic_dec(&root_mem_cgroup->children_in_excess);
-       }
        mem_cgroup_destroy_all_caches(memcg);
        vmpressure_cleanup(&memcg->vmpressure);
 }
@@ -6790,6 +7043,7 @@ static int __init mem_cgroup_init(void)
 {
        hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        enable_swap_cgroup();
+       mem_cgroup_soft_limit_tree_init();
        memcg_stock_init();
        return 0;
 }
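
The reclaim side of the reinstated scheme always starts with the worst offender: each per-(node, zone) RB-tree is ordered by usage_in_excess, so the rightmost node is the memcg furthest over its soft limit. A minimal sketch of that lookup, mirroring __mem_cgroup_largest_soft_limit_node() above:

	struct rb_node *rightmost = rb_last(&mctz->rb_root);
	struct mem_cgroup_per_zone *worst = rightmost ?
		rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node) : NULL;
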
index d63802663242eb6ad13ca2ed065434f24fd7e5d1..67ba6da7d0e311b1b45007bbd0e37bd78db0f3a8 100644 (file)
@@ -736,6 +736,7 @@ static int do_mlockall(int flags)
 
                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+               cond_resched();
        }
 out:
        return 0;
index 8ed1b775bdc9cafe9aaf2ab74e4acf842ffe1a9d..beb35778c69f147e47c30b44ee151953c68c4969 100644 (file)
@@ -139,23 +139,11 @@ static bool global_reclaim(struct scan_control *sc)
 {
        return !sc->target_mem_cgroup;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-       struct mem_cgroup *root = sc->target_mem_cgroup;
-       return !mem_cgroup_disabled() &&
-               mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
        return true;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-       return false;
-}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2176,11 +2164,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
        }
 }
 
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
        unsigned long nr_reclaimed, nr_scanned;
-       int groups_scanned = 0;
 
        do {
                struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2174,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                        .zone = zone,
                        .priority = sc->priority,
                };
-               struct mem_cgroup *memcg = NULL;
-               mem_cgroup_iter_filter filter = (soft_reclaim) ?
-                       mem_cgroup_soft_reclaim_eligible : NULL;
+               struct mem_cgroup *memcg;
 
                nr_reclaimed = sc->nr_reclaimed;
                nr_scanned = sc->nr_scanned;
 
-               while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+               memcg = mem_cgroup_iter(root, NULL, &reclaim);
+               do {
                        struct lruvec *lruvec;
 
-                       groups_scanned++;
                        lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
                        shrink_lruvec(lruvec, sc);
@@ -2218,7 +2202,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                                mem_cgroup_iter_break(root, memcg);
                                break;
                        }
-               }
+                       memcg = mem_cgroup_iter(root, memcg, &reclaim);
+               } while (memcg);
 
                vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
                           sc->nr_scanned - nr_scanned,
@@ -2226,37 +2211,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 
        } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
                                         sc->nr_scanned - nr_scanned, sc));
-
-       return groups_scanned;
-}
-
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
-       bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
-       unsigned long nr_scanned = sc->nr_scanned;
-       int scanned_groups;
-
-       scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-       /*
-        * memcg iterator might race with other reclaimer or start from
-        * a incomplete tree walk so the tree walk in __shrink_zone
-        * might have missed groups that are above the soft limit. Try
-        * another loop to catch up with others. Do it just once to
-        * prevent from reclaim latencies when other reclaimers always
-        * preempt this one.
-        */
-       if (do_soft_reclaim && !scanned_groups)
-               __shrink_zone(zone, sc, do_soft_reclaim);
-
-       /*
-        * No group is over the soft limit or those that are do not have
-        * pages in the zone we are reclaiming so we have to reclaim everybody
-        */
-       if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
-               __shrink_zone(zone, sc, false);
-               return;
-       }
 }
 
 /* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2274,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        bool aborted_reclaim = false;
 
        /*
@@ -2359,6 +2315,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                                        continue;
                                }
                        }
+                       /*
+                        * This steals pages from memory cgroups over their
+                        * soft limit and returns the number of reclaimed and
+                        * scanned pages. This works for global memory pressure
+                        * and balancing, not for a memcg's limit.
+                        */
+                       nr_soft_scanned = 0;
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                               sc->order, sc->gfp_mask,
+                                               &nr_soft_scanned);
+                       sc->nr_reclaimed += nr_soft_reclaimed;
+                       sc->nr_scanned += nr_soft_scanned;
                        /* need some check to avoid more shrink_zone() */
                }
 
@@ -2952,6 +2920,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
        int i;
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .priority = DEF_PRIORITY,
@@ -3066,6 +3036,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
                        sc.nr_scanned = 0;
 
+                       nr_soft_scanned = 0;
+                       /*
+                        * Call soft limit reclaim before calling shrink_zone.
+                        */
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                       order, sc.gfp_mask,
+                                                       &nr_soft_scanned);
+                       sc.nr_reclaimed += nr_soft_reclaimed;
+
                        /*
                         * There should be no need to raise the scanning
                         * priority if enough pages are already being scanned
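
The restored shrink_zone() above walks every memory cgroup in the hierarchy
below the reclaim target, and the iterator protocol matters: the walk starts
by passing NULL as the previous position, each step passes the cgroup just
visited, and an early exit must go through mem_cgroup_iter_break() so the
reference the iterator still holds is released. A minimal sketch of that
protocol, assuming the mem_cgroup_iter() API as used in this tree
(enough_reclaimed is an illustrative condition, not a real symbol):

	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(root, NULL, &reclaim);	/* first position */
	do {
		/* ... shrink this cgroup's lruvec for the zone ... */
		if (enough_reclaimed) {
			/* leaving early: drop the iterator's reference */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		memcg = mem_cgroup_iter(root, memcg, &reclaim);
	} while (memcg);
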
index 47016c304c847efffb96da5358224a9b5a93cc5e..66cad506b8a2a944f2873856ef0078445f673b8b 100755 (executable)
@@ -3975,8 +3975,8 @@ sub string_find_replace {
 # check for new externs in .h files.
                if ($realfile =~ /\.h$/ &&
                    $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
-                       if (WARN("AVOID_EXTERNS",
-                                "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+                       if (CHK("AVOID_EXTERNS",
+                               "extern prototypes should be avoided in .h files\n" . $herecurr) &&
                            $fix) {
                                $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
                        }
index 98969541cbcc9c62ff81afb9818d2399dd6e7eb0..bea523a5d852e11f9d06e1d682e065b699f347c8 100644 (file)
@@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f)
 static int snd_compr_free(struct inode *inode, struct file *f)
 {
        struct snd_compr_file *data = f->private_data;
+       struct snd_compr_runtime *runtime = data->stream.runtime;
+
+       switch (runtime->state) {
+       case SNDRV_PCM_STATE_RUNNING:
+       case SNDRV_PCM_STATE_DRAINING:
+       case SNDRV_PCM_STATE_PAUSED:
+               data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+               break;
+       default:
+               break;
+       }
+
        data->stream.ops->free(&data->stream);
        kfree(data->stream.runtime->buffer);
        kfree(data->stream.runtime);
@@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
        struct snd_compr *compr;
 
        compr = device->device_data;
-       snd_unregister_device(compr->direction, compr->card, compr->device);
+       snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
+               compr->device);
        return 0;
 }
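
Two independent fixes to compress_offload.c appear above. The snd_compr_free()
change enforces teardown ordering: a stream still in RUNNING, DRAINING or
PAUSED state is triggered to STOP before ops->free() runs and the runtime
buffers are kfree()d, since otherwise the DSP could keep consuming a buffer
that has just been freed. A hedged sketch of the stop-before-free guard, with
names as in the diff:

	/* quiesce an active stream before tearing its buffers down */
	switch (runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_PAUSED:
		stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
		break;
	default:
		break;	/* already stopped; nothing to quiesce */
	}

The snd_unregister_device() change is an argument fix: the first parameter
selects a device type, and passing compr->direction compiled (both are plain
ints) but unregistered the wrong device class.
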
 
index b524f89a1f13a7d74c51c8231684f971662ccc15..18d9725015855c0ce3f33d174a3dfcd55836efeb 100644 (file)
@@ -111,6 +111,9 @@ enum {
 /* 0x0009 - 0x0014 -> 12 test regs */
 /* 0x0015 - visibility reg */
 
+/* Cirrus Logic CS4208 */
+#define CS4208_VENDOR_NID      0x24
+
 /*
  * Cirrus Logic CS4210
  *
@@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = {
        {} /* terminator */
 };
 
+static const struct hda_verb cs4208_coef_init_verbs[] = {
+       {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+       {0x24, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+       {0x24, AC_VERB_SET_COEF_INDEX, 0x0033},
+       {0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */
+       {0x24, AC_VERB_SET_COEF_INDEX, 0x0034},
+       {0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */
+       {} /* terminator */
+};
+
 /* Errata: CS4207 rev C0/C1/C2 Silicon
  *
  * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
@@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec)
                /* init_verb sequence for C0/C1/C2 errata */
                snd_hda_sequence_write(codec, cs_errata_init_verbs);
                snd_hda_sequence_write(codec, cs_coef_init_verbs);
+       } else if (spec->vendor_nid == CS4208_VENDOR_NID) {
+               snd_hda_sequence_write(codec, cs4208_coef_init_verbs);
        }
 
        snd_hda_gen_init(codec);
@@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = {
        {} /* terminator */
 };
 
+static const struct hda_pintbl mba6_pincfgs[] = {
+       { 0x10, 0x032120f0 }, /* HP */
+       { 0x11, 0x500000f0 },
+       { 0x12, 0x90100010 }, /* Speaker */
+       { 0x13, 0x500000f0 },
+       { 0x14, 0x500000f0 },
+       { 0x15, 0x770000f0 },
+       { 0x16, 0x770000f0 },
+       { 0x17, 0x430000f0 },
+       { 0x18, 0x43ab9030 }, /* Mic */
+       { 0x19, 0x770000f0 },
+       { 0x1a, 0x770000f0 },
+       { 0x1b, 0x770000f0 },
+       { 0x1c, 0x90a00090 },
+       { 0x1d, 0x500000f0 },
+       { 0x1e, 0x500000f0 },
+       { 0x1f, 0x500000f0 },
+       { 0x20, 0x500000f0 },
+       { 0x21, 0x430000f0 },
+       { 0x22, 0x430000f0 },
+       {} /* terminator */
+};
+
 static void cs420x_fixup_gpio_13(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec)
 
 /*
  * CS4208 support:
- * Its layout is no longer compatible with CS4206/CS4207, and the generic
- * parser seems working fairly well, except for trivial fixups.
+ * Its layout is no longer compatible with CS4206/CS4207
  */
 enum {
+       CS4208_MBA6,
        CS4208_GPIO0,
 };
 
 static const struct hda_model_fixup cs4208_models[] = {
        { .id = CS4208_GPIO0, .name = "gpio0" },
+       { .id = CS4208_MBA6, .name = "mba6" },
        {}
 };
 
 static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
        /* codec SSID */
-       SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0),
-       SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0),
+       SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+       SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
        {} /* terminator */
 };
 
@@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
 }
 
 static const struct hda_fixup cs4208_fixups[] = {
+       [CS4208_MBA6] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = mba6_pincfgs,
+               .chained = true,
+               .chain_id = CS4208_GPIO0,
+       },
        [CS4208_GPIO0] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs4208_fixup_gpio0,
        },
 };
 
+/* correct the 0dB offset of input pins */
+static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc)
+{
+       unsigned int caps;
+
+       caps = query_amp_caps(codec, adc, HDA_INPUT);
+       caps &= ~(AC_AMPCAP_OFFSET);
+       caps |= 0x02;
+       snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);
+}
+
 static int patch_cs4208(struct hda_codec *codec)
 {
        struct cs_spec *spec;
        int err;
 
-       spec = cs_alloc_spec(codec, 0); /* no specific w/a */
+       spec = cs_alloc_spec(codec, CS4208_VENDOR_NID);
        if (!spec)
                return -ENOMEM;
 
@@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec)
                           cs4208_fixups);
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
+       snd_hda_override_wcaps(codec, 0x18,
+                              get_wcaps(codec, 0x18) | AC_WCAP_STEREO);
+       cs4208_fix_amp_caps(codec, 0x18);
+       cs4208_fix_amp_caps(codec, 0x1b);
+       cs4208_fix_amp_caps(codec, 0x1c);
+
        err = cs_parse_auto_config(codec);
        if (err < 0)
                goto error;
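
The CS4208 input fix above is a read-modify-write of the widget amp
capabilities: query_amp_caps() fetches the composite caps word, the
AC_AMPCAP_OFFSET mask clears the reported 0dB-offset field, and a replacement
offset is OR-ed in before snd_hda_override_amp_caps() installs it. A hedged
sketch with the intent spelled out (that the offset field sits in the low
bits, so |= 0x02 sets it directly, follows the HDA spec; treat the layout as
an assumption here):

	unsigned int caps = query_amp_caps(codec, adc, HDA_INPUT);

	caps &= ~AC_AMPCAP_OFFSET;	/* clear the reported 0dB offset */
	caps |= 0x02;			/* report an offset of 2 steps instead */
	snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);

Overriding the caps rather than patching each control keeps the generic
parser's mixer setup unchanged; only the dB mapping it derives moves.
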
index 3d8cd04455a623b888a9efccb7bdd31f92df8188..7ea0245fc6bd5fe5579d81682a2d39ce6cd06af7 100644 (file)
@@ -1149,32 +1149,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
 }
 
 static void haswell_config_cvts(struct hda_codec *codec,
-                       int pin_id, int mux_id)
+                       hda_nid_t pin_nid, int mux_idx)
 {
        struct hdmi_spec *spec = codec->spec;
-       struct hdmi_spec_per_pin *per_pin;
-       int pin_idx, mux_idx;
-       int curr;
-       int err;
+       hda_nid_t nid, end_nid;
+       int cvt_idx, curr;
+       struct hdmi_spec_per_cvt *per_cvt;
 
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
-               per_pin = get_pin(spec, pin_idx);
+       /* configure all pins, including "no physical connection" ones */
+       end_nid = codec->start_nid + codec->num_nodes;
+       for (nid = codec->start_nid; nid < end_nid; nid++) {
+               unsigned int wid_caps = get_wcaps(codec, nid);
+               unsigned int wid_type = get_wcaps_type(wid_caps);
 
-               if (pin_idx == pin_id)
+               if (wid_type != AC_WID_PIN)
                        continue;
 
-               curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+               if (nid == pin_nid)
+                       continue;
+
+               curr = snd_hda_codec_read(codec, nid, 0,
                                          AC_VERB_GET_CONNECT_SEL, 0);
+               if (curr != mux_idx)
+                       continue;
 
-               /* Choose another unused converter */
-               if (curr == mux_id) {
-                       err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx);
-                       if (err < 0)
-                               return;
-                       snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx);
-                       snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+               /* choose an unassigned converter. The converters in the
+                * connection list are in the same order as in the codec.
+                */
+               for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
+                       per_cvt = get_cvt(spec, cvt_idx);
+                       if (!per_cvt->assigned) {
+                               snd_printdd("choose cvt %d for pin nid %d\n",
+                                       cvt_idx, nid);
+                               snd_hda_codec_write_cache(codec, nid, 0,
                                            AC_VERB_SET_CONNECT_SEL,
-                                           mux_idx);
+                                           cvt_idx);
+                               break;
+                       }
                }
        }
 }
@@ -1216,7 +1227,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
 
        /* configure unused pins to choose other converters */
        if (is_haswell(codec))
-               haswell_config_cvts(codec, pin_idx, mux_idx);
+               haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
 
        snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
 
index bc07d369fac43a548db7f1e45273627cc4e7246c..0e303b99a47ceef3037e7b245ed5d93caf1f7779 100644 (file)
@@ -3439,6 +3439,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
                /* Set to manual mode */
                val = alc_read_coef_idx(codec, 0x06);
                alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+               /* Enable Line1 input control by verb */
+               val = alc_read_coef_idx(codec, 0x1a);
+               alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
                break;
        }
 }
@@ -3531,6 +3534,7 @@ enum {
        ALC269VB_FIXUP_ORDISSIMO_EVE2,
        ALC283_FIXUP_CHROME_BOOK,
        ALC282_FIXUP_ASUS_TX300,
+       ALC283_FIXUP_INT_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -3790,6 +3794,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc282_fixup_asus_tx300,
        },
+       [ALC283_FIXUP_INT_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       {0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x0011},
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3874,7 +3888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
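
Both Realtek changes above go through the codec's COEF (coefficient)
interface, where an index register selects which processing coefficient the
next read or write hits. The Chromebook fixup does a read-modify-write so only
bit 4 of coefficient 0x1a changes, while the ALC283_FIXUP_INT_MIC verb table
writes the whole register (0x0011) declaratively at fixup time. A hedged
sketch of the read-modify-write idiom:

	/* set one bit of COEF 0x1a without disturbing the other bits */
	int val = alc_read_coef_idx(codec, 0x1a);	/* select index, read */

	alc_write_coef_idx(codec, 0x1a, val | (1 << 4));	/* write back */
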
index 099e7cd022e46d47260103b226b3410beac27133..7c43479623537af4f0d4179f4cce195cbea3e9b1 100644 (file)
@@ -5,7 +5,6 @@
 #include <stdbool.h>
 #include <sys/vfs.h>
 #include <sys/mount.h>
-#include <linux/magic.h>
 #include <linux/kernel.h>
 
 #include "debugfs.h"
index 9570c2b0f83c580454e2610cb3c07a4d7443ee19..b2519e49424f4c42bb389de846d21c8337773036 100644 (file)
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                             struct perf_tsc_conversion *tc)
 {
-       bool cap_usr_time_zero;
+       bool cap_user_time_zero;
        u32 seq;
        int i = 0;
 
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                tc->time_mult = pc->time_mult;
                tc->time_shift = pc->time_shift;
                tc->time_zero = pc->time_zero;
-               cap_usr_time_zero = pc->cap_usr_time_zero;
+               cap_user_time_zero = pc->cap_user_time_zero;
                rmb();
                if (pc->lock == seq && !(seq & 1))
                        break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                }
        }
 
-       if (!cap_usr_time_zero)
+       if (!cap_user_time_zero)
                return -EOPNOTSUPP;
 
        return 0;
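
The rename above sits inside a lockless reader of the perf mmap control page.
pc->lock acts as a sequence count: the reader snapshots it, copies the fields
it needs, then re-checks that the count is unchanged and even (an odd value
means a writer is mid-update) and retries otherwise. A generic sketch of the
pattern, using rmb() as this file does:

	u32 seq;

	do {
		seq = pc->lock;	/* snapshot the sequence count */
		rmb();		/* read the count before the data */
		/* ... copy time_mult, time_shift, time_zero ... */
		rmb();		/* read the data before the re-check */
	} while (pc->lock != seq || (seq & 1));	/* writer interleaved: retry */
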
index 423875c999b21208a5a6274d0fd47a2fb359ecd8..afe377b2884f740b0c469a367ab3938d99f7df0e 100644 (file)
@@ -321,8 +321,6 @@ found:
        return perf_event__repipe(tool, event_sw, &sample_sw, machine);
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
index 8e50d8d77419c7ca3e8ce72209b113d72665c09e..72eae7498c09419c8f1f77a7a005c6dcbbb5eb4b 100644 (file)
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
        return 0;
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
                }
        }
 
+       if (session_done())
+               return 0;
+
        if (nr_samples == 0) {
                ui__error("The %s file has no samples!\n", session->filename);
                return 0;
index 7f31a3ded1b6dc59730a0162d5af279e038d1afc..9c333ff3dfeb3de716eac13d48d7364e998bb50d 100644 (file)
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
        .ordering_requires_timestamps = true,
 };
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
index f5aa6375e3e9add4eea3cdd81de685fa37d87c4d..fd485340472790498720470e35ce675da6e0109b 100644 (file)
 #include <sys/mman.h>
 #include <linux/futex.h>
 
+/* For older distros: */
+#ifndef MAP_STACK
+# define MAP_STACK             0x20000
+#endif
+
+#ifndef MADV_HWPOISON
+# define MADV_HWPOISON         100
+#endif
+
+#ifndef MADV_MERGEABLE
+# define MADV_MERGEABLE                12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+# define MADV_UNMERGEABLE      13
+#endif
+
 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
                                         unsigned long arg,
                                         u8 arg_idx __maybe_unused,
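
The fallback defines above are the usual trick for building perf on older
distros whose installed headers predate these flags: the numeric values are
fixed by the kernel ABI, not by the headers, so defining them locally is safe
and the syscall behaves identically. A minimal self-contained sketch
(hint_mergeable is a hypothetical helper name):

	#include <sys/mman.h>

	#ifndef MADV_MERGEABLE
	# define MADV_MERGEABLE	12	/* value is part of the kernel ABI */
	#endif

	/* hint that a region's pages are candidates for KSM merging;
	 * the kernel only ever sees the number, never the header */
	static int hint_mergeable(void *addr, size_t len)
	{
		return madvise(addr, len, MADV_MERGEABLE);
	}
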
index 214e17e97e5c7ba5aa25545b465b8994ac666afc..346ee929d250b7790cf9ed3371e60b036a87e9ca 100644 (file)
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
 ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
   CFLAGS += -DLIBELF_MMAP
 endif
+ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
+  CFLAGS += -DHAVE_ELF_GETPHDRNUM
+endif
 
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
index 708fb8e9822a3ed43bd192470c5da6f01c236c38..d5a8dd44945fcc6483eabfafde3b5d882b46f6f1 100644 (file)
@@ -61,6 +61,15 @@ int main(void)
 }
 endef
 
+define SOURCE_ELF_GETPHDRNUM
+#include <libelf.h>
+int main(void)
+{
+       size_t dst;
+       return elf_getphdrnum(0, &dst);
+}
+endef
+
 ifndef NO_SLANG
 define SOURCE_SLANG
 #include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT
 
 int main(void)
 {
+       printf(\"error message: %s\n\", audit_errno_to_name(0));
        return audit_open();
 }
 endef
index bfc5a27597d60e1f09b7f95ef691e37608f8c694..7eae5488ecea47344cac10677104cb9e6cb2cc44 100644 (file)
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
                    end = map__rip_2objdump(map, sym->end);
 
                offset = line_ip - start;
-               if (offset < 0 || (u64)line_ip > end)
+               if ((u64)line_ip < start || (u64)line_ip > end)
                        offset = -1;
                else
                        parsed_line = tmp2 + 1;
index 3e5f5430a28aa929741e2bd3ccbcf554ae62a074..e23bde19d590872c6567c35d0df39fd5d0d68a87 100644 (file)
@@ -262,6 +262,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
                ret == DW_ATE_signed_fixed);
 }
 
+/**
+ * die_is_func_def - Ensure that this DIE is a subprogram and a definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+       Dwarf_Attribute attr;
+
+       return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+               dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
 /**
  * die_get_data_member_location - Get the data-member offset
  * @mb_die: a DIE of a member of a data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
 {
        struct __addr_die_search_param *ad = data;
 
+       /*
+        * Since a declaration entry doesn't have a given pc, this always
+        * returns a function definition entry.
+        */
        if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
            dwarf_haspc(fn_die, ad->addr)) {
                memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
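
die_is_func_def() gives the probe code a single predicate for "real"
functions: a DW_TAG_subprogram DIE that carries no DW_AT_declaration
attribute. Later hunks in this commit switch the probe and line-range
searches over to it so that forward declarations, which have no code ranges,
are skipped uniformly. A hedged usage sketch on top of elfutils'
dwarf_getfuncs() (the callback shape is libdw's; defs_only_cb and its body
are illustrative):

	static int defs_only_cb(Dwarf_Die *fn_die, void *data)
	{
		if (!die_is_func_def(fn_die))
			return DWARF_CB_OK;	/* declaration: keep walking */

		/* fn_die is a function definition; inspect it here */
		return DWARF_CB_OK;
	}

	/* walk every function DIE of a compile unit;
	 * cu_die comes from dwarf_offdie() or similar elsewhere */
	dwarf_getfuncs(&cu_die, defs_only_cb, NULL, 0);
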
index 6ce1717784b7ab42a14d58431c3026df749f0620..8658d41697d27fbbe06dbd1fbdbc3b9d895b4606 100644 (file)
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
 extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
                        int (*callback)(Dwarf_Die *, void *), void *data);
 
+/* Ensure that this DIE is a subprogram and a definition (not a declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
 /* Compare diename and tname */
 extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
 
index 26441d0e571bfce2bcd469dfdfe428175facb5ee..ce69901176d864506d39cc0df4f5dbe9479f9383 100644 (file)
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
        return write_padded(fd, name, name_len + 1, len);
 }
 
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
-                               u16 misc, int fd)
+static int __dsos__write_buildid_table(struct list_head *head,
+                                      struct machine *machine,
+                                      pid_t pid, u16 misc, int fd)
 {
+       char nm[PATH_MAX];
        struct dso *pos;
 
        dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
                if (is_vdso_map(pos->short_name)) {
                        name = (char *) VDSO__MAP_NAME;
                        name_len = sizeof(VDSO__MAP_NAME) + 1;
+               } else if (dso__is_kcore(pos)) {
+                       machine__mmap_name(machine, nm, sizeof(nm));
+                       name = nm;
+                       name_len = strlen(nm) + 1;
                } else {
                        name = pos->long_name;
                        name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
                umisc = PERF_RECORD_MISC_GUEST_USER;
        }
 
-       err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
-                                         kmisc, fd);
+       err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+                                         machine->pid, kmisc, fd);
        if (err == 0)
-               err = __dsos__write_buildid_table(&machine->user_dsos,
+               err = __dsos__write_buildid_table(&machine->user_dsos, machine,
                                                  machine->pid, umisc, fd);
        return err;
 }
@@ -375,23 +381,31 @@ out_free:
        return err;
 }
 
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+                              const char *debugdir)
 {
        bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
        bool is_vdso = is_vdso_map(dso->short_name);
+       char *name = dso->long_name;
+       char nm[PATH_MAX];
 
-       return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
-                                    dso->long_name, debugdir,
-                                    is_kallsyms, is_vdso);
+       if (dso__is_kcore(dso)) {
+               is_kallsyms = true;
+               machine__mmap_name(machine, nm, sizeof(nm));
+               name = nm;
+       }
+       return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+                                    debugdir, is_kallsyms, is_vdso);
 }
 
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+static int __dsos__cache_build_ids(struct list_head *head,
+                                  struct machine *machine, const char *debugdir)
 {
        struct dso *pos;
        int err = 0;
 
        dsos__for_each_with_build_id(pos, head)
-               if (dso__cache_build_id(pos, debugdir))
+               if (dso__cache_build_id(pos, machine, debugdir))
                        err = -1;
 
        return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
 
 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
 {
-       int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
-       ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+       int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+                                         debugdir);
+       ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
        return ret;
 }
 
index 46a0d35a05e1f21aae097e7a67cea89bfe9ecddf..9ff6cf3e9a99f69596b37372f60203c11bec88a3 100644 (file)
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
        next = rb_first(root);
 
        while (next) {
+               if (session_done())
+                       break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);
 
index be0329394d5639f77644d8a9079dd6ceb4f27a73..20c7299a9d4e0ad9ee7c06a62b6190fa99a15ee6 100644 (file)
@@ -734,7 +734,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
        }
 
        /* If not a real subprogram, find a real one */
-       if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+       if (!die_is_func_def(sc_die)) {
                if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
                        pr_warning("Failed to find probe point in any "
                                   "functions.\n");
@@ -980,12 +980,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
        struct dwarf_callback_param *param = data;
        struct probe_finder *pf = param->data;
        struct perf_probe_point *pp = &pf->pev->point;
-       Dwarf_Attribute attr;
 
        /* Check tag and diename */
-       if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-           !die_compare_name(sp_die, pp->function) ||
-           dwarf_attr(sp_die, DW_AT_declaration, &attr))
+       if (!die_is_func_def(sp_die) ||
+           !die_compare_name(sp_die, pp->function))
                return DWARF_CB_OK;
 
        /* Check declared file */
@@ -1474,7 +1472,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
        return 0;
 }
 
-/* Search function from function name */
+/* Search function definition from function name */
 static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 {
        struct dwarf_callback_param *param = data;
@@ -1485,7 +1483,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
        if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
                return DWARF_CB_OK;
 
-       if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+       if (die_is_func_def(sp_die) &&
            die_compare_name(sp_die, lr->function)) {
                lf->fname = dwarf_decl_file(sp_die);
                dwarf_decl_line(sp_die, &lr->offset);
index 51f5edf2a6d0d140dd897c58f9d606c9fb25be9d..70ffa41518f34a1bbfaa82e67c46a8a22b133f67 100644 (file)
@@ -531,6 +531,9 @@ static int flush_sample_queue(struct perf_session *s,
                return 0;
 
        list_for_each_entry_safe(iter, tmp, head, list) {
+               if (session_done())
+                       return 0;
+
                if (iter->timestamp > limit)
                        break;
 
@@ -1160,7 +1163,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
        }
 }
 
-#define session_done() (*(volatile int *)(&session_done))
 volatile int session_done;
 
 static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1372,10 +1374,13 @@ more:
                                    "Processing events...");
        }
 
+       err = 0;
+       if (session_done())
+               goto out_err;
+
        if (file_pos < file_size)
                goto more;
 
-       err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        err = flush_sample_queue(session, tool);
index 3aa75fb2225f7129daa12f7e86219f30928e5e42..04bf7373a7e5fb04222b1a9cdb5c295045c0626f 100644 (file)
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 #define perf_session__set_tracepoints_handlers(session, array) \
        __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done() (*(volatile int *)(&session_done))
 #endif /* __PERF_SESSION_H */
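
Exporting session_done here is what lets hists__collapse_resort(),
flush_sample_queue() and the report/script/inject tools above poll one flag.
The signal handler performs a plain int store, and the macro's cast through
volatile forces a fresh load at every check, so the compiler cannot hoist the
read out of a long-running loop. A hedged sketch of the pattern in isolation:

	volatile int session_done;

	#define session_done()	(*(volatile int *)(&session_done))

	static void sig_handler(int sig)
	{
		session_done = 1;	/* async-signal-safe: single int store */
	}

	/* ... in the event loop ... */
	while (!session_done()) {
		/* process one event; the read repeats on every iteration */
	}
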
index a7b9ab55738086c24d12d7e9ee8cb6143ffd47aa..a9c829be52169eac5b9f00d9382fd9281152b9e6 100644 (file)
@@ -8,6 +8,22 @@
 #include "symbol.h"
 #include "debug.h"
 
+#ifndef HAVE_ELF_GETPHDRNUM
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+       GElf_Ehdr gehdr;
+       GElf_Ehdr *ehdr;
+
+       ehdr = gelf_getehdr(elf, &gehdr);
+       if (!ehdr)
+               return -1;
+
+       *dst = ehdr->e_phnum;
+
+       return 0;
+}
+#endif
+
 #ifndef NT_GNU_BUILD_ID
 #define NT_GNU_BUILD_ID 3
 #endif
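
The HAVE_ELF_GETPHDRNUM guard pairs with the feature probe added to the perf
Makefile earlier in this commit: when libelf is too old to provide
elf_getphdrnum(3), a drop-in shim with the same signature reads e_phnum from
the ELF header, so call sites stay unconditional. A hedged usage sketch:

	size_t phdrnum;

	/* identical call whether libelf or the local fallback resolves it */
	if (elf_getphdrnum(elf, &phdrnum) < 0)
		return -1;

One caveat: the fallback does not handle the PN_XNUM extension for binaries
with more than 0xffff program headers, which is presumably acceptable for the
objects perf inspects.
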
index fe7a27d67d2b7af23a596683509769a4f1bd6e38..e9e1c03f927d27bce1e041a9cc61ca47dae2b987 100644 (file)
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
        char *next = NULL;
        char *addr_str;
        char *mod;
-       char *fmt;
+       char *fmt = NULL;
 
        line = strtok_r(file, "\n", &next);
        while (line) {