Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sfr/next-fixes
author    Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 17 Apr 2013 02:45:15 +0000 (19:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 17 Apr 2013 02:45:15 +0000 (19:45 -0700)
Pull powerpc fixes from Stephen Rothwell:
 "Three regresions in the PowerPC code.  One from v3.7 the others from
  this merge window."

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sfr/next-fixes:
  powerpc: add a missing label in resume_kernel
  powerpc: Fix audit crash due to save/restore PPR changes
  powerpc: fix compiling CONFIG_PPC_TRANSACTIONAL_MEM when CONFIG_ALTIVEC=n

151 files changed:
Documentation/DocBook/device-drivers.tmpl
Documentation/scsi/LICENSE.qla2xxx
MAINTAINERS
Makefile
arch/arm/boot/dts/imx28-m28evk.dts
arch/arm/boot/dts/imx28-sps1.dts
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-kirkwood/board-iomega_ix2_200.c
arch/arm/mach-mvebu/irq-armada-370-xp.c
arch/arm/mach-s3c24xx/include/mach/irqs.h
arch/arm/mach-s3c24xx/irq.c
arch/c6x/include/asm/irqflags.h
arch/m68k/include/asm/gpio.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/tlb.h
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/paravirt.c
arch/x86/lguest/boot.c
arch/x86/mm/fault.c
arch/x86/mm/pageattr-test.c
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/x86/xen/mmu.c
crypto/gcm.c
drivers/base/regmap/regmap.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/ux500/cryp/cryp_core.c
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/gpio/gpio-pca953x.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/udl/udl_connector.c
drivers/input/tablet/wacom_wac.c
drivers/irqchip/irq-gic.c
drivers/misc/vmw_vmci/Kconfig
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/Makefile
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2x00mmio.c [new file with mode: 0644]
drivers/net/wireless/rt2x00/rt2x00mmio.h [new file with mode: 0644]
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rt2x00/rt2x00pci.h
drivers/net/wireless/rt2x00/rt61pci.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ipr.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/st.c
drivers/target/target_core_alua.c
drivers/tty/mxser.c
drivers/tty/serial/8250/8250_pnp.c
drivers/tty/serial/omap-serial.c
drivers/vhost/tcm_vhost.c
drivers/watchdog/Kconfig
drivers/xen/events.c
fs/btrfs/tree-log.c
fs/cifs/connect.c
fs/inode.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/proc/array.c
include/asm-generic/tlb.h
include/linux/capability.h
include/linux/ftrace.h
include/linux/mm.h
include/linux/sched.h
include/linux/security.h
include/net/iucv/af_iucv.h
include/trace/events/sched.h
kernel/capability.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/hrtimer.c
kernel/kthread.c
kernel/sched/clock.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/smpboot.c
kernel/sys.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_stack.c
lib/kobject.c
mm/memory.c
net/atm/common.c
net/ax25/af_ax25.c
net/bluetooth/af_bluetooth.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/caif/caif_socket.c
net/can/gw.c
net/core/rtnetlink.c
net/ipv4/devinet.c
net/ipv4/tcp_output.c
net/ipv6/tcp_ipv6.c
net/irda/af_irda.c
net/iucv/af_iucv.c
net/l2tp/l2tp_ip6.c
net/llc/af_llc.c
net/netrom/af_netrom.c
net/nfc/llcp/sock.c
net/rose/af_rose.c
net/sunrpc/clnt.c
net/tipc/socket.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/sme.c
security/capability.c
security/security.c
security/selinux/hooks.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm8903.c
sound/soc/samsung/i2s.c
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/soc/tegra/tegra_pcm.c
sound/usb/mixer_quirks.c
sound/usb/quirks.c

index 7514dbf0a6796c4e216a175fa79db0ed3ff470d9..c36892c072dad4aaff9b2e34f3653ac88c915190 100644 (file)
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
   <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
 !Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250.c
+!Edrivers/tty/serial/8250/8250_core.c
   </chapter>
 
   <chapter id="fbdev">
index 27a91cf43d6d66ce4137e9335988175b096423a4..5020b7b5a24410f5cac04c2a8cbc849b3546a8e8 100644 (file)
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
 QLogic Linux FC-FCoE Driver
 
 This program includes a device driver for Linux 3.x.
index 836a6183c37f6f6e2ceec36ea72497136fffde4c..8bdd7a7ef2f4687aa26ee66a70fe8ad428e14890 100644 (file)
@@ -4941,6 +4941,12 @@ W:       logfs.org
 S:     Maintained
 F:     fs/logfs/
 
+LPC32XX MACHINE SUPPORT
+M:     Roland Stigge <stigge@antcom.de>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-lpc32xx/
+
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:     Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
 M:     Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
@@ -6625,7 +6631,7 @@ S:        Supported
 F:     fs/reiserfs/
 
 REGISTER MAP ABSTRACTION
-M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:     Mark Brown <broonie@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
 S:     Supported
 F:     drivers/base/regmap/
@@ -6951,7 +6957,6 @@ F:        drivers/scsi/st*
 
 SCTP PROTOCOL
 M:     Vlad Yasevich <vyasevich@gmail.com>
-M:     Sridhar Samudrala <sri@us.ibm.com>
 M:     Neil Horman <nhorman@tuxdriver.com>
 L:     linux-sctp@vger.kernel.org
 W:     http://lksctp.sourceforge.net
@@ -7374,7 +7379,7 @@ F:        sound/
 
 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
 M:     Liam Girdwood <lgirdwood@gmail.com>
-M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:     Mark Brown <broonie@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://alsa-project.org/main/index.php/ASoC
@@ -7463,7 +7468,7 @@ F:        drivers/clk/spear/
 
 SPI SUBSYSTEM
 M:     Grant Likely <grant.likely@secretlab.ca>
-M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:     Mark Brown <broonie@kernel.org>
 L:     spi-devel-general@lists.sourceforge.net
 Q:     http://patchwork.kernel.org/project/spi-devel-general/list/
 T:     git git://git.secretlab.ca/git/linux-2.6.git
@@ -8708,7 +8713,7 @@ F:        drivers/scsi/vmw_pvscsi.h
 
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:     Liam Girdwood <lrg@ti.com>
-M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:     Mark Brown <broonie@kernel.org>
 W:     http://opensource.wolfsonmicro.com/node/15
 W:     http://www.slimlogic.co.uk/?p=48
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
index 6db672b15bdaa447cafe1c6413b0fc56f2e9d2fd..ac509b25b701633d480ed095d23f61624ff24104 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
 # Carefully list dependencies so we do not try to build scripts twice
 # in parallel
 PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
+        asm-generic
        $(Q)$(MAKE) $(build)=$(@)
 
 # Objects we will link into vmlinux / subdirs we need to visit
index 6ce3d17c3a29c19c80ef6a04484fd59ec5e8765a..fd36e1cca1042306476c54d4038603780af358a1 100644 (file)
                        i2c0: i2c@80058000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&i2c0_pins_a>;
-                               clock-frequency = <400000>;
                                status = "okay";
 
                                sgtl5000: codec@0a {
index e6cde8aa7ffff5c3a2b670e2584c7475a8419525..6c6a5442800ad78adfe13ada4cf44a2cfd3cfbc5 100644 (file)
@@ -70,7 +70,6 @@
                        i2c0: i2c@80058000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&i2c0_pins_a>;
-                               clock-frequency = <400000>;
                                status = "okay";
 
                                rtc: rtc@51 {
index 06ec460b4581c4163e4053953e84acfaad01a76b..281a223591ff83d5a9eef96c6775c77c99a356ad 100644 (file)
@@ -91,6 +91,7 @@
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x00a00600 0x20>;
                        interrupts = <1 13 0xf01>;
+                       clocks = <&clks 15>;
                };
 
                L2: l2-cache@00a02000 {
index 93c3afbef9eeecc4b0b3741c2bc2360717da81b9..3694e94f6e99dc93aaf91985f55c6245830a92af 100644 (file)
                                marvell,function = "gpio";
                        };
                        pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
-                               marvell,pins = "mpp44";
+                               marvell,pins = "mpp46";
                                marvell,function = "gpio";
                        };
                        pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
-                               marvell,pins = "mpp45";
+                               marvell,pins = "mpp47";
                                marvell,function = "gpio";
                        };
 
                        gpios = <&gpio0 16 0>;
                        linux,default-trigger = "default-on";
                };
-               health_led1 {
+               rebuild_led {
+                       label = "status:white:rebuild_led";
+                       gpios = <&gpio1 4 0>;
+               };
+               health_led {
                        label = "status:red:health_led";
                        gpios = <&gpio1 5 0>;
                };
-               health_led2 {
-                       label = "status:white:health_led";
-                       gpios = <&gpio1 4 0>;
-               };
                backup_led {
                        label = "status:blue:backup_led";
                        gpios = <&gpio0 15 0>;
index e13a8fa5e62c5aa4f04b0aba66f4e77df0ac24ce..2193c834f55ccc68dcbd35cb3ebb1d68dc9f35d6 100644 (file)
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
        clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
        clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
        clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+       clk_register_clkdev(clk[admux_gate], "audmux", NULL);
 
        clk_prepare_enable(clk[spba_gate]);
        clk_prepare_enable(clk[gpio1_gate]);
@@ -265,6 +266,7 @@ int __init mx35_clocks_init(void)
        clk_prepare_enable(clk[iim_gate]);
        clk_prepare_enable(clk[emi_gate]);
        clk_prepare_enable(clk[max_gate]);
+       clk_prepare_enable(clk[iomuxc_gate]);
 
        /*
         * SCC is needed to boot via mmc after a watchdog reset. The clock code
index 2f9ff93a4e6134773befacb68c4dd4ec5c14bb90..d38e54f5b6d7f1dbbbd245a91db8be9a2408b28f 100644 (file)
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[]        = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
 static const char *gpu3d_core_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
 static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
 static const char *ipu_sels[]          = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[]       = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[]       = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
 static const char *ipu_di_pre_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
 static const char *ipu1_di0_sels[]     = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
 static const char *ipu1_di1_sels[]     = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)
 
        clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
        clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
-       clk_register_clkdev(clk[twd], NULL, "smp_twd");
        clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
        clk_register_clkdev(clk[ahb], "ahb", NULL);
        clk_register_clkdev(clk[cko1], "cko1", NULL);
index f655b2637b0e2b4822efc185c499095c44bc6b9e..e5f70415905a998e919b5b4aabe8d3121fa8a81e 100644 (file)
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
        .duplex         = DUPLEX_FULL,
 };
 
+static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
+        .phy_addr       = MV643XX_ETH_PHY_ADDR(11),
+};
+
 void __init iomega_ix2_200_init(void)
 {
        /*
         * Basic setup. Needs to be called early.
         */
-       kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
+       kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
+       kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
 }
index 6a9195e10579a8263b3446ccc443bd1d1381f784..d5970f5a1e8d5ea8010c8f243853ec3a28d6bb30 100644 (file)
@@ -61,7 +61,6 @@ static struct irq_domain *armada_370_xp_mpic_domain;
  */
 static void armada_370_xp_irq_mask(struct irq_data *d)
 {
-#ifdef CONFIG_SMP
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
        if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
@@ -70,15 +69,10 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
        else
                writel(hwirq, per_cpu_int_base +
                                ARMADA_370_XP_INT_SET_MASK_OFFS);
-#else
-       writel(irqd_to_hwirq(d),
-              per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
-#endif
 }
 
 static void armada_370_xp_irq_unmask(struct irq_data *d)
 {
-#ifdef CONFIG_SMP
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
        if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
@@ -87,10 +81,6 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
        else
                writel(hwirq, per_cpu_int_base +
                                ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#else
-       writel(irqd_to_hwirq(d),
-              per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#endif
 }
 
 #ifdef CONFIG_SMP
@@ -146,7 +136,11 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
                                      unsigned int virq, irq_hw_number_t hw)
 {
        armada_370_xp_irq_mask(irq_get_irq_data(virq));
-       writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+       if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+               writel(hw, per_cpu_int_base +
+                       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+       else
+               writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        irq_set_status_flags(virq, IRQ_LEVEL);
 
        if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
index b7a9f4d469e816bf6ca1f389f31d9280d06ecc46..1e73f5fa865983cd0a79fae37623af0f4a0ebbaf 100644 (file)
 
 #if defined(CONFIG_CPU_S3C2416)
 #define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
-#elif defined(CONFIG_CPU_S3C2443)
-#define NR_IRQS (IRQ_S3C2443_AC97+1)
 #else
-#define NR_IRQS (IRQ_S3C2440_AC97+1)
+#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
 #endif
 
 /* compatibility define. */
index cb9f5e011e73a1fb5f055dc6a3c6498aa095f7e0..d8ba9bee4c7e61434aa90852ce7f4e66ff4a5ebd 100644 (file)
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
                base = (void *)0xfd000000;
 
                intc->reg_mask = base + 0xa4;
-               intc->reg_pending = base + 0x08;
+               intc->reg_pending = base + 0xa8;
                irq_num = 20;
                irq_start = S3C2410_IRQ(32);
                irq_offset = 4;
index cf78e09e18c3613fe027cd3ac458ed4a4a15c01b..2c71d5634ec299e181d53a4a2ca430563d385b05 100644 (file)
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
+       asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
 }
 
 /* unconditionally enable interrupts */
index 4395ffc51fdbc4739ae5f22257c42a6f76b06a41..8cc83431805b73eb44d777d8749e6fc3de66fbbe 100644 (file)
@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)
        return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
 }
 
+static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+{
+       int err;
+
+       err = gpio_request(gpio, label);
+       if (err)
+               return err;
+
+       if (flags & GPIOF_DIR_IN)
+               err = gpio_direction_input(gpio);
+       else
+               err = gpio_direction_output(gpio,
+                       (flags & GPIOF_INIT_HIGH) ? 1 : 0);
+
+       if (err)
+               gpio_free(gpio);
+
+       return err;
+}
+
 #endif
index 5edd1742cfd02a9368ca28e604366fb428904ad8..7361e47db79f50158ada1cdde859c4871b99ee51 100644 (file)
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+       PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
index 142236ed83af580c06e2e00893501e3fc38b0e65..b3b0ec1dac86d91ac6289b961afefe65f1f48d11 100644 (file)
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
+       void (*flush)(void);
 };
 
 struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
 
 void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
 
 void _paravirt_nop(void);
 u32 _paravirt_ident_32(u32);
index 4fef20773b8f9fcf104702ac2d8ef0c29a05c235..c7797307fc2ba5b24adacced83c9587bc5d2a8de 100644 (file)
@@ -7,7 +7,7 @@
 
 #define tlb_flush(tlb)                                                 \
 {                                                                      \
-       if (tlb->fullmm == 0)                                           \
+       if (!tlb->fullmm && !tlb->need_flush_all)                       \
                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
        else                                                            \
                flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);   \
index b05a575d56f42f4543504b999a3b4875de110772..26830f3af0df70eebb438ddc82f74240b49d618e 100644 (file)
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
        if (top <= at)
                return 0;
 
+       memset(&regs, 0, sizeof(regs));
+
        ds->bts_index = ds->bts_buffer_base;
 
        perf_sample_data_init(&data, 0, event->hw.last_period);
-       regs.ip     = 0;
 
        /*
         * Prepare a generic sample, i.e. fill in the invariant fields.
index 17fff18a103104431c0e10e1543af6f027f64f72..8bfb335f74bb377776adb4b6b1da98dc17ed82ae 100644 (file)
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
        leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
+void paravirt_flush_lazy_mmu(void)
+{
+       preempt_disable();
+
+       if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+               arch_leave_lazy_mmu_mode();
+               arch_enter_lazy_mmu_mode();
+       }
+
+       preempt_enable();
+}
+
 void paravirt_start_context_switch(struct task_struct *prev)
 {
        BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
        return this_cpu_read(paravirt_lazy_mode);
 }
 
-void arch_flush_lazy_mmu_mode(void)
-{
-       preempt_disable();
-
-       if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-               arch_leave_lazy_mmu_mode();
-               arch_enter_lazy_mmu_mode();
-       }
-
-       preempt_enable();
-}
-
 struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
+               .flush = paravirt_nop,
        },
 
        .set_fixmap = native_set_fixmap,
index 1cbd89ca5569f2ce5d76a4bed1f21d6d52ed63e5..7114c63f047d06432a453884c4500474ea9681ea 100644 (file)
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
        pv_mmu_ops.read_cr3 = lguest_read_cr3;
        pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
        pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+       pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
        pv_mmu_ops.pte_update = lguest_pte_update;
        pv_mmu_ops.pte_update_defer = lguest_pte_update;
 
index 2b97525246d43c3397f9d2149adf859b24b2842b..0e883364abb5752d4ac07b031d241d6bd00585ab 100644 (file)
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
        if (pgd_none(*pgd_ref))
                return -1;
 
-       if (pgd_none(*pgd))
+       if (pgd_none(*pgd)) {
                set_pgd(pgd, *pgd_ref);
-       else
+               arch_flush_lazy_mmu_mode();
+       } else {
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+       }
 
        /*
         * Below here mismatches are bugs because these lower tables
index b0086567271c9956b2196e3ba302e4a77009e811..0e38951e65eb0940bf57a3f6d8c8d01dff0674c7 100644 (file)
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
                        s->gpg++;
                        i += GPS/PAGE_SIZE;
                } else if (level == PG_LEVEL_2M) {
-                       if (!(pte_val(*pte) & _PAGE_PSE)) {
+                       if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
                                printk(KERN_ERR
                                        "%lx level %d but not PSE %Lx\n",
                                        addr, level, (u64)pte_val(*pte));
index 091934e1d0d97ca26570eb9962aa1890e08e7bd8..fb4e73ec24d8d1b8b894b6c09f7c082a49dc1dcf 100644 (file)
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
-       old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+       old_prot = req_prot = pte_pgprot(old_pte);
 
        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
         * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
         * for the ancient hardware that doesn't support it.
         */
-       if (pgprot_val(new_prot) & _PAGE_PRESENT)
-               pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+       if (pgprot_val(req_prot) & _PAGE_PRESENT)
+               pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
        else
-               pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+               pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
 
-       new_prot = canon_pgprot(new_prot);
+       req_prot = canon_pgprot(req_prot);
 
        /*
         * old_pte points to the large page base address. So we need
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
         * but that can deadlock->flush only current cpu:
         */
        __flush_tlb_all();
+
+       arch_flush_lazy_mmu_mode();
 }
 
 #ifdef CONFIG_HIBERNATION
index 193350b51f90af74508a8ec97217085bf3acdbab..17fda6a8b3c2df13adaee439b0ad6ab9f50796cb 100644 (file)
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+       /*
+        * NOTE! For PAE, any changes to the top page-directory-pointer-table
+        * entries need a full cr3 reload to flush.
+        */
+#ifdef CONFIG_X86_PAE
+       tlb->need_flush_all = 1;
+#endif
        tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
index 6afbb2ca9a0ac450f79c6628b7cd9cd7c2ceeccf..e006c18d288a39ebec3343b2c240eced7a6fec34 100644 (file)
@@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
 }
 
 /* Set the page permissions on an identity-mapped pages */
-static void set_page_prot(void *addr, pgprot_t prot)
+static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
 {
        unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
        pte_t pte = pfn_pte(pfn, prot);
 
-       if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+       if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
                BUG();
 }
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+       return set_page_prot_flags(addr, prot, UVMF_NONE);
+}
 #ifdef CONFIG_X86_32
 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 {
@@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
                                 unsigned long addr)
 {
        if (*pt_base == PFN_DOWN(__pa(addr))) {
-               set_page_prot((void *)addr, PAGE_KERNEL);
+               set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
                clear_page((void *)addr);
                (*pt_base)++;
        }
        if (*pt_end == PFN_DOWN(__pa(addr))) {
-               set_page_prot((void *)addr, PAGE_KERNEL);
+               set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
                clear_page((void *)addr);
                (*pt_end)--;
        }
@@ -2196,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .lazy_mode = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy_mmu,
+               .flush = paravirt_flush_lazy_mmu,
        },
 
        .set_fixmap = xen_set_fixmap,
index 137ad1ec54383ceda3e515490ffc794a0242f907..13ccbda34ff979fbeb037b88ae400f4e248789a7 100644 (file)
@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {
 
 struct crypto_rfc4543_req_ctx {
        u8 auth_tag[16];
+       u8 assocbuf[32];
        struct scatterlist cipher[1];
        struct scatterlist payload[2];
        struct scatterlist assoc[2];
@@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
        scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
        assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
 
-       sg_init_table(assoc, 2);
-       sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
-                   req->assoc->offset);
+       if (req->assoc->length == req->assoclen) {
+               sg_init_table(assoc, 2);
+               sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+                           req->assoc->offset);
+       } else {
+               BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
+
+               scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
+                                        req->assoclen, 0);
+
+               sg_init_table(assoc, 2);
+               sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
+       }
        scatterwalk_crypto_chain(assoc, payload, 0, 2);
 
        aead_request_set_tfm(subreq, ctx->child);
index d34adef1e63e2bee55aa3cd5a9e7f2d98912103b..58cfb3232428f05b702d5b7b3a569429b9f8fe5c 100644 (file)
@@ -943,7 +943,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                unsigned int ival;
                int val_bytes = map->format.val_bytes;
                for (i = 0; i < val_len / val_bytes; i++) {
-                       ival = map->format.parse_val(val + (i * val_bytes));
+                       memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
+                       ival = map->format.parse_val(map->work_buf);
                        ret = regcache_write(map, reg + (i * map->reg_stride),
                                             ival);
                        if (ret) {
index ad72922919ed79b181a83467dea455216676d56c..6133ef5cf671ba6caf7cf79c67c8afa903fdc23e 100644 (file)
@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 
        sample_time = cpu->pstate_policy->sample_rate_ms;
        delay = msecs_to_jiffies(sample_time);
-       delay -= jiffies % delay;
        mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
index 8bc5fef07e7a797f7cdde6e8f3f15445afde4993..22c9063e012064a07fb114f5568e6dac23093bc7 100644 (file)
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
        .shutdown = ux500_cryp_shutdown,
        .driver = {
                .owner = THIS_MODULE,
-               .name  = "cryp1"
+               .name  = "cryp1",
                .pm    = &ux500_cryp_pm,
        }
 };
index c4b4fd2acc42b8fc605ccc70a71907ab3668794c..08b43bf3715816951d23cc71dd49ca9b810b55f1 100644 (file)
@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
 
        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
-               struct omap_dmadev *d = to_omap_dma_dev(chan->device);
-               spin_lock(&d->lock);
-               if (list_empty(&c->node))
-                       list_add_tail(&c->node, &d->pending);
-               spin_unlock(&d->lock);
-               tasklet_schedule(&d->task);
+               /*
+                * c->cyclic is used only by audio and in this case the DMA need
+                * to be started without delay.
+                */
+               if (!c->cyclic) {
+                       struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+                       spin_lock(&d->lock);
+                       if (list_empty(&c->node))
+                               list_add_tail(&c->node, &d->pending);
+                       spin_unlock(&d->lock);
+                       tasklet_schedule(&d->task);
+               } else {
+                       omap_dma_start_desc(c);
+               }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
 }
index 7181531227595475a907a4955b69324bb5c5e460..5dbc5946c4c3d9931585ac2440f64274f477fe02 100644 (file)
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
        struct dma_pl330_platdata *pdat;
        struct dma_pl330_dmac *pdmac;
-       struct dma_pl330_chan *pch;
+       struct dma_pl330_chan *pch, *_p;
        struct pl330_info *pi;
        struct dma_device *pd;
        struct resource *res;
@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        ret = dma_async_device_register(pd);
        if (ret) {
                dev_err(&adev->dev, "unable to register DMAC\n");
-               goto probe_err2;
+               goto probe_err3;
+       }
+
+       if (adev->dev.of_node) {
+               ret = of_dma_controller_register(adev->dev.of_node,
+                                        of_dma_pl330_xlate, pdmac);
+               if (ret) {
+                       dev_err(&adev->dev,
+                       "unable to register DMA to the generic DT DMA helpers\n");
+               }
        }
 
        dev_info(&adev->dev,
@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
                pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
                pi->pcfg.num_peri, pi->pcfg.num_events);
 
-       ret = of_dma_controller_register(adev->dev.of_node,
-                                        of_dma_pl330_xlate, pdmac);
-       if (ret) {
-               dev_err(&adev->dev,
-               "unable to register DMA to the generic DT DMA helpers\n");
-               goto probe_err2;
-       }
-
        return 0;
+probe_err3:
+       amba_set_drvdata(adev, NULL);
 
+       /* Idle the DMAC */
+       list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+                       chan.device_node) {
+
+               /* Remove the channel */
+               list_del(&pch->chan.device_node);
+
+               /* Flush the channel */
+               pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+               pl330_free_chan_resources(&pch->chan);
+       }
 probe_err2:
        pl330_del(pi);
 probe_err1:
@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev)
        if (!pdmac)
                return 0;
 
-       of_dma_controller_free(adev->dev.of_node);
+       if (adev->dev.of_node)
+               of_dma_controller_free(adev->dev.of_node);
 
+       dma_async_device_unregister(&pdmac->ddma);
        amba_set_drvdata(adev, NULL);
 
        /* Idle the DMAC */
index 24059462c87fb23f6481304ae8d1bf819c2785e4..9391cf16e990db6a46870004754df0983568bd8e 100644 (file)
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
                                                chip->gpio_chip.ngpio,
                                                irq_base,
                                                &pca953x_irq_simple_ops,
-                                               NULL);
+                                               chip);
                if (!chip->domain)
                        return -ENODEV;
 
index 59d6b9bf204bbd2d76b8440b3f7d435def394fe7..892ff9f959754263eb318da91347cbfffcc25ae7 100644 (file)
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
        if (!fb_helper->fb)
                return 0;
 
-       drm_modeset_lock_all(dev);
+       mutex_lock(&fb_helper->dev->mode_config.mutex);
        if (!drm_fb_helper_is_bound(fb_helper)) {
                fb_helper->delayed_hotplug = true;
-               drm_modeset_unlock_all(dev);
+               mutex_unlock(&fb_helper->dev->mode_config.mutex);
                return 0;
        }
        DRM_DEBUG_KMS("\n");
@@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 
        count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
                                                    max_height);
+       mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+       drm_modeset_lock_all(dev);
        drm_setup_crtcs(fb_helper);
        drm_modeset_unlock_all(dev);
-
        drm_fb_helper_set_par(fb_helper->fbdev);
 
        return 0;
index fe22bb780e1d048efc76e92d7296ab98f94294db..78d8e919509fb1e0a5b6df59d9782693593cbdae 100644 (file)
@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
        int i;
        unsigned char misc = 0;
        unsigned char ext_vga[6];
-       unsigned char ext_vga_index24;
-       unsigned char dac_index90 = 0;
        u8 bppshift;
 
        static unsigned char dacvalue[] = {
@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                option2 = 0x0000b000;
                break;
        case G200_ER:
-               dac_index90 = 0;
                break;
        }
 
@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                WREG_DAC(i, dacvalue[i]);
        }
 
-       if (mdev->type == G200_ER) {
-               WREG_DAC(0x90, dac_index90);
-       }
-
+       if (mdev->type == G200_ER)
+               WREG_DAC(0x90, 0);
 
        if (option)
                pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
        if (mdev->type == G200_WB)
                ext_vga[1] |= 0x88;
 
-       ext_vga_index24 = 0x05;
-
        /* Set pixel clocks */
        misc = 0x2d;
        WREG8(MGA_MISC_OUT, misc);
@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
        }
 
        if (mdev->type == G200_ER)
-               WREG_ECRT(24, ext_vga_index24);
+               WREG_ECRT(0x24, 0x5);
 
        if (mdev->type == G200_EV) {
                WREG_ECRT(6, 0);
index 7f0e6c3f37d1fae4983bd6d8270364e52f00fc30..1ddc03e51bf47789b65992db64a3784cf0dad424 100644 (file)
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
 {
        struct nv50_display_flip *flip = data;
        if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
-                                             flip->chan->data);
+                                             flip->chan->data)
                return true;
        usleep_range(1, 2);
        return false;
index fe5cdbcf263605b7b5ec853b4d5f354f7c754b13..b44d548c56f8e43a3e14878622c8f8ed3507bf35 100644 (file)
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
        int ret;
 
        edid = (struct edid *)udl_get_edid(udl);
+       if (!edid) {
+               drm_mode_connector_update_edid_property(connector, NULL);
+               return 0;
+       }
 
        /*
         * We only read the main block, but if the monitor reports extension
index 1daa97913b7d20068845865a405387404fffe1d8..49fdff02b5fed34e24ffda008d9c91dade317aba 100644 (file)
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
        { "Wacom Intuos4 12x19",  WACOM_PKGLEN_INTUOS,    97536, 60960, 2047,
          63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xBC =
-       { "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40840, 25400, 2047,
+       { "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40640, 25400, 2047,
          63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0x26 =
        { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS,  31496, 19685, 2047,
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_WACOM(0xF4) },
        { USB_DEVICE_WACOM(0xF8) },
-       { USB_DEVICE_WACOM(0xF6) },
+       { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
        { USB_DEVICE_WACOM(0xFA) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index a32e0d5aa45f43eb71c91ab9020696436212ae95..fc6aebf1e4b2630493b8e3ff89a2f76650019ef9 100644 (file)
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
        if (gic_arch_extn.irq_retrigger)
                return gic_arch_extn.irq_retrigger(d);
 
-       return -ENXIO;
+       /* the genirq layer expects 0 if we can't retrigger in hardware */
+       return 0;
 }
 
 #ifdef CONFIG_SMP
index 39c2ecadb273d374b41be572bee6431e7b81e784..ea98f7e9ccd19d6ef8ea862616bcc4c0f3055b8d 100644 (file)
@@ -4,7 +4,7 @@
 
 config VMWARE_VMCI
        tristate "VMware VMCI Driver"
-       depends on X86 && PCI
+       depends on X86 && PCI && NET
        help
          This is VMware's Virtual Machine Communication Interface.  It enables
          high-speed communication between host and guest in a virtual
index 171b10f167a501a75fe8c7639c6d8dba0953f4b3..07401a3e256bf3b1c5f56c607d80f757b448105a 100644 (file)
@@ -4846,9 +4846,18 @@ static int __net_init bond_net_init(struct net *net)
 static void __net_exit bond_net_exit(struct net *net)
 {
        struct bond_net *bn = net_generic(net, bond_net_id);
+       struct bonding *bond, *tmp_bond;
+       LIST_HEAD(list);
 
        bond_destroy_sysfs(bn);
        bond_destroy_proc_dir(bn);
+
+       /* Kill off any bonds created after unregistering bond rtnl ops */
+       rtnl_lock();
+       list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+               unregister_netdevice_queue(bond->dev, &list);
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
 }
 
 static struct pernet_operations bond_net_ops = {
@@ -4902,8 +4911,8 @@ static void __exit bonding_exit(void)
 
        bond_destroy_debugfs();
 
-       unregister_pernet_subsys(&bond_net_ops);
        rtnl_link_unregister(&bond_link_ops);
+       unregister_pernet_subsys(&bond_net_ops);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
        /*
index 77ebae0ac64aa9a4e681a506129c5a835d85ef4e..0283f343b0d1efd82be8f7fcb8b392823d0111e0 100644 (file)
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 {
        struct bnx2x *bp = params->bp;
        u16 base_page, next_page, not_kr2_device, lane;
-       int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
-
-       if (!sigdet) {
-               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
-                       bnx2x_kr2_recovery(params, vars, phy);
-               return;
-       }
+       int sigdet;
 
        /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
         * since some switches tend to reinit the AN process and clear the
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
                vars->check_kr2_recovery_cnt--;
                return;
        }
+
+       sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+       if (!sigdet) {
+               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+                       bnx2x_kr2_recovery(params, vars, phy);
+                       DP(NETIF_MSG_LINK, "No sigdet\n");
+               }
+               return;
+       }
+
        lane = bnx2x_get_warpcore_lane(phy, params);
        CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
                          MDIO_AER_BLOCK_AER_REG, lane);
index e81a747ea8ce0ee4461fea92565fa32b15ea6682..8e58da909f5c1c52a16fb172d364e0eca27e7a91 100644 (file)
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
                                  q);
        }
 
-       if (!NO_FCOE(bp)) {
+       if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
                fp = &bp->fp[FCOE_IDX(bp)];
                queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
@@ -13354,6 +13354,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
        RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
+       bp->cnic_enabled = false;
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;
 
index ec800b093e7eb22536b7247289d13c0450b9552f..d2bea3f07c73782504dffef8862d5a77944c7290 100644 (file)
@@ -870,7 +870,7 @@ err_unlock:
 }
 
 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
-       void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+       int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
 {
        struct cb *cb;
        unsigned long flags;
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
        nic->cbs_avail--;
        cb->skb = skb;
 
+       err = cb_prepare(nic, cb, skb);
+       if (err)
+               goto err_unlock;
+
        if (unlikely(!nic->cbs_avail))
                err = -ENOSPC;
 
-       cb_prepare(nic, cb, skb);
 
        /* Order is important otherwise we'll be in a race with h/w:
         * set S-bit in current first, then clear S-bit in previous. */
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
        nic->mii.mdio_write = mdio_write;
 }
 
-static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
        struct config *config = &cb->u.config;
        u8 *c = (u8 *)config;
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
        netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
                     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
                     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+       return 0;
 }
 
 /*************************************************************************
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
        return fw;
 }
 
-static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+static int e100_setup_ucode(struct nic *nic, struct cb *cb,
                             struct sk_buff *skb)
 {
        const struct firmware *fw = (void *)skb;
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
        cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
 
        cb->command = cpu_to_le16(cb_ucode | cb_el);
+       return 0;
 }
 
 static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
        return err;
 }
 
-static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
        struct sk_buff *skb)
 {
        cb->command = cpu_to_le16(cb_iaaddr);
        memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
+       return 0;
 }
 
-static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
        cb->command = cpu_to_le16(cb_dump);
        cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
                offsetof(struct mem, dump_buf));
+       return 0;
 }
 
 static int e100_phy_check_without_mii(struct nic *nic)
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
        return 0;
 }
 
-static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
        struct net_device *netdev = nic->netdev;
        struct netdev_hw_addr *ha;
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
                memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
                        ETH_ALEN);
        }
+       return 0;
 }
 
 static void e100_set_multicast_list(struct net_device *netdev)
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
                  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
 }
 
-static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
+static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
        struct sk_buff *skb)
 {
+       dma_addr_t dma_addr;
        cb->command = nic->tx_command;
 
+       dma_addr = pci_map_single(nic->pdev,
+                                 skb->data, skb->len, PCI_DMA_TODEVICE);
+       /* If we can't map the skb, have the upper layer try later */
+       if (pci_dma_mapping_error(nic->pdev, dma_addr))
+               return -ENOMEM;
+
        /*
         * Use the last 4 bytes of the SKB payload packet as the CRC, used for
         * testing, ie sending frames with bad CRC.
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
        cb->u.tcb.tcb_byte_count = 0;
        cb->u.tcb.threshold = nic->tx_threshold;
        cb->u.tcb.tbd_count = 1;
-       cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
-               skb->data, skb->len, PCI_DMA_TODEVICE));
-       /* check for mapping failure? */
+       cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
        cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
        skb_tx_timestamp(skb);
+       return 0;
 }
 
 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
index cd345b8969bcaa75643a34da5819af5d569a1523..1e628ce572018b30dba025f2c39e3a631112abdf 100644 (file)
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev)
 
        netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
 
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->priv_flags |= IFF_UNICAST_FLT;
+
        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register\n");
                goto err_deinit;
        }
 
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->priv_flags |= IFF_UNICAST_FLT;
-
        netdev_info(dev, "mac: %pM\n", dev->dev_addr);
 
        platform_set_drvdata(pdev, pp->dev);
index 1cd77483da50114c2c309ab70a69dff134c3a919..f5f0f09e4cc5e2160a9fc40c8fad83b5ab295287 100644 (file)
@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device,
                        packet->trans_id;
 
                /* Notify the layer above us */
-               nvsc_packet->completion.send.send_completion(
-                       nvsc_packet->completion.send.send_completion_ctx);
+               if (nvsc_packet)
+                       nvsc_packet->completion.send.send_completion(
+                               nvsc_packet->completion.send.
+                               send_completion_ctx);
 
                num_outstanding_sends =
                        atomic_dec_return(&net_device->num_outstanding_sends);
@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device,
        int ret = 0;
        struct nvsp_message sendMessage;
        struct net_device *ndev;
+       u64 req_id;
 
        net_device = get_outbound_net_device(device);
        if (!net_device)
@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device,
                0xFFFFFFFF;
        sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
 
+       if (packet->completion.send.send_completion)
+               req_id = (u64)packet;
+       else
+               req_id = 0;
+
        if (packet->page_buf_cnt) {
                ret = vmbus_sendpacket_pagebuffer(device->channel,
                                                  packet->page_buf,
                                                  packet->page_buf_cnt,
                                                  &sendMessage,
                                                  sizeof(struct nvsp_message),
-                                                 (unsigned long)packet);
+                                                 req_id);
        } else {
                ret = vmbus_sendpacket(device->channel, &sendMessage,
                                sizeof(struct nvsp_message),
-                               (unsigned long)packet,
+                               req_id,
                                VM_PKT_DATA_INBAND,
                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-
        }
 
        if (ret == 0) {
index 5f85205cd12b81284ba7f23dfc89aa7ef50d3c10..8341b62e552166a4f796f7977b6f31a2b945bcdd 100644 (file)
@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
 
        if (status == 1) {
                netif_carrier_on(net);
-               netif_wake_queue(net);
                ndev_ctx = netdev_priv(net);
                schedule_delayed_work(&ndev_ctx->dwork, 0);
                schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
        } else {
                netif_carrier_off(net);
-               netif_tx_disable(net);
        }
 }
 
index 2b657d4d63a8cc86950db8f50c805c3f96e017d1..0775f0aefd1e67715ab77750ac898660d3ca6267 100644 (file)
@@ -61,9 +61,6 @@ struct rndis_request {
 
 static void rndis_filter_send_completion(void *ctx);
 
-static void rndis_filter_send_request_completion(void *ctx);
-
-
 
 static struct rndis_device *get_rndis_device(void)
 {
@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
                        packet->page_buf[0].len;
        }
 
-       packet->completion.send.send_completion_ctx = req;/* packet; */
-       packet->completion.send.send_completion =
-               rndis_filter_send_request_completion;
-       packet->completion.send.send_completion_tid = (unsigned long)dev;
+       packet->completion.send.send_completion = NULL;
 
        ret = netvsc_send(dev->net_dev->dev, packet);
        return ret;
@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx)
        /* Pass it back to the original handler */
        filter_pkt->completion(filter_pkt->completion_ctx);
 }
-
-
-static void rndis_filter_send_request_completion(void *ctx)
-{
-       /* Noop */
-}
index 6e66f9c6782b966ac249b4bd31ecc25e6a9456b2..988372d218a4dd24015a1b19d104839fab8f0080 100644 (file)
@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
        if (r) {
                ath_err(common,
                        "Unable to reset channel, reset status %d\n", r);
+
+               ath9k_hw_enable_interrupts(ah);
+               ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
+
                goto out;
        }
 
index 4469321c0eb3ea3924f68b02ff6863516ec9b333..35fc68be158d5b548e5c273ee85f7e3bf057a162 100644 (file)
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
                goto err;
        }
 
-       /* External image takes precedence if specified */
        if (brcmf_sdbrcm_download_code_file(bus)) {
                brcmf_err("dongle image file download failed\n");
                goto err;
        }
 
-       /* External nvram takes precedence if specified */
-       if (brcmf_sdbrcm_download_nvram(bus))
+       if (brcmf_sdbrcm_download_nvram(bus)) {
                brcmf_err("dongle nvram file download failed\n");
+               goto err;
+       }
 
        /* Take arm out of reset */
        if (brcmf_sdbrcm_download_state(bus, false)) {
index 2af9c0f0798d9e5cc2e484b0599a7d50aede68e5..ec46ffff54092b2a351720e214fb4d008c508947 100644 (file)
@@ -1891,8 +1891,10 @@ static s32
 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
              u8 key_idx, const u8 *mac_addr, struct key_params *params)
 {
+       struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_wsec_key key;
        s32 err = 0;
+       u8 keybuf[8];
 
        memset(&key, 0, sizeof(key));
        key.index = (u32) key_idx;
@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
                brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
                memcpy(key.data, params->key, key.len);
 
-               if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
-                       u8 keybuf[8];
+               if ((ifp->vif->mode != WL_MODE_AP) &&
+                   (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+                       brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
                        memcpy(keybuf, &key.data[24], sizeof(keybuf));
                        memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
                        memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                break;
        case WLAN_CIPHER_SUITE_TKIP:
                if (ifp->vif->mode != WL_MODE_AP) {
-                       brcmf_dbg(CONN, "Swapping key\n");
+                       brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
                        memcpy(keybuf, &key.data[24], sizeof(keybuf));
                        memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
                        memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
                err = -EAGAIN;
                goto done;
        }
-       switch (wsec & ~SES_OW_ENABLED) {
-       case WEP_ENABLED:
+       if (wsec & WEP_ENABLED) {
                sec = &profile->sec;
                if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
                        params.cipher = WLAN_CIPHER_SUITE_WEP40;
@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
                        params.cipher = WLAN_CIPHER_SUITE_WEP104;
                        brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
                }
-               break;
-       case TKIP_ENABLED:
+       } else if (wsec & TKIP_ENABLED) {
                params.cipher = WLAN_CIPHER_SUITE_TKIP;
                brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
-               break;
-       case AES_ENABLED:
+       } else if (wsec & AES_ENABLED) {
                params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
                brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
-               break;
-       default:
+       } else {
                brcmf_err("Invalid algo (0x%x)\n", wsec);
                err = -EINVAL;
                goto done;
@@ -3824,8 +3823,9 @@ exit:
 static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
-       s32 err = -EPERM;
+       s32 err;
        struct brcmf_fil_bss_enable_le bss_enable;
+       struct brcmf_join_params join_params;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
                /* Due to most likely deauths outstanding we sleep */
                /* first to make sure they get processed by fw. */
                msleep(400);
-               err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
-               if (err < 0) {
-                       brcmf_err("setting AP mode failed %d\n", err);
-                       goto exit;
-               }
+
+               memset(&join_params, 0, sizeof(join_params));
+               err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+                                            &join_params, sizeof(join_params));
+               if (err < 0)
+                       brcmf_err("SET SSID error (%d)\n", err);
                err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
-               if (err < 0) {
+               if (err < 0)
                        brcmf_err("BRCMF_C_UP error %d\n", err);
-                       goto exit;
-               }
+               err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
+               if (err < 0)
+                       brcmf_err("setting AP mode failed %d\n", err);
+               err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
+               if (err < 0)
+                       brcmf_err("setting INFRA mode failed %d\n", err);
        } else {
                bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
                bss_enable.enable = cpu_to_le32(0);
@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
        set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
        clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
 
-exit:
        return err;
 }
 
index d215b4d3c51b57db90eee2293f6c153059fe037b..e7f6deaf715e5020c56685c1b8daf4d55ac9b945 100644 (file)
@@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
                        queue_work(adapter->workqueue, &adapter->main_work);
 
                        /* Perform internal scan synchronously */
-                       if (!priv->scan_request)
+                       if (!priv->scan_request) {
+                               dev_dbg(adapter->dev, "wait internal scan\n");
                                mwifiex_wait_queue_complete(adapter, cmd_node);
+                       }
                } else {
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
@@ -1793,7 +1795,12 @@ check_next_scan:
                /* Need to indicate IOCTL complete */
                if (adapter->curr_cmd->wait_q_enabled) {
                        adapter->cmd_wait_q.status = 0;
-                       mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+                       if (!priv->scan_request) {
+                               dev_dbg(adapter->dev,
+                                       "complete internal scan\n");
+                               mwifiex_complete_cmd(adapter,
+                                                    adapter->curr_cmd);
+                       }
                }
                if (priv->report_scan_result)
                        priv->report_scan_result = false;
index 2bf4efa331867acaea139bb81dcca75a66532ef4..76cd47eb901e089bbf58669c8eb5fc7da6631a7a 100644 (file)
@@ -20,6 +20,7 @@ if RT2X00
 config RT2400PCI
        tristate "Ralink rt2400 (PCI/PCMCIA) support"
        depends on PCI
+       select RT2X00_LIB_MMIO
        select RT2X00_LIB_PCI
        select EEPROM_93CX6
        ---help---
@@ -31,6 +32,7 @@ config RT2400PCI
 config RT2500PCI
        tristate "Ralink rt2500 (PCI/PCMCIA) support"
        depends on PCI
+       select RT2X00_LIB_MMIO
        select RT2X00_LIB_PCI
        select EEPROM_93CX6
        ---help---
@@ -43,6 +45,7 @@ config RT61PCI
        tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
        depends on PCI
        select RT2X00_LIB_PCI
+       select RT2X00_LIB_MMIO
        select RT2X00_LIB_FIRMWARE
        select RT2X00_LIB_CRYPTO
        select CRC_ITU_T
@@ -57,6 +60,7 @@ config RT2800PCI
        tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
        depends on PCI || SOC_RT288X || SOC_RT305X
        select RT2800_LIB
+       select RT2X00_LIB_MMIO
        select RT2X00_LIB_PCI if PCI
        select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
        select RT2X00_LIB_FIRMWARE
@@ -185,6 +189,9 @@ endif
 config RT2800_LIB
        tristate
 
+config RT2X00_LIB_MMIO
+       tristate
+
 config RT2X00_LIB_PCI
        tristate
        select RT2X00_LIB
index 349d5b8284a4f6c64faaa58d6b750864c3a8252e..f069d8bc5b678332151d7a6fe337f44e29205d82 100644 (file)
@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
 rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS)    += rt2x00leds.o
 
 obj-$(CONFIG_RT2X00_LIB)               += rt2x00lib.o
+obj-$(CONFIG_RT2X00_LIB_MMIO)          += rt2x00mmio.o
 obj-$(CONFIG_RT2X00_LIB_PCI)           += rt2x00pci.o
 obj-$(CONFIG_RT2X00_LIB_SOC)           += rt2x00soc.o
 obj-$(CONFIG_RT2X00_LIB_USB)           += rt2x00usb.o
index 221beaaa83f12c59554302569639ca87f83765b5..dcfb54e0c516beb21faa4ae0b4409592b8455756 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 
 #include "rt2x00.h"
+#include "rt2x00mmio.h"
 #include "rt2x00pci.h"
 #include "rt2400pci.h"
 
index 39edc59e8d038613a07b4dce764e0033be2501cf..e1d2dc9ed28a6801c727b484a6e596e346d45175 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 
 #include "rt2x00.h"
+#include "rt2x00mmio.h"
 #include "rt2x00pci.h"
 #include "rt2500pci.h"
 
index ded73da4de0b0d52cd6331d837c8c68e86b87749..ba5a05625aaa70b5dcba84081a4bbea5a5970a9d 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/eeprom_93cx6.h>
 
 #include "rt2x00.h"
+#include "rt2x00mmio.h"
 #include "rt2x00pci.h"
 #include "rt2x00soc.h"
 #include "rt2800lib.h"
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c
new file mode 100644 (file)
index 0000000..d84a680
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+       Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+       <http://rt2x00.serialmonkey.com>
+
+       This program is free software; you can redistribute it and/or modify
+       it under the terms of the GNU General Public License as published by
+       the Free Software Foundation; either version 2 of the License, or
+       (at your option) any later version.
+
+       This program is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+       GNU General Public License for more details.
+
+       You should have received a copy of the GNU General Public License
+       along with this program; if not, write to the
+       Free Software Foundation, Inc.,
+       59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+       Module: rt2x00mmio
+       Abstract: rt2x00 generic mmio device routines.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+
+/*
+ * Register access.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+                          const unsigned int offset,
+                          const struct rt2x00_field32 field,
+                          u32 *reg)
+{
+       unsigned int i;
+
+       if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+               return 0;
+
+       for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+               rt2x00pci_register_read(rt2x00dev, offset, reg);
+               if (!rt2x00_get_field32(*reg, field))
+                       return 1;
+               udelay(REGISTER_BUSY_DELAY);
+       }
+
+       printk_once(KERN_ERR "%s() Indirect register access failed: "
+             "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
+       *reg = ~0;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
+
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue = rt2x00dev->rx;
+       struct queue_entry *entry;
+       struct queue_entry_priv_pci *entry_priv;
+       struct skb_frame_desc *skbdesc;
+       int max_rx = 16;
+
+       while (--max_rx) {
+               entry = rt2x00queue_get_entry(queue, Q_INDEX);
+               entry_priv = entry->priv_data;
+
+               if (rt2x00dev->ops->lib->get_entry_state(entry))
+                       break;
+
+               /*
+                * Fill in desc fields of the skb descriptor
+                */
+               skbdesc = get_skb_frame_desc(entry->skb);
+               skbdesc->desc = entry_priv->desc;
+               skbdesc->desc_len = entry->queue->desc_size;
+
+               /*
+                * DMA is already done, notify rt2x00lib that
+                * it finished successfully.
+                */
+               rt2x00lib_dmastart(entry);
+               rt2x00lib_dmadone(entry);
+
+               /*
+                * Send the frame to rt2x00lib for further processing.
+                */
+               rt2x00lib_rxdone(entry, GFP_ATOMIC);
+       }
+
+       return !max_rx;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
+
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
+{
+       unsigned int i;
+
+       for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
+               msleep(10);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
+
+/*
+ * Device initialization handlers.
+ */
+static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+                                    struct data_queue *queue)
+{
+       struct queue_entry_priv_pci *entry_priv;
+       void *addr;
+       dma_addr_t dma;
+       unsigned int i;
+
+       /*
+        * Allocate DMA memory for descriptor and buffer.
+        */
+       addr = dma_alloc_coherent(rt2x00dev->dev,
+                                 queue->limit * queue->desc_size,
+                                 &dma, GFP_KERNEL);
+       if (!addr)
+               return -ENOMEM;
+
+       memset(addr, 0, queue->limit * queue->desc_size);
+
+       /*
+        * Initialize all queue entries to contain valid addresses.
+        */
+       for (i = 0; i < queue->limit; i++) {
+               entry_priv = queue->entries[i].priv_data;
+               entry_priv->desc = addr + i * queue->desc_size;
+               entry_priv->desc_dma = dma + i * queue->desc_size;
+       }
+
+       return 0;
+}
+
+static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+                                    struct data_queue *queue)
+{
+       struct queue_entry_priv_pci *entry_priv =
+           queue->entries[0].priv_data;
+
+       if (entry_priv->desc)
+               dma_free_coherent(rt2x00dev->dev,
+                                 queue->limit * queue->desc_size,
+                                 entry_priv->desc, entry_priv->desc_dma);
+       entry_priv->desc = NULL;
+}
+
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue;
+       int status;
+
+       /*
+        * Allocate DMA
+        */
+       queue_for_each(rt2x00dev, queue) {
+               status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
+               if (status)
+                       goto exit;
+       }
+
+       /*
+        * Register interrupt handler.
+        */
+       status = request_irq(rt2x00dev->irq,
+                            rt2x00dev->ops->lib->irq_handler,
+                            IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+       if (status) {
+               ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
+                     rt2x00dev->irq, status);
+               goto exit;
+       }
+
+       return 0;
+
+exit:
+       queue_for_each(rt2x00dev, queue)
+               rt2x00pci_free_queue_dma(rt2x00dev, queue);
+
+       return status;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
+
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue;
+
+       /*
+        * Free irq line.
+        */
+       free_irq(rt2x00dev->irq, rt2x00dev);
+
+       /*
+        * Free DMA
+        */
+       queue_for_each(rt2x00dev, queue)
+               rt2x00pci_free_queue_dma(rt2x00dev, queue);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
+
+/*
+ * rt2x00mmio module information.
+ */
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2x00 mmio library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h
new file mode 100644 (file)
index 0000000..4ecaf60
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+       Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+       <http://rt2x00.serialmonkey.com>
+
+       This program is free software; you can redistribute it and/or modify
+       it under the terms of the GNU General Public License as published by
+       the Free Software Foundation; either version 2 of the License, or
+       (at your option) any later version.
+
+       This program is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+       GNU General Public License for more details.
+
+       You should have received a copy of the GNU General Public License
+       along with this program; if not, write to the
+       Free Software Foundation, Inc.,
+       59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+       Module: rt2x00mmio
+       Abstract: Data structures for the rt2x00mmio module.
+ */
+
+#ifndef RT2X00MMIO_H
+#define RT2X00MMIO_H
+
+#include <linux/io.h>
+
+/*
+ * Register access.
+ */
+static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
+                                          const unsigned int offset,
+                                          u32 *value)
+{
+       *value = readl(rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
+                                               const unsigned int offset,
+                                               void *value, const u32 length)
+{
+       memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
+}
+
+static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
+                                           const unsigned int offset,
+                                           u32 value)
+{
+       writel(value, rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+                                                const unsigned int offset,
+                                                const void *value,
+                                                const u32 length)
+{
+       __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
+}
+
+/**
+ * rt2x00pci_regbusy_read - Read from register with busy check
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ * @offset: Register offset
+ * @field: Field to check if register is busy
+ * @reg: Pointer to where register contents should be stored
+ *
+ * This function reads the given register and checks whether the
+ * register is busy. If it is, it waits a few microseconds before
+ * reading the register again. If the register is still busy after
+ * a certain timeout, this function returns 0 and sets *reg to ~0.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+                          const unsigned int offset,
+                          const struct rt2x00_field32 field,
+                          u32 *reg);
+
+/**
+ * struct queue_entry_priv_pci: Per entry PCI specific information
+ *
+ * @desc: Pointer to device descriptor
+ * @desc_dma: DMA pointer to &desc.
+ */
+struct queue_entry_priv_pci {
+       __le32 *desc;
+       dma_addr_t desc_dma;
+};
+
+/**
+ * rt2x00pci_rxdone - Handle RX done events
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ *
+ * Returns true if there are still rx frames pending and false if all
+ * pending rx frames were processed.
+ */
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
+
+/**
+ * rt2x00pci_flush_queue - Flush data queue
+ * @queue: Data queue to stop
+ * @drop: True to drop all pending frames.
+ *
+ * This will wait for a maximum of 100ms for the queue to
+ * become empty.
+ */
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
+
+/*
+ * Device initialization handlers.
+ */
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
+
+#endif /* RT2X00MMIO_H */
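
A minimal usage sketch of the MMIO helpers declared above may help readers new to rt2x00. EXAMPLE_CSR and EXAMPLE_CSR_BUSY are made-up names, not registers of any real rt2x00 chipset; only the rt2x00pci_* helpers and the FIELD32() macro come from the driver headers.

    #include "rt2x00.h"
    #include "rt2x00mmio.h"

    /* Made-up register offset and busy bit, purely for illustration. */
    #define EXAMPLE_CSR       0x0404
    #define EXAMPLE_CSR_BUSY  FIELD32(0x00000001)

    static int example_csr_access(struct rt2x00_dev *rt2x00dev)
    {
            u32 reg;

            /* Plain 32-bit read and write through the mapped CSR window. */
            rt2x00pci_register_read(rt2x00dev, EXAMPLE_CSR, &reg);
            rt2x00pci_register_write(rt2x00dev, EXAMPLE_CSR, reg | 0x1);

            /* Poll an indirect register until its busy bit clears; on
             * timeout the helper returns 0 and leaves *reg set to ~0. */
            if (!rt2x00pci_regbusy_read(rt2x00dev, EXAMPLE_CSR,
                                        EXAMPLE_CSR_BUSY, &reg))
                    return -EACCES;

            return 0;
    }
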
index a0c8caef3b0a8f3616a4dd4a71e6eebeb3b939ae..e87865e331139148c86a19b6efccf6efdaa329e2 100644 (file)
 #include "rt2x00.h"
 #include "rt2x00pci.h"
 
-/*
- * Register access.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
-                          const unsigned int offset,
-                          const struct rt2x00_field32 field,
-                          u32 *reg)
-{
-       unsigned int i;
-
-       if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
-               return 0;
-
-       for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-               rt2x00pci_register_read(rt2x00dev, offset, reg);
-               if (!rt2x00_get_field32(*reg, field))
-                       return 1;
-               udelay(REGISTER_BUSY_DELAY);
-       }
-
-       ERROR(rt2x00dev, "Indirect register access failed: "
-             "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
-       *reg = ~0;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
-
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue = rt2x00dev->rx;
-       struct queue_entry *entry;
-       struct queue_entry_priv_pci *entry_priv;
-       struct skb_frame_desc *skbdesc;
-       int max_rx = 16;
-
-       while (--max_rx) {
-               entry = rt2x00queue_get_entry(queue, Q_INDEX);
-               entry_priv = entry->priv_data;
-
-               if (rt2x00dev->ops->lib->get_entry_state(entry))
-                       break;
-
-               /*
-                * Fill in desc fields of the skb descriptor
-                */
-               skbdesc = get_skb_frame_desc(entry->skb);
-               skbdesc->desc = entry_priv->desc;
-               skbdesc->desc_len = entry->queue->desc_size;
-
-               /*
-                * DMA is already done, notify rt2x00lib that
-                * it finished successfully.
-                */
-               rt2x00lib_dmastart(entry);
-               rt2x00lib_dmadone(entry);
-
-               /*
-                * Send the frame to rt2x00lib for further processing.
-                */
-               rt2x00lib_rxdone(entry, GFP_ATOMIC);
-       }
-
-       return !max_rx;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
-
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
-{
-       unsigned int i;
-
-       for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
-               msleep(10);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
-
-/*
- * Device initialization handlers.
- */
-static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
-                                    struct data_queue *queue)
-{
-       struct queue_entry_priv_pci *entry_priv;
-       void *addr;
-       dma_addr_t dma;
-       unsigned int i;
-
-       /*
-        * Allocate DMA memory for descriptor and buffer.
-        */
-       addr = dma_alloc_coherent(rt2x00dev->dev,
-                                 queue->limit * queue->desc_size,
-                                 &dma, GFP_KERNEL);
-       if (!addr)
-               return -ENOMEM;
-
-       memset(addr, 0, queue->limit * queue->desc_size);
-
-       /*
-        * Initialize all queue entries to contain valid addresses.
-        */
-       for (i = 0; i < queue->limit; i++) {
-               entry_priv = queue->entries[i].priv_data;
-               entry_priv->desc = addr + i * queue->desc_size;
-               entry_priv->desc_dma = dma + i * queue->desc_size;
-       }
-
-       return 0;
-}
-
-static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
-                                    struct data_queue *queue)
-{
-       struct queue_entry_priv_pci *entry_priv =
-           queue->entries[0].priv_data;
-
-       if (entry_priv->desc)
-               dma_free_coherent(rt2x00dev->dev,
-                                 queue->limit * queue->desc_size,
-                                 entry_priv->desc, entry_priv->desc_dma);
-       entry_priv->desc = NULL;
-}
-
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue;
-       int status;
-
-       /*
-        * Allocate DMA
-        */
-       queue_for_each(rt2x00dev, queue) {
-               status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
-               if (status)
-                       goto exit;
-       }
-
-       /*
-        * Register interrupt handler.
-        */
-       status = request_irq(rt2x00dev->irq,
-                            rt2x00dev->ops->lib->irq_handler,
-                            IRQF_SHARED, rt2x00dev->name, rt2x00dev);
-       if (status) {
-               ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
-                     rt2x00dev->irq, status);
-               goto exit;
-       }
-
-       return 0;
-
-exit:
-       queue_for_each(rt2x00dev, queue)
-               rt2x00pci_free_queue_dma(rt2x00dev, queue);
-
-       return status;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
-
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue;
-
-       /*
-        * Free irq line.
-        */
-       free_irq(rt2x00dev->irq, rt2x00dev);
-
-       /*
-        * Free DMA
-        */
-       queue_for_each(rt2x00dev, queue)
-               rt2x00pci_free_queue_dma(rt2x00dev, queue);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
-
 /*
  * PCI driver handlers.
  */
index e2c99f2b9a1422f5e1dd1af1116f447f8b43c199..60d90b20f8b99216a8dd19b4926364be65725b28 100644 (file)
  */
 #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
 
-/*
- * Register access.
- */
-static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
-                                          const unsigned int offset,
-                                          u32 *value)
-{
-       *value = readl(rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
-                                               const unsigned int offset,
-                                               void *value, const u32 length)
-{
-       memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
-}
-
-static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
-                                           const unsigned int offset,
-                                           u32 value)
-{
-       writel(value, rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
-                                                const unsigned int offset,
-                                                const void *value,
-                                                const u32 length)
-{
-       __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
-}
-
-/**
- * rt2x00pci_regbusy_read - Read from register with busy check
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- * @offset: Register offset
- * @field: Field to check if register is busy
- * @reg: Pointer to where register contents should be stored
- *
- * This function will read the given register, and checks if the
- * register is busy. If it is, it will sleep for a couple of
- * microseconds before reading the register again. If the register
- * is not read after a certain timeout, this function will return
- * FALSE.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
-                          const unsigned int offset,
-                          const struct rt2x00_field32 field,
-                          u32 *reg);
-
-/**
- * struct queue_entry_priv_pci: Per entry PCI specific information
- *
- * @desc: Pointer to device descriptor
- * @desc_dma: DMA pointer to &desc.
- * @data: Pointer to device's entry memory.
- * @data_dma: DMA pointer to &data.
- */
-struct queue_entry_priv_pci {
-       __le32 *desc;
-       dma_addr_t desc_dma;
-};
-
-/**
- * rt2x00pci_rxdone - Handle RX done events
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- *
- * Returns true if there are still rx frames pending and false if all
- * pending rx frames were processed.
- */
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00pci_flush_queue - Flush data queue
- * @queue: Data queue to stop
- * @drop: True to drop all pending frames.
- *
- * This will wait for a maximum of 100ms, waiting for the queues
- * to become empty.
- */
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
-
-/*
- * Device initialization handlers.
- */
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
-
 /*
  * PCI driver handlers.
  */
index f95792cfcf8922e17222fec2feddc34f5117c308..9e3c8ff53e3fd9e0b4f723e96671eba005d75d62 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/eeprom_93cx6.h>
 
 #include "rt2x00.h"
+#include "rt2x00mmio.h"
 #include "rt2x00pci.h"
 #include "rt61pci.h"
 
index 8c0622399fcd67bd7353b6274a3d370b3d9caaa4..6ccb7457746b29e4e6ac0f53731bba725709f6a8 100644 (file)
@@ -769,6 +769,7 @@ struct qeth_card {
        unsigned long thread_start_mask;
        unsigned long thread_allowed_mask;
        unsigned long thread_running_mask;
+       struct task_struct *recovery_task;
        spinlock_t ip_lock;
        struct list_head ip_list;
        struct list_head *ip_tbd_list;
@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list;
 extern struct kmem_cache *qeth_core_header_cache;
 extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
 
+void qeth_set_recovery_task(struct qeth_card *);
+void qeth_clear_recovery_task(struct qeth_card *);
 void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
 int qeth_threads_running(struct qeth_card *, unsigned long);
 int qeth_wait_for_threads(struct qeth_card *, unsigned long);
index 0d73a999983d90ff632c0dcdcf6d54bedbd5a289..451f92020599e68add4a6b8a52888f672ad69084 100644 (file)
@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
        return "n/a";
 }
 
+void qeth_set_recovery_task(struct qeth_card *card)
+{
+       card->recovery_task = current;
+}
+EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
+
+void qeth_clear_recovery_task(struct qeth_card *card)
+{
+       card->recovery_task = NULL;
+}
+EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
+
+static bool qeth_is_recovery_task(const struct qeth_card *card)
+{
+       return card->recovery_task == current;
+}
+
 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
                         int clear_start_mask)
 {
@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);
 
 int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
 {
+       if (qeth_is_recovery_task(card))
+               return 0;
        return wait_event_interruptible(card->wait_q,
                        qeth_threads_running(card, threads) == 0);
 }
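
The qeth hunks above and below record which task runs device recovery so that qeth_wait_for_threads() returns immediately when called from that task instead of deadlocking on its own thread bit. A generic sketch of the same pattern follows, with made-up structure and function names; only task_struct, current and wait_event_interruptible() are real kernel interfaces.

    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Illustrative only -- not the qeth code. */
    struct example_dev {
            struct task_struct *recovery_task;  /* set by the recovery thread */
            wait_queue_head_t wait_q;
            unsigned long thread_running_mask;
    };

    static int example_wait_for_threads(struct example_dev *dev,
                                        unsigned long threads)
    {
            /* The recovery thread itself must never sleep here: it would
             * wait for the very thread bit it owns and never wake up. */
            if (dev->recovery_task == current)
                    return 0;
            return wait_event_interruptible(dev->wait_q,
                            (dev->thread_running_mask & threads) == 0);
    }
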
index d690166efeaffbd4d47f1d39d3111af232876e66..155b101bd7309432d32ce9ed915c1a3a1b889a60 100644 (file)
@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr)
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
+       qeth_set_recovery_task(card);
        __qeth_l2_set_offline(card->gdev, 1);
        rc = __qeth_l2_set_online(card->gdev, 1);
        if (!rc)
@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr)
                dev_warn(&card->gdev->dev, "The qeth device driver "
                                "failed to recover an error on the device\n");
        }
+       qeth_clear_recovery_task(card);
        qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
        qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
        return 0;
index 8710337dab3eceb4e5fa692d6a39accd697ba508..1f7edf1b26c33bff2b6a9baa6b163311a79fad6f 100644 (file)
@@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr)
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
+       qeth_set_recovery_task(card);
        __qeth_l3_set_offline(card->gdev, 1);
        rc = __qeth_l3_set_online(card->gdev, 1);
        if (!rc)
@@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr)
                dev_warn(&card->gdev->dev, "The qeth device driver "
                                "failed to recover an error on the device\n");
        }
+       qeth_clear_recovery_task(card);
        qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
        qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
        return 0;
index a044f593e8b98437e208cc1b785ca58371483890..d0fa4b6c551fd677bfa14252525717cf4f32469d 100644 (file)
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
                sdev->allow_restart = 1;
                blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
        }
-       scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
        spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
        return 0;
 }
 
index f328089a1060a95ac9872e6b480f5dfe3cb7e120..2197b57fb2251f541e0bfc0eab626ce670b66504 100644 (file)
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
                ipr_trace;
        }
 
-       list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        if (!ipr_is_naca_model(res))
                res->needs_sync_complete = 1;
 
@@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-       rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+       if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+               rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+       else
+               rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
        if (rc) {
                dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
                return rc;
@@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-       free_irq(pdev->irq, ioa_cfg);
+       if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+               free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
+       else
+               free_irq(pdev->irq, ioa_cfg);
 
        LEAVE;
 
@@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev)
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        flush_work(&ioa_cfg->work_q);
+       INIT_LIST_HEAD(&ioa_cfg->used_res_q);
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
        spin_lock(&ipr_driver_lock);
index aec2e0da50164ca688cc9e2ea95355f0aeaf4669..55cbd018015997bba484bfe5ffdbacabcc7e43db 100644 (file)
@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        linkrate  = phy->linkrate;
        memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
 
+       /* Handle vacant phy - rest of dr data is not valid so skip it */
+       if (phy->phy_state == PHY_VACANT) {
+               memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+               phy->attached_dev_type = NO_DEVICE;
+               if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
+                       phy->phy_id = phy_id;
+                       goto skip;
+               } else
+                       goto out;
+       }
+
        phy->attached_dev_type = to_dev_type(dr);
        if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
                goto out;
@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        phy->phy->maximum_linkrate = dr->pmax_linkrate;
        phy->phy->negotiated_linkrate = phy->linkrate;
 
+ skip:
        if (new_phy)
                if (sas_phy_add(phy->phy)) {
                        sas_phy_free(phy->phy);
@@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
        if (!disc_req)
                return -ENOMEM;
 
-       disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE);
+       disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
        if (!disc_resp) {
                kfree(disc_req);
                return -ENOMEM;
index 74b67d98e952b43fd980430b0fa0d9c37c55f741..d43faf34c1e29221ddc02b3aedcb2c566a0ed5e8 100644 (file)
@@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
-       int put_index = hq->host_index;
+       int put_index;
 
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
+       put_index = hq->host_index;
        temp_hrqe = hq->qe[hq->host_index].rqe;
        temp_drqe = dq->qe[dq->host_index].rqe;
 
index 1d82eef4e1ebf33f2abc59b5b56a9c3b00f7f907..b3db9dcc261957c88d975246dbac839a1f017805 100644 (file)
@@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
        }
 
-       /* No pending activities shall be there on the vha now */
-       if (ql2xextended_error_logging & ql_dbg_user)
-               msleep(random32()%10);  /* Just to see if something falls on
-                                       * the net we have placed below */
-
        BUG_ON(atomic_read(&vha->vref_count));
 
        qla2x00_free_fcports(vha);
index 1626de52e32a764ae370c7c3bccd3bc65c4a7162..fbc305f1c87c5dafc34e95c9c6aa8004699f3820 100644 (file)
@@ -15,6 +15,7 @@
  * | Mailbox commands             |       0x115b       | 0x111a-0x111b  |
  * |                              |                    | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
+ * |                              |                    | 0x1155-0x1158  |
  * | Device Discovery             |       0x2087       | 0x2020-0x2022, |
  * |                              |                    | 0x2016         |
  * | Queue Command and IO tracing |       0x3031       | 0x3006-0x300b  |
@@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
                void *ring;
        } aq, *aqp;
 
-       if (!ha->tgt.atio_q_length)
+       if (!ha->tgt.atio_ring)
                return ptr;
 
        num_queues = 1;
index c6509911772b5569e9de368bca1a995897359b43..65c5ff75936b1a31c7d37529a74558482168d8af 100644 (file)
@@ -863,7 +863,6 @@ typedef struct {
 #define        MBX_1           BIT_1
 #define        MBX_0           BIT_0
 
-#define RNID_TYPE_SET_VERSION  0x9
 #define RNID_TYPE_ASIC_TEMP    0xC
 
 /*
index eb3ca21a7f17b3c36d08ced7bb6cf0344a85f81e..b310fa97b545c97b81119271a3938ad1ed0c3c4e 100644 (file)
@@ -357,9 +357,6 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
 extern int
 qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
 
-extern int
-qla2x00_set_driver_version(scsi_qla_host_t *, char *);
-
 extern int
 qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
        uint16_t, uint16_t, uint16_t, uint16_t);
index edf4d14a13356f788bd3bbdcf35c245b3bbc440c..b59203393cb22f0a991d299fcc2eac285f7cac9e 100644 (file)
@@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
                qla24xx_read_fcp_prio_cfg(vha);
 
-       qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
-
        return (rval);
 }
 
@@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                        mq_size += ha->max_rsp_queues *
                            (rsp->length * sizeof(response_t));
                }
-               if (ha->tgt.atio_q_length)
+               if (ha->tgt.atio_ring)
                        mq_size += ha->tgt.atio_q_length * sizeof(request_t);
                /* Allocate memory for Fibre Channel Event Buffer. */
                if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
index 186dd59ce4fa097d2cb051a38db5b49f507eb2b0..43345af56431d011850ad5568b5154179b4b3432 100644 (file)
@@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
        return rval;
 }
 
-int
-qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
-{
-       int rval;
-       mbx_cmd_t mc;
-       mbx_cmd_t *mcp = &mc;
-       int len;
-       uint16_t dwlen;
-       uint8_t *str;
-       dma_addr_t str_dma;
-       struct qla_hw_data *ha = vha->hw;
-
-       if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
-               return QLA_FUNCTION_FAILED;
-
-       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
-           "Entered %s.\n", __func__);
-
-       str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
-       if (!str) {
-               ql_log(ql_log_warn, vha, 0x1156,
-                   "Failed to allocate driver version param.\n");
-               return QLA_MEMORY_ALLOC_FAILED;
-       }
-
-       memcpy(str, "\x7\x3\x11\x0", 4);
-       dwlen = str[0];
-       len = dwlen * sizeof(uint32_t) - 4;
-       memset(str + 4, 0, len);
-       if (len > strlen(version))
-               len = strlen(version);
-       memcpy(str + 4, version, len);
-
-       mcp->mb[0] = MBC_SET_RNID_PARAMS;
-       mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
-       mcp->mb[2] = MSW(LSD(str_dma));
-       mcp->mb[3] = LSW(LSD(str_dma));
-       mcp->mb[6] = MSW(MSD(str_dma));
-       mcp->mb[7] = LSW(MSD(str_dma));
-       mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-       mcp->in_mb = MBX_0;
-       mcp->tov = MBX_TOV_SECONDS;
-       mcp->flags = 0;
-       rval = qla2x00_mailbox_command(vha, mcp);
-
-       if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x1157,
-                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
-       } else {
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
-                   "Done %s.\n", __func__);
-       }
-
-       dma_pool_free(ha->s_dma_pool, str, str_dma);
-
-       return rval;
-}
-
 static int
 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
 {
index 2b6e478d9e33769f881d77b659839b29ae409b64..ec54036d1e12061e4ceee6ee8124270cf97f03f7 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.04.00.08-k"
+#define QLA2XXX_VERSION      "8.04.00.13-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   4
index 86974471af68bf1a27f9f940243fcf044d599a08..2a32036a9404be10941a71ac0f23afe46ca449f9 100644 (file)
@@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev)
        tpnt->disk = disk;
        disk->private_data = &tpnt->driver;
        disk->queue = SDp->request_queue;
+       /* SCSI tape doesn't register this gendisk via add_disk().  Manually
+        * take queue reference that release_disk() expects. */
+       if (!blk_get_queue(disk->queue))
+               goto out_put_disk;
        tpnt->driver = &st_template;
 
        tpnt->device = SDp;
@@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev)
        idr_preload_end();
        if (error < 0) {
                pr_warn("st: idr allocation failed: %d\n", error);
-               goto out_put_disk;
+               goto out_put_queue;
        }
        tpnt->index = error;
        sprintf(disk->disk_name, "st%d", tpnt->index);
@@ -4211,6 +4215,8 @@ out_remove_devs:
        spin_lock(&st_index_lock);
        idr_remove(&st_index_idr, tpnt->index);
        spin_unlock(&st_index_lock);
+out_put_queue:
+       blk_put_queue(disk->queue);
 out_put_disk:
        put_disk(disk);
        kfree(tpnt);
index ff1c5ee352cb91107d61f09ae277a33acff9fa26..cbe48ab417459de02ec06660934ec8b3ff8804c0 100644 (file)
@@ -409,6 +409,7 @@ static inline int core_alua_state_standby(
        case REPORT_LUNS:
        case RECEIVE_DIAGNOSTIC:
        case SEND_DIAGNOSTIC:
+               return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
@@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable(
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
+               return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
@@ -491,6 +493,7 @@ static inline int core_alua_state_transition(
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
+               return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
index 484b6a3c9b03714a3eae559e061f3767cdc3af72..302909ccf18383831e5f6091f167be354b4db989 100644 (file)
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,
                                mxvar_sdriver, brd->idx + i, &pdev->dev);
                if (IS_ERR(tty_dev)) {
                        retval = PTR_ERR(tty_dev);
-                       for (i--; i >= 0; i--)
+                       for (; i > 0; i--)
                                tty_unregister_device(mxvar_sdriver,
-                                       brd->idx + i);
+                                       brd->idx + i - 1);
                        goto err_relbrd;
                }
        }
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)
                        tty_dev = tty_port_register_device(&brd->ports[i].port,
                                        mxvar_sdriver, brd->idx + i, NULL);
                        if (IS_ERR(tty_dev)) {
-                               for (i--; i >= 0; i--)
+                               for (; i > 0; i--)
                                        tty_unregister_device(mxvar_sdriver,
-                                               brd->idx + i);
+                                               brd->idx + i - 1);
                                for (i = 0; i < brd->info->nports; i++)
                                        tty_port_destroy(&brd->ports[i].port);
                                free_irq(brd->irq, brd);
index b3455a970a1da01c0afcf013d8c31a49e1004005..35d9ab95c5cbb51d6092f67540678f00e4ee52bd 100644 (file)
@@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
 {
        struct uart_8250_port uart;
        int ret, line, flags = dev_id->driver_data;
-       struct resource *res = NULL;
 
        if (flags & UNKNOWN_DEV) {
                ret = serial_pnp_guess_board(dev);
@@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
        memset(&uart, 0, sizeof(uart));
        if (pnp_irq_valid(dev, 0))
                uart.port.irq = pnp_irq(dev, 0);
-       if ((flags & CIR_PORT) && pnp_port_valid(dev, 2))
-               res = pnp_get_resource(dev, IORESOURCE_IO, 2);
-       else if (pnp_port_valid(dev, 0))
-               res = pnp_get_resource(dev, IORESOURCE_IO, 0);
-       if (pnp_resource_enabled(res)) {
-               uart.port.iobase = res->start;
+       if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
+               uart.port.iobase = pnp_port_start(dev, 2);
+               uart.port.iotype = UPIO_PORT;
+       } else if (pnp_port_valid(dev, 0)) {
+               uart.port.iobase = pnp_port_start(dev, 0);
                uart.port.iotype = UPIO_PORT;
        } else if (pnp_mem_valid(dev, 0)) {
                uart.port.mapbase = pnp_mem_start(dev, 0);
index 4dc41408ecb7f659c70ee4daedd96148bb96382d..30d4f7a783cd1f0bbfdf6ebaf491f2400153eaf0 100644 (file)
@@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
        serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
        /* FIFO ENABLE, DMA MODE */
 
+       up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
+       /*
+        * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK enables a
+        * granularity of 1 for the RX TRIGGER level. Together with the
+        * RX FIFO trigger level configured below (nominally 16
+        * characters) and TLR[3:0] set to zero, this results in an RX
+        * FIFO threshold of 1 character instead of the 16 noted in the
+        * comment below.
+        */
+
        /* Set receive FIFO threshold to 16 characters and
         * transmit FIFO threshold to 16 spaces
         */
index 2968b4934659aab01dd626334a9ab7fc0e607383..957a0b98a5d90f689c68fb92ba76e60c72f89944 100644 (file)
@@ -74,9 +74,8 @@ enum {
 
 struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
-       struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+       struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
-       bool vs_endpoint;
 
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        }
 }
 
+static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+       struct vhost_virtqueue *vq, int head, unsigned out)
+{
+       struct virtio_scsi_cmd_resp __user *resp;
+       struct virtio_scsi_cmd_resp rsp;
+       int ret;
+
+       memset(&rsp, 0, sizeof(rsp));
+       rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+       resp = vq->iov[out].iov_base;
+       ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+       if (!ret)
+               vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+       else
+               pr_err("Faulted on virtio_scsi_cmd_resp\n");
+}
+
 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
        struct vhost_virtqueue *vq)
 {
+       struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_cmd *tv_cmd;
@@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
        int head, ret;
        u8 target;
 
-       /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-       if (unlikely(!vs->vs_endpoint))
+       /*
+        * We can handle the vq only after the endpoint is setup by calling the
+        * VHOST_SCSI_SET_ENDPOINT ioctl.
+        *
+        * TODO: Check that we are running from vhost_worker which acts
+        * as read-side critical section for vhost kind of RCU.
+        * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
+        */
+       vs_tpg = rcu_dereference_check(vq->private_data, 1);
+       if (!vs_tpg)
                return;
 
        mutex_lock(&vq->mutex);
@@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
                /* Extract the tpgt */
                target = v_req.lun[1];
-               tv_tpg = vs->vs_tpg[target];
+               tv_tpg = ACCESS_ONCE(vs_tpg[target]);
 
                /* Target does not exist, fail the request */
                if (unlikely(!tv_tpg)) {
-                       struct virtio_scsi_cmd_resp __user *resp;
-                       struct virtio_scsi_cmd_resp rsp;
-
-                       memset(&rsp, 0, sizeof(rsp));
-                       rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
-                       resp = vq->iov[out].iov_base;
-                       ret = __copy_to_user(resp, &rsp, sizeof(rsp));
-                       if (!ret)
-                               vhost_add_used_and_signal(&vs->dev,
-                                                         vq, head, 0);
-                       else
-                               pr_err("Faulted on virtio_scsi_cmd_resp\n");
-
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
 
@@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
                if (IS_ERR(tv_cmd)) {
                        vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
                                        PTR_ERR(tv_cmd));
-                       break;
+                       goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", tv_cmd, exp_data_len, data_direction);
 
                tv_cmd->tvc_vhost = vs;
                tv_cmd->tvc_vq = vq;
-
-               if (unlikely(vq->iov[out].iov_len !=
-                               sizeof(struct virtio_scsi_cmd_resp))) {
-                       vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-                               " bytes, out: %d, in: %d\n",
-                               vq->iov[out].iov_len, out, in);
-                       break;
-               }
-
                tv_cmd->tvc_resp = vq->iov[out].iov_base;
 
                /*
@@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(tv_cmd->tvc_cdb),
                                TCM_VHOST_MAX_CDB_SIZE);
-                       break; /* TODO */
+                       goto err_free;
                }
                tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 
@@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
                                        data_direction == DMA_TO_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
-                               break; /* TODO */
+                               goto err_free;
                        }
                }
 
@@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
        }
 
        mutex_unlock(&vq->mutex);
+       return;
+
+err_free:
+       vhost_scsi_free_cmd(tv_cmd);
+err_cmd:
+       vhost_scsi_send_bad_target(vs, vq, head, out);
+       mutex_unlock(&vq->mutex);
 }
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
@@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
        vhost_scsi_handle_vq(vs, vq);
 }
 
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+       vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+       int i;
+
+       for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+               vhost_scsi_flush_vq(vs, i);
+       vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint(
 {
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
+       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_virtqueue *vq;
+       int index, ret, i, len;
        bool match = false;
-       int index, ret;
 
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
@@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint(
                }
        }
 
+       len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+       vs_tpg = kzalloc(len, GFP_KERNEL);
+       if (!vs_tpg) {
+               mutex_unlock(&vs->dev.mutex);
+               return -ENOMEM;
+       }
+       if (vs->vs_tpg)
+               memcpy(vs_tpg, vs->vs_tpg, len);
+
        mutex_lock(&tcm_vhost_mutex);
        list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
                mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint(
                tv_tport = tv_tpg->tport;
 
                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-                       if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
+                       if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
                                mutex_unlock(&tv_tpg->tv_tpg_mutex);
                                mutex_unlock(&tcm_vhost_mutex);
                                mutex_unlock(&vs->dev.mutex);
+                               kfree(vs_tpg);
                                return -EEXIST;
                        }
                        tv_tpg->tv_tpg_vhost_count++;
-                       vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+                       vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
                        smp_mb__after_atomic_inc();
                        match = true;
                }
@@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint(
        if (match) {
                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
                       sizeof(vs->vs_vhost_wwpn));
-               vs->vs_endpoint = true;
+               for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+                       vq = &vs->vqs[i];
+                       /* Flushing the vhost_work acts as synchronize_rcu */
+                       mutex_lock(&vq->mutex);
+                       rcu_assign_pointer(vq->private_data, vs_tpg);
+                       vhost_init_used(vq);
+                       mutex_unlock(&vq->mutex);
+               }
                ret = 0;
        } else {
                ret = -EEXIST;
        }
 
+       /*
+        * Act as synchronize_rcu to make sure access to
+        * old vs->vs_tpg is finished.
+        */
+       vhost_scsi_flush(vs);
+       kfree(vs->vs_tpg);
+       vs->vs_tpg = vs_tpg;
+
        mutex_unlock(&vs->dev.mutex);
        return ret;
 }
@@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint(
 {
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
+       struct vhost_virtqueue *vq;
+       bool match = false;
        int index, ret, i;
        u8 target;
 
@@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint(
                        goto err_dev;
                }
        }
+
+       if (!vs->vs_tpg) {
+               mutex_unlock(&vs->dev.mutex);
+               return 0;
+       }
+
        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                target = i;
-
                tv_tpg = vs->vs_tpg[target];
                if (!tv_tpg)
                        continue;
@@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint(
                }
                tv_tpg->tv_tpg_vhost_count--;
                vs->vs_tpg[target] = NULL;
-               vs->vs_endpoint = false;
+               match = true;
                mutex_unlock(&tv_tpg->tv_tpg_mutex);
        }
+       if (match) {
+               for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+                       vq = &vs->vqs[i];
+                       /* Flushing the vhost_work acts as synchronize_rcu */
+                       mutex_lock(&vq->mutex);
+                       rcu_assign_pointer(vq->private_data, NULL);
+                       mutex_unlock(&vq->mutex);
+               }
+       }
+       /*
+        * Act as synchronize_rcu to make sure access to
+        * old vs->vs_tpg is finished.
+        */
+       vhost_scsi_flush(vs);
+       kfree(vs->vs_tpg);
+       vs->vs_tpg = NULL;
        mutex_unlock(&vs->dev.mutex);
+
        return 0;
 
 err_tpg:
@@ -899,6 +975,24 @@ err_dev:
        return ret;
 }
 
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+       if (features & ~VHOST_SCSI_FEATURES)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&vs->dev.mutex);
+       if ((features & (1 << VHOST_F_LOG_ALL)) &&
+           !vhost_log_access_ok(&vs->dev)) {
+               mutex_unlock(&vs->dev.mutex);
+               return -EFAULT;
+       }
+       vs->dev.acked_features = features;
+       smp_wmb();
+       vhost_scsi_flush(vs);
+       mutex_unlock(&vs->dev.mutex);
+       return 0;
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
        struct vhost_scsi *s;
@@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        return 0;
 }
 
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
-       vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
-       int i;
-
-       for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-               vhost_scsi_flush_vq(vs, i);
-       vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
-       if (features & ~VHOST_SCSI_FEATURES)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&vs->dev.mutex);
-       if ((features & (1 << VHOST_F_LOG_ALL)) &&
-           !vhost_log_access_ok(&vs->dev)) {
-               mutex_unlock(&vs->dev.mutex);
-               return -EFAULT;
-       }
-       vs->dev.acked_features = features;
-       smp_wmb();
-       vhost_scsi_flush(vs);
-       mutex_unlock(&vs->dev.mutex);
-       return 0;
-}
-
 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
                                unsigned long arg)
 {
index 9fcc70c11ceae14b78052dbab2b4eaafdee553db..e89fc313397233bf80a7d210d8b0db2a36492314 100644 (file)
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG
 
 config AT91RM9200_WATCHDOG
        tristate "AT91RM9200 watchdog"
-       depends on ARCH_AT91
+       depends on ARCH_AT91RM9200
        help
          Watchdog timer embedded into AT91RM9200 chips. This will reboot your
          system when the timeout is reached.
index aa85881d17b23f7ff6425be0140b6aed0c63c942..2647ad8e1f19c032eaa55a197dce9a5c215ddd8e 100644 (file)
@@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void)
 {
        int start_word_idx, start_bit_idx;
        int word_idx, bit_idx;
-       int i;
+       int i, irq;
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1324,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void)
 
        do {
                xen_ulong_t pending_words;
+               xen_ulong_t pending_bits;
+               struct irq_desc *desc;
 
                vcpu_info->evtchn_upcall_pending = 0;
 
@@ -1335,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void)
                 * selector flag. xchg_xen_ulong must contain an
                 * appropriate barrier.
                 */
+               if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
+                       int evtchn = evtchn_from_irq(irq);
+                       word_idx = evtchn / BITS_PER_LONG;
+                       pending_bits = evtchn % BITS_PER_LONG;
+                       if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
+                               desc = irq_to_desc(irq);
+                               if (desc)
+                                       generic_handle_irq_desc(irq, desc);
+                       }
+               }
+
                pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
 
                start_word_idx = __this_cpu_read(current_word_idx);
@@ -1343,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void)
                word_idx = start_word_idx;
 
                for (i = 0; pending_words != 0; i++) {
-                       xen_ulong_t pending_bits;
                        xen_ulong_t words;
 
                        words = MASK_LSBS(pending_words, word_idx);
@@ -1372,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void)
 
                        do {
                                xen_ulong_t bits;
-                               int port, irq;
-                               struct irq_desc *desc;
+                               int port;
 
                                bits = MASK_LSBS(pending_bits, bit_idx);
 
index 451fad96ecd115393207e69433162b7328f74923..ef96381569a4a1229b95f6b3470f232b3ebe8b0e 100644 (file)
@@ -317,6 +317,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
        unsigned long src_ptr;
        unsigned long dst_ptr;
        int overwrite_root = 0;
+       bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
 
        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                overwrite_root = 1;
@@ -326,6 +327,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 
        /* look for the key in the destination tree */
        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+       if (ret < 0)
+               return ret;
+
        if (ret == 0) {
                char *src_copy;
                char *dst_copy;
@@ -367,6 +371,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                        return 0;
                }
 
+               /*
+                * We need to load the old nbytes into the inode so when we
+                * replay the extents we've logged we get the right nbytes.
+                */
+               if (inode_item) {
+                       struct btrfs_inode_item *item;
+                       u64 nbytes;
+
+                       item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                             struct btrfs_inode_item);
+                       nbytes = btrfs_inode_nbytes(path->nodes[0], item);
+                       item = btrfs_item_ptr(eb, slot,
+                                             struct btrfs_inode_item);
+                       btrfs_set_inode_nbytes(eb, item, nbytes);
+               }
+       } else if (inode_item) {
+               struct btrfs_inode_item *item;
+
+               /*
+                * New inode, set nbytes to 0 so that nbytes comes out
+                * properly when we replay the extents.
+                */
+               item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
+               btrfs_set_inode_nbytes(eb, item, 0);
        }
 insert:
        btrfs_release_path(path);
@@ -486,7 +514,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        int found_type;
        u64 extent_end;
        u64 start = key->offset;
-       u64 saved_nbytes;
+       u64 nbytes = 0;
        struct btrfs_file_extent_item *item;
        struct inode *inode = NULL;
        unsigned long size;
@@ -496,10 +524,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        found_type = btrfs_file_extent_type(eb, item);
 
        if (found_type == BTRFS_FILE_EXTENT_REG ||
-           found_type == BTRFS_FILE_EXTENT_PREALLOC)
-               extent_end = start + btrfs_file_extent_num_bytes(eb, item);
-       else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+           found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+               nbytes = btrfs_file_extent_num_bytes(eb, item);
+               extent_end = start + nbytes;
+
+               /*
+                * We don't add to the inode's nbytes if we are prealloc or a
+                * hole.
+                */
+               if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
+                       nbytes = 0;
+       } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                size = btrfs_file_extent_inline_len(eb, item);
+               nbytes = btrfs_file_extent_ram_bytes(eb, item);
                extent_end = ALIGN(start + size, root->sectorsize);
        } else {
                ret = 0;
@@ -548,7 +585,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        }
        btrfs_release_path(path);
 
-       saved_nbytes = inode_get_bytes(inode);
        /* drop any overlapping extents */
        ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
        BUG_ON(ret);
@@ -635,7 +671,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                BUG_ON(ret);
        }
 
-       inode_set_bytes(inode, saved_nbytes);
+       inode_add_bytes(inode, nbytes);
        ret = btrfs_update_inode(trans, root, inode);
 out:
        if (inode)
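
With this change each replayed extent contributes to the inode's nbytes incrementally instead of the old save/restore of the pre-replay value: for example, replaying a logged regular extent adds its num_bytes via inode_add_bytes(), an extent whose disk_bytenr is 0 (a hole) adds nothing, and an inline extent adds its ram_bytes.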
index 991c63c6bdd053189082c6208407a5df55b29053..21b3a291c327c1e3871d110725ed972ce8ab21ba 100644 (file)
@@ -1575,14 +1575,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        }
                        break;
                case Opt_blank_pass:
-                       vol->password = NULL;
-                       break;
-               case Opt_pass:
                        /* passwords have to be handled differently
                         * to allow the character used for delimiter
                         * to be passed within them
                         */
 
+                       /*
+                        * Check if this is a case where the password
+                        * starts with a delimiter
+                        */
+                       tmp_end = strchr(data, '=');
+                       tmp_end++;
+                       if (!(tmp_end < end && tmp_end[1] == delim)) {
+                               /* No it is not. Set the password to NULL */
+                               vol->password = NULL;
+                               break;
+                       }
+                       /* Yes it is. Drop down to Opt_pass below.*/
+               case Opt_pass:
                        /* Obtain the value string */
                        value = strchr(data, '=');
                        value++;
index f5f7c06c36fb159f89a793510c62f3f9170c3e4d..a898b3d43ccf3990986d51be3305bdc3e6d665ef 100644 (file)
@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
                 * inode to the back of the list so we don't spin on it.
                 */
                if (!spin_trylock(&inode->i_lock)) {
-                       list_move_tail(&inode->i_lru, &sb->s_inode_lru);
+                       list_move(&inode->i_lru, &sb->s_inode_lru);
                        continue;
                }
 
index ac4fc9a8fdbc6c6133b186ddd5de476db9f03930..66b6664dcd4c4c9cd209d9420eb9de895cd1e344 100644 (file)
@@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
                           struct rpc_cred *cred)
 {
        struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
-       struct nfs_client *pos, *n, *prev = NULL;
+       struct nfs_client *pos, *prev = NULL;
        struct nfs4_setclientid_res clid = {
                .clientid       = new->cl_clientid,
                .confirm        = new->cl_confirm,
@@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new,
        int status = -NFS4ERR_STALE_CLIENTID;
 
        spin_lock(&nn->nfs_client_lock);
-       list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
+       list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos" */
-               if (pos->cl_cons_state < NFS_CS_READY)
+               if (pos->cl_cons_state > NFS_CS_READY) {
+                       atomic_inc(&pos->cl_count);
+                       spin_unlock(&nn->nfs_client_lock);
+
+                       if (prev)
+                               nfs_put_client(prev);
+                       prev = pos;
+
+                       status = nfs_wait_client_init_complete(pos);
+                       spin_lock(&nn->nfs_client_lock);
+                       if (status < 0)
+                               continue;
+               }
+               if (pos->cl_cons_state != NFS_CS_READY)
                        continue;
 
                if (pos->rpc_ops != new->rpc_ops)
@@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
                           struct rpc_cred *cred)
 {
        struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
-       struct nfs_client *pos, *n, *prev = NULL;
+       struct nfs_client *pos, *prev = NULL;
        int status = -NFS4ERR_STALE_CLIENTID;
 
        spin_lock(&nn->nfs_client_lock);
-       list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
+       list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
                /* If "pos" isn't marked ready, we can't trust the
                 * remaining fields in "pos", especially the client
                 * ID and serverowner fields.  Wait for CREATE_SESSION
                 * to finish. */
-               if (pos->cl_cons_state < NFS_CS_READY) {
+               if (pos->cl_cons_state > NFS_CS_READY) {
                        atomic_inc(&pos->cl_count);
                        spin_unlock(&nn->nfs_client_lock);
 
@@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
                                nfs_put_client(prev);
                        prev = pos;
 
-                       nfs4_schedule_lease_recovery(pos);
                        status = nfs_wait_client_init_complete(pos);
-                       if (status < 0) {
-                               nfs_put_client(pos);
-                               spin_lock(&nn->nfs_client_lock);
-                               continue;
+                       if (status == 0) {
+                               nfs4_schedule_lease_recovery(pos);
+                               status = nfs4_wait_clnt_recover(pos);
                        }
-                       status = pos->cl_cons_state;
                        spin_lock(&nn->nfs_client_lock);
                        if (status < 0)
                                continue;
                }
+               if (pos->cl_cons_state != NFS_CS_READY)
+                       continue;
 
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
@@ -469,17 +481,18 @@ int nfs41_walk_client_list(struct nfs_client *new,
                        continue;
 
                atomic_inc(&pos->cl_count);
-               spin_unlock(&nn->nfs_client_lock);
+               *result = pos;
+               status = 0;
                dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
                        __func__, pos, atomic_read(&pos->cl_count));
-
-               *result = pos;
-               return 0;
+               break;
        }
 
        /* No matching nfs_client found. */
        spin_unlock(&nn->nfs_client_lock);
        dprintk("NFS: <-- %s status = %d\n", __func__, status);
+       if (prev)
+               nfs_put_client(prev);
        return status;
 }
 #endif /* CONFIG_NFS_V4_1 */
index 26431cf62ddbc393fd5fe1e432742be37d06e12e..0ad025eb523bc6b651d82497c29a0d27ebdf3dac 100644 (file)
@@ -1046,6 +1046,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                /* Save the delegation */
                nfs4_stateid_copy(&stateid, &delegation->stateid);
                rcu_read_unlock();
+               nfs_release_seqid(opendata->o_arg.seqid);
                ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
                if (ret != 0)
                        goto out;
index 6ace365c6334db844af0c2c3f221cd871ef3658c..d41a3518509fa5a0cf1dbbdcbed87f39fa3eff45 100644 (file)
@@ -1886,7 +1886,13 @@ again:
                        status = PTR_ERR(clnt);
                        break;
                }
-               clp->cl_rpcclient = clnt;
+               /* Note: this is safe because we haven't yet marked the
+                * client as ready, so we are the only user of
+                * clp->cl_rpcclient
+                */
+               clnt = xchg(&clp->cl_rpcclient, clnt);
+               rpc_shutdown_client(clnt);
+               clnt = clp->cl_rpcclient;
                goto again;
 
        case -NFS4ERR_MINOR_VERS_MISMATCH:
index f7ed9ee46eb9d3818d2210c100731e5eda6ee35e..cbd0f1b324b972b96f036139fcd96bf53a03fb01 100644 (file)
@@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
        "x (dead)",             /*  64 */
        "K (wakekill)",         /* 128 */
        "W (waking)",           /* 256 */
+       "P (parked)",           /* 512 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
index 25f01d0bc149cc388ce5a8e66d556669809e2b34..b1b1fa6ffffecce461a44945524d60d9b46dd54c 100644 (file)
@@ -99,7 +99,12 @@ struct mmu_gather {
        unsigned int            need_flush : 1, /* Did free PTEs */
                                fast_mode  : 1; /* No batching   */
 
-       unsigned int            fullmm;
+       /* we are in the middle of an operation to clear
+        * a full mm and can make some optimizations */
+       unsigned int            fullmm : 1,
+       /* we have performed an operation which
+        * requires a complete flush of the tlb */
+                               need_flush_all : 1;
 
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
index 98503b7923698e3b527dc278aa32d5db1da1dfe1..d9a4f7f40f329b22c81f86e31c72b259d4acb4bf 100644 (file)
@@ -35,6 +35,7 @@ struct cpu_vfs_cap_data {
 #define _KERNEL_CAP_T_SIZE     (sizeof(kernel_cap_t))
 
 
+struct file;
 struct inode;
 struct dentry;
 struct user_namespace;
@@ -211,6 +212,7 @@ extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool nsown_capable(int cap);
 extern bool inode_capable(const struct inode *inode, int cap);
+extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
index 167abf9078020a179143b38c129c8cca0706fa62..52da2a250795f3aefc3d2acc927a229f8a662d59 100644 (file)
@@ -396,7 +396,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
-loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
 int ftrace_regex_release(struct inode *inode, struct file *file);
 
 void __init
@@ -569,6 +568,8 @@ static inline int
 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
+
 /* totally disable ftrace - can not re-enable after this */
 void ftrace_kill(void);
 
index e19ff30ad0a21453cece0df7524d0b336a69078f..e2091b88d24c972be51d80e1f110ad03c50d8902 100644 (file)
@@ -1611,6 +1611,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+
 
 struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int foll_flags,
index d35d2b6ddbfb69f098b1660fe280b4d2da8a5ffe..e692a022527bdaaace8b388268b0c280946fc5c2 100644 (file)
@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD              64
 #define TASK_WAKEKILL          128
 #define TASK_WAKING            256
-#define TASK_STATE_MAX         512
+#define TASK_PARKED            512
+#define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
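
For reference, the compile-time assertion above still holds after the change: TASK_STATE_MAX is now 1024, so ilog2(TASK_STATE_MAX) + 1 = 11, which matches the 11 characters in "RSDTtZXxKWP" (sizeof(TASK_STATE_TO_CHAR_STR) - 1 = 11).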
index eee7478cda701ddeabc00150f607f368a4239350..032c366ef1c62b203134e386ad1bc8a92a6c31fe 100644 (file)
@@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     This hook can be used by the module to update any security state
  *     associated with the TUN device's security structure.
 *     @security pointer to the TUN device's security structure.
+ * @skb_owned_by:
+ *     This hook sets the packet's owning sock.
+ *     @skb is the packet.
+ *     @sk the sock which owns the packet.
  *
  * Security hooks for XFRM operations.
  *
@@ -1638,6 +1642,7 @@ struct security_operations {
        int (*tun_dev_attach_queue) (void *security);
        int (*tun_dev_attach) (struct sock *sk, void *security);
        int (*tun_dev_open) (void *security);
+       void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security);
 int security_tun_dev_attach(struct sock *sk, void *security);
 int security_tun_dev_open(void *security);
 
+void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
+
 #else  /* CONFIG_SECURITY_NETWORK */
 static inline int security_unix_stream_connect(struct sock *sock,
                                               struct sock *other,
@@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security)
 {
        return 0;
 }
+
+static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+}
+
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
index cc7c19732389dba327a92742de4cfeba96b5903e..714cc9a54a4c38575f15581cb379a20f6615b3e2 100644 (file)
@@ -130,6 +130,14 @@ struct iucv_sock {
                                               enum iucv_tx_notify n);
 };
 
+struct iucv_skb_cb {
+       u32     class;          /* target class of message */
+       u32     tag;            /* tag associated with message */
+       u32     offset;         /* offset for skb receival */
+};
+
+#define IUCV_SKB_CB(__skb)     ((struct iucv_skb_cb *)&((__skb)->cb[0]))
+
 /* iucv socket options (SOL_IUCV) */
 #define SO_IPRMDATA_MSG        0x0080          /* send/recv IPRM_DATA msgs */
 #define SO_MSGLIMIT    0x1000          /* get/set IUCV MSGLIMIT */
index 5a8671e8a67ff8fa8f715311300901181555d78e..e5586caff67a973c962a3ed6bf43d4cc01664083 100644 (file)
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
-                               { 128, "W" }) : "R",
+                               { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
index 493d97259484cfef006c59b934be4fcf9663d1ac..f6c2ce5701e1c3c723d03e3d917c62f584c3a074 100644 (file)
@@ -392,6 +392,30 @@ bool ns_capable(struct user_namespace *ns, int cap)
 }
 EXPORT_SYMBOL(ns_capable);
 
+/**
+ * file_ns_capable - Determine if the file's opener had a capability in effect
+ * @file:  The file we want to check
+ * @ns:  The user namespace we want the capability in
+ * @cap: The capability to be tested for
+ *
+ * Return true if the task that opened the file had a capability in effect
+ * when the file was opened.
+ *
+ * This does not set PF_SUPERPRIV because the caller may not
+ * actually be privileged.
+ */
+bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap)
+{
+       if (WARN_ON_ONCE(!cap_valid(cap)))
+               return false;
+
+       if (security_capable(file->f_cred, ns, cap) == 0)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(file_ns_capable);
+
 /**
  * capable - Determine if the current task has a superior capability in effect
  * @cap: The capability to be tested for
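
A minimal, hypothetical illustration of how a caller could combine the new helper with the existing checks (the function name and the CAP_NET_ADMIN choice are assumptions, not part of this patch):

    /* Hypothetical caller: allow an operation if the caller is privileged
     * now, or if whoever opened the file descriptor was privileged in @ns
     * at open time. */
    static bool may_do_privileged_op(const struct file *file,
                                     struct user_namespace *ns)
    {
            return capable(CAP_NET_ADMIN) ||
                   file_ns_capable(file, ns, CAP_NET_ADMIN);
    }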
index 59412d037eed2a81163483b89fb38d9feb30623a..7e0962ed7f8ac850092323e56cbcb73a5a399a13 100644 (file)
@@ -4737,7 +4737,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        } else {
                if (arch_vma_name(mmap_event->vma)) {
                        name = strncpy(tmp, arch_vma_name(mmap_event->vma),
-                                      sizeof(tmp));
+                                      sizeof(tmp) - 1);
+                       tmp[sizeof(tmp) - 1] = '\0';
                        goto got_name;
                }
 
@@ -5986,6 +5987,7 @@ skip_type:
        if (pmu->pmu_cpu_context)
                goto got_cpu_context;
 
+       ret = -ENOMEM;
        pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
        if (!pmu->pmu_cpu_context)
                goto free_dev;
index d56a64c99a8b1ccf07d3ee252d73048181dcdfd9..eb675c4d59dfd440b9f93d92226ee2dc5ffe8290 100644 (file)
@@ -16,7 +16,7 @@ struct ring_buffer {
        int                             page_order;     /* allocation order  */
 #endif
        int                             nr_pages;       /* nr of data pages  */
-       int                             writable;       /* are we writable   */
+       int                             overwrite;      /* can overwrite itself */
 
        atomic_t                        poll;           /* POLL_ for wakeups */
 
index 23cb34ff3973c2dc7e4ce9cf41f744f93a36f83a..97fddb09762b0dc815af7d1f674993d87cd45574 100644 (file)
 static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
                              unsigned long offset, unsigned long head)
 {
-       unsigned long mask;
+       unsigned long sz = perf_data_size(rb);
+       unsigned long mask = sz - 1;
 
-       if (!rb->writable)
+       /*
+        * check if user-writable
+        * overwrite : over-write its own tail
+        * !overwrite: buffer possibly drops events.
+        */
+       if (rb->overwrite)
                return true;
 
-       mask = perf_data_size(rb) - 1;
+       /*
+        * verify that payload is not bigger than buffer
+        * otherwise masking logic may fail to detect
+        * the "not enough space" condition
+        */
+       if ((head - offset) > sz)
+               return false;
 
        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;
@@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
                rb->watermark = max_size / 2;
 
        if (flags & RING_BUFFER_WRITABLE)
-               rb->writable = 1;
+               rb->overwrite = 0;
+       else
+               rb->overwrite = 1;
 
        atomic_set(&rb->refcount, 1);
 
index cc47812d3feb0c86cbcf8f03e6ee81340ef7c57c..14be27feda491da1c3dc9990a5ae80ce649570aa 100644 (file)
@@ -63,6 +63,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+       .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
        .clock_base =
        {
                {
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;
 
-       raw_spin_lock_init(&cpu_base->lock);
-
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                cpu_base->clock_base[i].cpu_base = cpu_base;
                timerqueue_init_head(&cpu_base->clock_base[i].active);
index 691dc2ef9baf241c121144799360a7e1237afcb5..9eb7fed0bbaa9895973a14e551c76de31fffe533 100644 (file)
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-       __set_current_state(TASK_INTERRUPTIBLE);
+       __set_current_state(TASK_PARKED);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
-               __set_current_state(TASK_INTERRUPTIBLE);
+               __set_current_state(TASK_PARKED);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+       /* Must have done schedule() in kthread() before we set_task_cpu */
+       if (!wait_task_inactive(p, state)) {
+               WARN_ON(1);
+               return;
+       }
        /* It's safe because the task is inactive. */
        do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-       /* Must have done schedule() in kthread() before we set_task_cpu */
-       if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-               WARN_ON(1);
-               return;
-       }
-       __kthread_bind(p, cpu);
+       __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
        return NULL;
 }
 
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
+{
+       clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       /*
+        * We clear the IS_PARKED bit here as we don't wait
+        * until the task has left the park code. So if we'd
+        * park before that happens we'd see the IS_PARKED bit
+        * which might be about to be cleared.
+        */
+       if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+               if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+                       __kthread_bind(k, kthread->cpu, TASK_PARKED);
+               wake_up_state(k, TASK_PARKED);
+       }
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k:         thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
 {
        struct kthread *kthread = task_get_live_kthread(k);
 
-       if (kthread) {
-               clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-               /*
-                * We clear the IS_PARKED bit here as we don't wait
-                * until the task has left the park code. So if we'd
-                * park before that happens we'd see the IS_PARKED bit
-                * which might be about to be cleared.
-                */
-               if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-                       if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-                               __kthread_bind(k, kthread->cpu);
-                       wake_up_process(k);
-               }
-       }
+       if (kthread)
+               __kthread_unpark(k, kthread);
        put_task_struct(k);
 }
 
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
        trace_sched_kthread_stop(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-               clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+               __kthread_unpark(k, kthread);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
index c685e31492dfcd38ff830ffbc0aecbd77e29ee83..c3ae1446461c0ffc22f41cc03a7504a84576b48b 100644 (file)
@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
        u64 this_clock, remote_clock;
        u64 *ptr, old_val, val;
 
+#if BITS_PER_LONG != 64
+again:
+       /*
+        * Careful here: The local and the remote clock values need to
+        * be read out atomically as we need to compare the values and
+        * then update either the local or the remote side. So the
+        * cmpxchg64 below only protects one readout.
+        *
+        * We must reread via sched_clock_local() in the retry case on
+        * 32bit as an NMI could use sched_clock_local() via the
+        * tracer and hit between the readout of
+        * the low32bit and the high 32bit portion.
+        */
+       this_clock = sched_clock_local(my_scd);
+       /*
+        * We must enforce atomic readout on 32bit, otherwise the
+        * update on the remote cpu can hit in between the readout of
+        * the low32bit and the high 32bit portion.
+        */
+       remote_clock = cmpxchg64(&scd->clock, 0, 0);
+#else
+       /*
+        * On 64bit the read of [my]scd->clock is atomic versus the
+        * update, so we can avoid the above 32bit dance.
+        */
        sched_clock_local(my_scd);
 again:
        this_clock = my_scd->clock;
        remote_clock = scd->clock;
+#endif
 
        /*
         * Use the opportunity that we have both locks
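
Note on the 32-bit readout above: cmpxchg64(&scd->clock, 0, 0) compares the 64-bit value against 0 and would only store 0 if it matched, so in practice it simply returns the current value; the point is that the compare-and-exchange performs the 64-bit read atomically, which a plain load cannot guarantee on 32-bit.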
index 7f12624a393c3100506e5a3eaf588b5f463332ee..67d04651f44b294566422615b561310141f69b09 100644 (file)
@@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p)
 {
        struct rq *rq = task_rq(p);
 
-       BUG_ON(rq != this_rq());
-       BUG_ON(p == current);
+       if (WARN_ON_ONCE(rq != this_rq()) ||
+           WARN_ON_ONCE(p == current))
+               return;
+
        lockdep_assert_held(&rq->lock);
 
        if (!raw_spin_trylock(&p->pi_lock)) {
@@ -4999,7 +5001,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
 }
 
 static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX;
+static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 
 static void
 set_table_entry(struct ctl_table *entry,
index ed12cbb135f4b8ded0d3c7be26a67d1df8d2ffc2..e93cca92f38b77cce7ce1f49491a393fb79ee346 100644 (file)
@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
        t = tsk;
        do {
-               task_cputime(tsk, &utime, &stime);
+               task_cputime(t, &utime, &stime);
                times->utime += utime;
                times->stime += stime;
                times->sum_exec_runtime += task_sched_runtime(t);
index 8eaed9aa9cf0c1995520605af1de8ef3b9e95485..02fc5c9336735a834378dbbb6fff8e57b919b0fa 100644 (file)
@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
        }
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
-       if (ht->create)
-               ht->create(cpu);
+       if (ht->create) {
+               /*
+                * Make sure that the task has actually scheduled out
+                * into park position, before calling the create
+                * callback. At least the migration thread callback
+                * requires that the task is off the runqueue.
+                */
+               if (!wait_task_inactive(tsk, TASK_PARKED))
+                       WARN_ON(1);
+               else
+                       ht->create(cpu);
+       }
        return 0;
 }
 
index 39c9c4a2949f3166b114f11534957ff2f6060565..0da73cf73e60937ba8eb65a77f20febcb8264c45 100644 (file)
@@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd)
        system_state = SYSTEM_RESTART;
        usermodehelper_disable();
        device_shutdown();
-       syscore_shutdown();
 }
 
 /**
@@ -370,6 +369,7 @@ void kernel_restart(char *cmd)
 {
        kernel_restart_prepare(cmd);
        disable_nonboot_cpus();
+       syscore_shutdown();
        if (!cmd)
                printk(KERN_EMERG "Restarting system.\n");
        else
@@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state)
 void kernel_halt(void)
 {
        kernel_shutdown_prepare(SYSTEM_HALT);
+       disable_nonboot_cpus();
        syscore_shutdown();
        printk(KERN_EMERG "System halted.\n");
        kmsg_dump(KMSG_DUMP_HALT);
index 7e897106b7e0f30ae5ff9b5609ba35a97b4e4957..b3fde6d7b7fc47683244a5a432ab35a543551499 100644 (file)
@@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
                free_page(tmp);
        }
 
-       free_page((unsigned long)stat->pages);
        stat->pages = NULL;
        stat->start = NULL;
 
@@ -1053,6 +1052,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+loff_t
+ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
+{
+       loff_t ret;
+
+       if (file->f_mode & FMODE_READ)
+               ret = seq_lseek(file, offset, whence);
+       else
+               file->f_pos = ret = 1;
+
+       return ret;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -2613,7 +2625,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
  * routine, you can use ftrace_filter_write() for the write
  * routine if @flag has FTRACE_ITER_FILTER set, or
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_regex_lseek() should be used as the lseek routine, and
+ * ftrace_filter_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
  */
 int
@@ -2697,19 +2709,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
                                 inode, file);
 }
 
-loff_t
-ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
-{
-       loff_t ret;
-
-       if (file->f_mode & FMODE_READ)
-               ret = seq_lseek(file, offset, whence);
-       else
-               file->f_pos = ret = 1;
-
-       return ret;
-}
-
 static int ftrace_match(char *str, char *regex, int len, int type)
 {
        int matched = 0;
@@ -3441,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
 static int __init set_ftrace_notrace(char *str)
 {
-       strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+       strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
 __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
-       strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+       strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
 __setup("ftrace_filter=", set_ftrace_filter);
@@ -3571,7 +3570,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_regex_lseek,
+       .llseek = ftrace_filter_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3579,7 +3578,7 @@ static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = seq_read,
        .write = ftrace_notrace_write,
-       .llseek = ftrace_regex_lseek,
+       .llseek = ftrace_filter_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3784,8 +3783,8 @@ static const struct file_operations ftrace_graph_fops = {
        .open           = ftrace_graph_open,
        .read           = seq_read,
        .write          = ftrace_graph_write,
+       .llseek         = ftrace_filter_lseek,
        .release        = ftrace_graph_release,
-       .llseek         = seq_lseek,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -4440,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = {
        .open           = ftrace_pid_open,
        .write          = ftrace_pid_write,
        .read           = seq_read,
-       .llseek         = seq_lseek,
+       .llseek         = ftrace_filter_lseek,
        .release        = ftrace_pid_release,
 };
 
index 7ba7fc76f9ebc655980b734391613184cbe22da3..66338c4f7f4bdb6142c2b21a99f14292c708d050 100644 (file)
@@ -132,7 +132,7 @@ static char *default_bootup_tracer;
 
 static int __init set_cmdline_ftrace(char *str)
 {
-       strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
+       strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = 1;
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;
 
 static int __init set_trace_boot_options(char *str)
 {
-       strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+       strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        trace_boot_options = trace_boot_options_buf;
        return 0;
 }
index 42ca822fc7019670d8bea7291caea4110477749b..83a8b5b7bd35fa01a5e0302d83bf3d33d81f9557 100644 (file)
@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_regex_lseek,
+       .llseek = ftrace_filter_lseek,
        .release = ftrace_regex_release,
 };
 
index e07ee1fcd6f19da086439ffbeacd8376e4b3e533..a65486613d79775bf87918b390d8edd608d55cef 100644 (file)
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
        return kobj;
 }
 
+static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
+{
+       if (!kref_get_unless_zero(&kobj->kref))
+               kobj = NULL;
+       return kobj;
+}
+
 /*
  * kobject_cleanup - free kobject resources.
  * @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
 
        list_for_each_entry(k, &kset->list, entry) {
                if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
-                       ret = kobject_get(k);
+                       ret = kobject_get_unless_zero(k);
                        break;
                }
        }
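
The new kobject_get_unless_zero() helper matters here because kset_find_obj() walks the kset list while an entry's last reference may already have been dropped: kobject_get() would bump a refcount that has reached zero and hand back an object in the middle of being freed, whereas kref_get_unless_zero() fails in that case and the lookup treats the entry as not found.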
index 494526ae024a3173fc200d47e36087f6252852c2..ba94dec5b25900e47093544e11f8c765bf243f9d 100644 (file)
@@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
        tlb->mm = mm;
 
        tlb->fullmm     = fullmm;
+       tlb->need_flush_all = 0;
        tlb->start      = -1UL;
        tlb->end        = 0;
        tlb->need_flush = 0;
@@ -2392,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+/**
+ * vm_iomap_memory - remap memory to userspace
+ * @vma: user vma to map to
+ * @start: start of area
+ * @len: size of area
+ *
+ * This is a simplified io_remap_pfn_range() for common driver use. The
+ * driver just needs to give us the physical memory range to be mapped,
+ * we'll figure out the rest from the vma information.
+ *
+ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
+ * whatever write-combining details or similar.
+ */
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
+{
+       unsigned long vm_len, pfn, pages;
+
+       /* Check that the physical memory area passed in looks valid */
+       if (start + len < start)
+               return -EINVAL;
+       /*
+        * You *really* shouldn't map things that aren't page-aligned,
+        * but we've historically allowed it because IO memory might
+        * just have smaller alignment.
+        */
+       len += start & ~PAGE_MASK;
+       pfn = start >> PAGE_SHIFT;
+       pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
+       if (pfn + pages < pfn)
+               return -EINVAL;
+
+       /* We start the mapping 'vm_pgoff' pages into the area */
+       if (vma->vm_pgoff > pages)
+               return -EINVAL;
+       pfn += vma->vm_pgoff;
+       pages -= vma->vm_pgoff;
+
+       /* Can we fit all of the mapping? */
+       vm_len = vma->vm_end - vma->vm_start;
+       if (vm_len >> PAGE_SHIFT > pages)
+               return -EINVAL;
+
+       /* Ok, let it rip */
+       return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_iomap_memory);
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                     unsigned long addr, unsigned long end,
                                     pte_fn_t fn, void *data)
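
A minimal sketch of the intended use from a driver's mmap handler, with mydrv_mmap, mydrv_phys_base and mydrv_phys_size as illustrative placeholders rather than anything defined by this patch:

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* Hand the helper the whole physical window; it derives the
             * pfn, the length check and the vm_pgoff handling from the
             * vma itself. */
            return vm_iomap_memory(vma, mydrv_phys_base, mydrv_phys_size);
    }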
index 7b491006eaf4000424282979b0c4709325102152..737bef59ce899adc0a3e0ba67ea3d56536e5b7e5 100644 (file)
@@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        struct sk_buff *skb;
        int copied, error = -EINVAL;
 
+       msg->msg_namelen = 0;
+
        if (sock->state != SS_CONNECTED)
                return -ENOTCONN;
 
index 7b11f8bc5071cba0de51054913fa5f3e8bb3f063..e277e38f736b93a133a6cd43cc7b264acfbad641 100644 (file)
@@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
                ax25_address src;
                const unsigned char *mac = skb_mac_header(skb);
 
+               memset(sax, 0, sizeof(struct full_sockaddr_ax25));
                ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
                                &digi, NULL, NULL);
                sax->sax25_family = AF_AX25;
index d3ee69b35a78267f35b8ea4a9b32d367160eea21..0d1b08cc76e1d73acba966e4c22f64c0285cce05 100644 (file)
@@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;
 
+       msg->msg_namelen = 0;
+
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                return err;
        }
 
-       msg->msg_namelen = 0;
-
        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
index c23bae86263b6e98a21a8f69ae45190b827b2376..7c9224bcce178c493ce0a19f3f8250b1828d39b8 100644 (file)
@@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
                rfcomm_dlc_accept(d);
+               msg->msg_namelen = 0;
                return 0;
        }
 
index fad0302bdb325e5df700edf4fe323c6226216599..fb6192c9812e81bcac3d8dea31f60095aa40ffda 100644 (file)
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
            test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
                hci_conn_accept(pi->conn->hcon, 0);
                sk->sk_state = BT_CONFIG;
+               msg->msg_namelen = 0;
 
                release_sock(sk);
                return 0;
index 095259f839023a99a2127d1d4173bb29737471df..ff2ff3ce6965a73dcf74918cd5c5bcc1b4a7d51b 100644 (file)
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (m->msg_flags&MSG_OOB)
                goto read_error;
 
+       m->msg_namelen = 0;
+
        skb = skb_recv_datagram(sk, flags, 0 , &ret);
        if (!skb)
                goto read_error;
index 2d117dc5ebea0e027e34e844dc7685720a93c033..117814a7e73c720b20026289f4349528e8ed6f6a 100644 (file)
@@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,
                        if (gwj->src.dev == dev || gwj->dst.dev == dev) {
                                hlist_del(&gwj->list);
                                cgw_unregister_filter(gwj);
-                               kfree(gwj);
+                               kmem_cache_free(cgw_cache, gwj);
                        }
                }
        }
@@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)
        hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
                hlist_del(&gwj->list);
                cgw_unregister_filter(gwj);
-               kfree(gwj);
+               kmem_cache_free(cgw_cache, gwj);
        }
 }
 
@@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 
                hlist_del(&gwj->list);
                cgw_unregister_filter(gwj);
-               kfree(gwj);
+               kmem_cache_free(cgw_cache, gwj);
                err = 0;
                break;
        }
index b65441da74abd85cc8deacc7650c583013e9dcfb..23854b51a259cb8b65c2bb1ee4be8e32e0ca0eb8 100644 (file)
@@ -1072,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        cb->seq = net->dev_base_seq;
 
-       if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                        ifla_policy) >= 0) {
 
                if (tb[IFLA_EXT_MASK])
@@ -1922,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
        u32 ext_filter_mask = 0;
        u16 min_ifinfo_dump_size = 0;
 
-       if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+       if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                        ifla_policy) >= 0) {
                if (tb[IFLA_EXT_MASK])
                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
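
The header-length fix above matters because nlmsg_parse() starts scanning for netlink attributes hdrlen bytes past the message header; RTM_GETLINK dump requests carry a struct ifinfomsg, so passing sizeof(struct rtgenmsg) made the tail of that header be misread as attribute data when looking for IFLA_EXT_MASK.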
index 96083b7a436bc9efddd0b8b3473caa78286e5f02..c6287cd978c2db7b40f3aa3fe06b5d43bced64d5 100644 (file)
@@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work)
 {
        unsigned long now, next, next_sec, next_sched;
        struct in_ifaddr *ifa;
+       struct hlist_node *n;
        int i;
 
        now = jiffies;
        next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
 
-       rcu_read_lock();
        for (i = 0; i < IN4_ADDR_HSIZE; i++) {
+               bool change_needed = false;
+
+               rcu_read_lock();
                hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
                        unsigned long age;
 
@@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work)
 
                        if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
                            age >= ifa->ifa_valid_lft) {
-                               struct in_ifaddr **ifap ;
-
-                               rtnl_lock();
-                               for (ifap = &ifa->ifa_dev->ifa_list;
-                                    *ifap != NULL; ifap = &ifa->ifa_next) {
-                                       if (*ifap == ifa)
-                                               inet_del_ifa(ifa->ifa_dev,
-                                                            ifap, 1);
-                               }
-                               rtnl_unlock();
+                               change_needed = true;
                        } else if (ifa->ifa_preferred_lft ==
                                   INFINITY_LIFE_TIME) {
                                continue;
@@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work)
                                        next = ifa->ifa_tstamp +
                                               ifa->ifa_valid_lft * HZ;
 
-                               if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
-                                       ifa->ifa_flags |= IFA_F_DEPRECATED;
-                                       rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
-                               }
+                               if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
+                                       change_needed = true;
                        } else if (time_before(ifa->ifa_tstamp +
                                               ifa->ifa_preferred_lft * HZ,
                                               next)) {
@@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work)
                                       ifa->ifa_preferred_lft * HZ;
                        }
                }
+               rcu_read_unlock();
+               if (!change_needed)
+                       continue;
+               rtnl_lock();
+               hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
+                       unsigned long age;
+
+                       if (ifa->ifa_flags & IFA_F_PERMANENT)
+                               continue;
+
+                       /* We try to batch several events at once. */
+                       age = (now - ifa->ifa_tstamp +
+                              ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+
+                       if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
+                           age >= ifa->ifa_valid_lft) {
+                               struct in_ifaddr **ifap;
+
+                               for (ifap = &ifa->ifa_dev->ifa_list;
+                                    *ifap != NULL; ifap = &(*ifap)->ifa_next) {
+                                       if (*ifap == ifa) {
+                                               inet_del_ifa(ifa->ifa_dev,
+                                                            ifap, 1);
+                                               break;
+                                       }
+                               }
+                       } else if (ifa->ifa_preferred_lft !=
+                                  INFINITY_LIFE_TIME &&
+                                  age >= ifa->ifa_preferred_lft &&
+                                  !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
+                               ifa->ifa_flags |= IFA_F_DEPRECATED;
+                               rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
+                       }
+               }
+               rtnl_unlock();
        }
-       rcu_read_unlock();
 
        next_sec = round_jiffies_up(next);
        next_sched = next;
@@ -804,6 +830,8 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
                        return -EEXIST;
                ifa = ifa_existing;
                set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+               cancel_delayed_work(&check_lifetime_work);
+               schedule_delayed_work(&check_lifetime_work, 0);
                rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
                blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
        }
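
The restructuring of check_lifetime() above splits the walk into two passes because the original code took rtnl_lock(), a sleeping mutex, while still inside rcu_read_lock(): the first pass now runs under RCU and only records that a change is needed, and the second pass re-walks the hash chain under the RTNL to actually delete or deprecate addresses.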
index 5d0b4387cba6df401166a48f1e4cf3800d6ce6ff..b44cf81d817858737f81e880ac368f07d6ca48de 100644 (file)
@@ -2709,6 +2709,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        skb_reserve(skb, MAX_TCP_HEADER);
 
        skb_dst_set(skb, dst);
+       security_skb_owned_by(skb, sk);
 
        mss = dst_metric_advmss(dst);
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
index f6d629fd6aee152aaff674a03db7a3dd0f194134..46a5be85be87ebed43ec8b01d6b63263df556279 100644 (file)
@@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
                if (dst)
                        dst->ops->redirect(dst, sk, skb);
+               goto out;
        }
 
        if (type == ICMPV6_PKT_TOOBIG) {
index d28e7f014cc639779a4557203b4b2ba2f7e63927..e493b3397ae3337b4582d811febb932e0711a09e 100644 (file)
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
 
        IRDA_DEBUG(4, "%s()\n", __func__);
 
+       msg->msg_namelen = 0;
+
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &err);
        if (!skb)
index a7d11ffe428428f25194702c5728562fd87a8e2b..206ce6db2c36a2ae44d694614f8be29317f5b74d 100644 (file)
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
 
 #define TRGCLS_SIZE    (sizeof(((struct iucv_message *)0)->class))
 
-/* macros to set/get socket control buffer at correct offset */
-#define CB_TAG(skb)    ((skb)->cb)             /* iucv message tag */
-#define CB_TAG_LEN     (sizeof(((struct iucv_message *) 0)->tag))
-#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
-#define CB_TRGCLS_LEN  (TRGCLS_SIZE)
-
 #define __iucv_sock_wait(sk, condition, timeo, ret)                    \
 do {                                                                   \
        DEFINE_WAIT(__wait);                                            \
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
-       memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+       IUCV_SKB_CB(skb)->tag = txmsg.tag;
 
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
                        return -ENOMEM;
 
                /* copy target class to control buffer of new skb */
-               memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+               IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
 
                /* copy data fragment */
                memcpy(nskb->data, skb->data + copied, size);
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 
        /* store msg target class in the second 4 bytes of skb ctrl buffer */
        /* Note: the first 4 bytes are reserved for msg tag */
-       memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+       IUCV_SKB_CB(skb)->class = msg->class;
 
        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                }
        }
 
+       IUCV_SKB_CB(skb)->offset = 0;
        if (sock_queue_rcv_skb(sk, skb))
                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
 }
@@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        unsigned int copied, rlen;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
+       u32 offset;
+
+       msg->msg_namelen = 0;
 
        if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
@@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                return err;
        }
 
-       rlen   = skb->len;              /* real length of skb */
+       offset = IUCV_SKB_CB(skb)->offset;
+       rlen   = skb->len - offset;             /* real length of skb */
        copied = min_t(unsigned int, rlen, len);
        if (!rlen)
                sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
 
        cskb = skb;
-       if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+       if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -EFAULT;
@@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
         * get the trgcls from the control buffer of the skb due to
         * fragmentation of original iucv message. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
-                       CB_TRGCLS_LEN, CB_TRGCLS(skb));
+                      sizeof(IUCV_SKB_CB(skb)->class),
+                      (void *)&IUCV_SKB_CB(skb)->class);
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
                if (sk->sk_type == SOCK_STREAM) {
-                       skb_pull(skb, copied);
-                       if (skb->len) {
-                               skb_queue_head(&sk->sk_receive_queue, skb);
+                       if (copied < rlen) {
+                               IUCV_SKB_CB(skb)->offset = offset + copied;
                                goto done;
                        }
                }
@@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                spin_lock_bh(&iucv->message_q.lock);
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
+                       IUCV_SKB_CB(rskb)->offset = 0;
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                spin_lock_irqsave(&list->lock, flags);
 
                while (list_skb != (struct sk_buff *)list) {
-                       if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
+                       if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
@@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
+       IUCV_SKB_CB(skb)->offset = 0;
        spin_lock(&iucv->message_q.lock);
        if (skb_queue_empty(&iucv->backlog_skb_q)) {
                if (sock_queue_rcv_skb(sk, skb)) {
@@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                /* fall through and receive zero length data */
        case 0:
                /* plain data frame */
-               memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
-                      CB_TRGCLS_LEN);
+               IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
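
The af_iucv conversion replaces hand-computed offsets into skb->cb (the old CB_TAG/CB_TRGCLS macros)
with a typed per-skb control block, which also makes room for the new offset field: partially read
messages are now tracked by an offset in the control block instead of skb_pull() on a queued skb.
A hedged sketch of the skb->cb idiom with an illustrative struct name (the real one is IUCV_SKB_CB
in the IUCV headers):

	#include <linux/skbuff.h>
	#include <linux/bug.h>

	struct example_skb_cb {
		u32	class;		/* message target class */
		u32	tag;		/* message tag for completion matching */
		u32	offset;		/* bytes already handed to recvmsg */
	};

	#define EXAMPLE_SKB_CB(skb)	((struct example_skb_cb *)&((skb)->cb[0]))

	static void stash_metadata(struct sk_buff *skb, u32 tag, u32 class)
	{
		/* skb->cb is 48 bytes; the private struct must fit inside it */
		BUILD_BUG_ON(sizeof(struct example_skb_cb) > sizeof(skb->cb));

		EXAMPLE_SKB_CB(skb)->tag    = tag;
		EXAMPLE_SKB_CB(skb)->class  = class;
		EXAMPLE_SKB_CB(skb)->offset = 0;
	}
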
index c74f5a91ff6a3213209c23ff488f0b475a552857..b8a6039314e868781d3130f06fbb78ced6c4a678 100644 (file)
@@ -690,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
                lsa->l2tp_flowinfo = 0;
                lsa->l2tp_scope_id = 0;
+               lsa->l2tp_conn_id = 0;
                if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
                        lsa->l2tp_scope_id = IP6CB(skb)->iif;
        }
index 88709882c4641f7d147fbeaa9f48323cfc8637eb..48aaa89253e037c9b7f2e07f3bc1d03d837266fd 100644 (file)
@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
        int target;     /* Read at least this many bytes */
        long timeo;
 
+       msg->msg_namelen = 0;
+
        lock_sock(sk);
        copied = -ENOTCONN;
        if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
index d1fa1d9ffd2e82450d1a6c73fbfb55419cebe0c5..103bd704b5fc39e24d07714dd6831ca2d3bf036a 100644 (file)
@@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
        }
 
        if (sax != NULL) {
+               memset(sax, 0, sizeof(*sax));
                sax->sax25_family = AF_NETROM;
                skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
                              AX25_ADDR_LEN);
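
The netrom change above, like the rose, nfc and tipc hunks further down, zeroes the sockaddr before
filling individual members, so structure padding and unused fields never carry stale kernel stack bytes
back to user space through msg_name. A hedged illustration using sockaddr_in purely as an example
structure:

	#include <linux/in.h>
	#include <linux/socket.h>
	#include <linux/string.h>

	static void example_fill_name(struct msghdr *msg, __be32 saddr, __be16 sport)
	{
		struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

		if (!sin)
			return;

		/* zero the whole structure first so padding and any unset
		 * members cannot leak old stack contents to user space */
		memset(sin, 0, sizeof(*sin));
		sin->sin_family      = AF_INET;
		sin->sin_port        = sport;
		sin->sin_addr.s_addr = saddr;
		msg->msg_namelen     = sizeof(*sin);
	}
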
index 8f025746f3370b4296c8064c603e06085adcb42e..6c94447ec414c87479581b45fbcced1071f782cb 100644 (file)
@@ -646,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        pr_debug("%p %zu\n", sk, len);
 
+       msg->msg_namelen = 0;
+
        lock_sock(sk);
 
        if (sk->sk_state == LLCP_CLOSED &&
@@ -691,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
 
+               memset(sockaddr, 0, sizeof(*sockaddr));
                sockaddr->sa_family = AF_NFC;
                sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
                sockaddr->dsap = ui_cb->dsap;
index cf68e6e4054a6735694b1ce5bbee30fefbb02899..9c834745159786e5e1d4338f51860be00943a0fd 100644 (file)
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
        skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
        if (srose != NULL) {
+               memset(srose, 0, msg->msg_namelen);
                srose->srose_family = AF_ROSE;
                srose->srose_addr   = rose->dest_addr;
                srose->srose_call   = rose->dest_call;
index dcc446e7fbf6b6c0d639e2889438bc5f2aa65df2..d5f35f15af983e7a6955c576460a092ec70c0bcb 100644 (file)
@@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
        err = rpciod_up();
        if (err)
                goto out_no_rpciod;
-       err = -EINVAL;
-       if (!xprt)
-               goto out_no_xprt;
 
+       err = -EINVAL;
        if (args->version >= program->nrvers)
                goto out_err;
        version = program->version[args->version];
@@ -382,10 +380,9 @@ out_no_principal:
 out_no_stats:
        kfree(clnt);
 out_err:
-       xprt_put(xprt);
-out_no_xprt:
        rpciod_down();
 out_no_rpciod:
+       xprt_put(xprt);
        return ERR_PTR(err);
 }
 
@@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
        new = rpc_new_client(args, xprt);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
-               goto out_put;
+               goto out_err;
        }
 
        atomic_inc(&clnt->cl_count);
@@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
        new->cl_chatty = clnt->cl_chatty;
        return new;
 
-out_put:
-       xprt_put(xprt);
 out_err:
        dprintk("RPC:       %s: returned error %d\n", __func__, err);
        return ERR_PTR(err);
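
The sunrpc rework is a goto-ladder cleanup: rpc_new_client() now drops the xprt reference once, in the
common unwind path, and __rpc_clone_client() reuses the plain out_err label instead of a second out_put
that also released the xprt. The underlying idiom is that each failure jumps to a label which releases
exactly what has been acquired so far, nothing more. A hedged sketch with purely illustrative names:

	struct thing { void *a, *b; };

	/* acquire_a, acquire_b and release_a stand in for real helpers */
	static int acquire_a(struct thing *t) { return 0; }
	static int acquire_b(struct thing *t) { return 0; }
	static void release_a(struct thing *t) { }

	static int setup_thing(struct thing *t)
	{
		int err;

		err = acquire_a(t);
		if (err)
			goto out;		/* nothing acquired yet */

		err = acquire_b(t);
		if (err)
			goto out_release_a;	/* undo only what succeeded */

		return 0;

	out_release_a:
		release_a(t);
	out:
		return err;
	}
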
index a9622b6cd91689ae17ae92b9226c3705067a1a8a..515ce38e4f4c7286023f32f4c8c692af2787ec80 100644 (file)
@@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
        if (addr) {
                addr->family = AF_TIPC;
                addr->addrtype = TIPC_ADDR_ID;
+               memset(&addr->addr, 0, sizeof(addr->addr));
                addr->addr.id.ref = msg_origport(msg);
                addr->addr.id.node = msg_orignode(msg);
                addr->addr.name.domain = 0;     /* could leave uninitialized */
@@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
                goto exit;
        }
 
+       /* will be updated in set_orig_addr() if needed */
+       m->msg_namelen = 0;
+
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:
 
@@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
                goto exit;
        }
 
+       /* will be updated in set_orig_addr() if needed */
+       m->msg_namelen = 0;
+
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
index d8079daf1bdeaed0f341aaac673c8b9e1da7d96b..7f93e2a42d7a57f0cbdbe2305c942cce7b544d1b 100644 (file)
@@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
        vsk = vsock_sk(sk);
        err = 0;
 
+       msg->msg_namelen = 0;
+
        lock_sock(sk);
 
        if (sk->sk_state != SS_CONNECTED) {
index 1f6508e249ae5934ce9257ebe5cc3e5941300ffa..5e04d3d96285daaa01ff8d35e58081ce4807f093 100644 (file)
@@ -1736,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
        if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
                return -EOPNOTSUPP;
 
+       msg->msg_namelen = 0;
+
        /* Retrieve the head sk_buff from the socket's receive queue. */
        err = 0;
        skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
@@ -1768,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
        if (err)
                goto out;
 
-       msg->msg_namelen = 0;
        if (msg->msg_name) {
                struct sockaddr_vm *vm_addr;
 
index 09d994d192ffa1c78b963933daad73e4240dfe4b..482c70e7012727fe66e400872379e115b7a7caad 100644 (file)
@@ -224,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work)
        rtnl_lock();
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
 
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                wdev_lock(wdev);
@@ -248,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work)
                wdev_unlock(wdev);
        }
 
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        rtnl_unlock();
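
The sme.c hunk adds rdev->sched_scan_mtx to the locks held around the wdev_list walk, taking it after
devlist_mtx and releasing it before, so every code path acquires the two mutexes in one fixed order;
consistent ordering is what rules out AB-BA deadlocks against the scan paths that already hold
sched_scan_mtx. A small, hedged sketch of the pattern with illustrative lock names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(outer_mtx);		/* e.g. a device-list lock */
	static DEFINE_MUTEX(inner_mtx);		/* e.g. a per-feature lock */

	static void walk_protected_state(void)
	{
		/* always acquire in the same order everywhere ... */
		mutex_lock(&outer_mtx);
		mutex_lock(&inner_mtx);

		/* ... work on state guarded by both locks ... */

		/* ... and release in the reverse order */
		mutex_unlock(&inner_mtx);
		mutex_unlock(&outer_mtx);
	}
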
index 579775088967fdb11a044beb75c70de48d572ad6..6783c3e6c88e24fa5e2dacd07fd64ea5b61ac636 100644 (file)
@@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security)
 {
        return 0;
 }
+
+static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+}
+
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops)
        set_to_cap_if_null(ops, tun_dev_open);
        set_to_cap_if_null(ops, tun_dev_attach_queue);
        set_to_cap_if_null(ops, tun_dev_attach);
+       set_to_cap_if_null(ops, skb_owned_by);
 #endif /* CONFIG_SECURITY_NETWORK */
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
        set_to_cap_if_null(ops, xfrm_policy_alloc_security);
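
Adding an LSM hook in this era meant three pieces: an empty cap_* default in security/capability.c (the
cap_skb_owned_by() above), an entry in security_fixup_ops() so a module that does not implement the hook
falls back to that default, and the security_*() wrapper that follows in security.c. A simplified, hedged
sketch of the fixup macro's shape (the real set_to_cap_if_null definition lives at the top of
security/capability.c):

	/* simplified form: any hook left NULL falls back to the cap_* default */
	#define set_to_cap_if_null(ops, function)			\
		do {							\
			if (!(ops)->function)				\
				(ops)->function = cap_##function;	\
		} while (0)

	/* then, inside security_fixup_ops(): set_to_cap_if_null(ops, skb_owned_by); */
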
index 7b88c6aeaed43e5f37b449fd8ec87610b3b69a93..03f248b84e9fd964a8c2ba706916ce3a4a9a3f34 100644 (file)
@@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security)
 }
 EXPORT_SYMBOL(security_tun_dev_open);
 
+void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+       security_ops->skb_owned_by(skb, sk);
+}
+
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
index 2fa28c88900c1cb4ad7a1cc57e7b28421f4c1587..7171a957b9335694c1bbd1c5c9a41e2ec13bbda9 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/tty.h>
 #include <net/icmp.h>
 #include <net/ip.h>            /* for local_port_range[] */
+#include <net/sock.h>
 #include <net/tcp.h>           /* struct or_callable used in sock_rcv_skb */
 #include <net/net_namespace.h>
 #include <net/netlabel.h>
@@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
        selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
 }
 
+static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+       skb_set_owner_w(skb, sk);
+}
+
 static int selinux_secmark_relabel_packet(u32 sid)
 {
        const struct task_security_struct *__tsec;
@@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = {
        .tun_dev_attach_queue =         selinux_tun_dev_attach_queue,
        .tun_dev_attach =               selinux_tun_dev_attach,
        .tun_dev_open =                 selinux_tun_dev_open,
+       .skb_owned_by =                 selinux_skb_owned_by,
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
        .xfrm_policy_alloc_security =   selinux_xfrm_policy_alloc,
index b82bbf584146dc0b7c1bbaeb416a96f03f0d67af..34d0201d6a78a86a5dd6f1568dd898919fb8b512 100644 (file)
@@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
                            struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = w->codec;
-       struct arizona *arizona = dev_get_drvdata(codec->dev);
+       struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
        struct regmap *regmap = codec->control_data;
        const struct reg_default *patch = NULL;
        int i, patch_size;
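
The wm5102 fix changes where the driver looks for the arizona core state: the CODEC is an MFD child, and
the core attached its driver data to the parent device, so dev_get_drvdata() must be called on
codec->dev->parent rather than on the CODEC's own device. A hedged sketch of that lookup in an MFD child
driver; the struct and probe function are illustrative only:

	#include <linux/platform_device.h>

	struct example_mfd_core {		/* illustrative core state */
		int irq;
	};

	static int example_child_probe(struct platform_device *pdev)
	{
		/* the MFD core driver did dev_set_drvdata(parent, core) */
		struct example_mfd_core *core = dev_get_drvdata(pdev->dev.parent);

		if (!core)
			return -EPROBE_DEFER;

		dev_info(&pdev->dev, "parent provides irq %d\n", core->irq);
		return 0;
	}
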
index 134e41c870b9a0077ae364eef131e471d15c96bf..f8a31ad0b203bcb64a027960775c465974a56970 100644 (file)
@@ -1083,6 +1083,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
        { "ROP", NULL, "Right Speaker PGA" },
        { "RON", NULL, "Right Speaker PGA" },
 
+       { "Charge Pump", NULL, "CLK_DSP" },
+
        { "Left Headphone Output PGA", NULL, "Charge Pump" },
        { "Right Headphone Output PGA", NULL, "Charge Pump" },
        { "Left Line Output PGA", NULL, "Charge Pump" },
index d7231e336a7c678e4b8b5f86ef0efa4a2eb04e8a..6bbeb0bf1a73a58fa9e3a573e00655dc3e8763d2 100644 (file)
@@ -972,6 +972,7 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {
 static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
 {
        struct i2s_dai *i2s;
+       int ret;
 
        i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL);
        if (i2s == NULL)
@@ -996,15 +997,17 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
                i2s->i2s_dai_drv.capture.channels_max = 2;
                i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
                i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
+               dev_set_drvdata(&i2s->pdev->dev, i2s);
        } else {        /* Create a new platform_device for Secondary */
-               i2s->pdev = platform_device_register_resndata(NULL,
-                               "samsung-i2s-sec", -1, NULL, 0, NULL, 0);
+               i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1);
                if (IS_ERR(i2s->pdev))
                        return NULL;
-       }
 
-       /* Pre-assign snd_soc_dai_set_drvdata */
-       dev_set_drvdata(&i2s->pdev->dev, i2s);
+               platform_set_drvdata(i2s->pdev, i2s);
+               ret = platform_device_add(i2s->pdev);
+               if (ret < 0)
+                       return NULL;
+       }
 
        return i2s;
 }
@@ -1107,6 +1110,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)
 
        if (samsung_dai_type == TYPE_SEC) {
                sec_dai = dev_get_drvdata(&pdev->dev);
+               if (!sec_dai) {
+                       dev_err(&pdev->dev, "Unable to get drvdata\n");
+                       return -EFAULT;
+               }
                snd_soc_register_dai(&sec_dai->pdev->dev,
                        &sec_dai->i2s_dai_drv);
                asoc_dma_platform_register(&pdev->dev);
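
The i2s change splits creation of the secondary DAI's platform device into alloc / set drvdata / add, so
the driver data is in place before the device becomes visible and can be bound; with
platform_device_register_resndata() the probe could run before dev_set_drvdata(), which is the gap the
new "if (!sec_dai)" check in samsung_i2s_probe() guards against. A hedged sketch of that ordering with
illustrative names:

	#include <linux/platform_device.h>

	static struct platform_device *example_register_secondary(void *drvdata)
	{
		struct platform_device *pdev;
		int ret;

		pdev = platform_device_alloc("example-secondary", -1);
		if (!pdev)
			return NULL;

		/* attach state before the device can be bound to a driver */
		platform_set_drvdata(pdev, drvdata);

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			return NULL;
		}
		return pdev;
	}
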
index b5b3db71e25303813bfe216e7f0c7020dcf97ff3..ed0bfb0ddb96ffb8bb0321810085b8f9409623a1 100644 (file)
@@ -211,19 +211,27 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
        if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
                ret = platform->driver->compr_ops->set_params(cstream, params);
                if (ret < 0)
-                       goto out;
+                       goto err;
        }
 
        if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {
                ret = rtd->dai_link->compr_ops->set_params(cstream);
                if (ret < 0)
-                       goto out;
+                       goto err;
        }
 
        snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
                                SND_SOC_DAPM_STREAM_START);
 
-out:
+       /* cancel any delayed stream shutdown that is pending */
+       rtd->pop_wait = 0;
+       mutex_unlock(&rtd->pcm_mutex);
+
+       cancel_delayed_work_sync(&rtd->delayed_work);
+
+       return ret;
+
+err:
        mutex_unlock(&rtd->pcm_mutex);
        return ret;
 }
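
The soc-compress fix replaces the shared "out" exit with separate paths so the pcm_mutex is dropped
before cancel_delayed_work_sync() runs: the delayed work being cancelled takes the same mutex, and
waiting for it to finish while holding the lock could deadlock. A hedged sketch of the general rule with
illustrative names:

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	static DEFINE_MUTEX(state_mutex);

	static void delayed_shutdown(struct work_struct *work)
	{
		mutex_lock(&state_mutex);
		/* ... power things down ... */
		mutex_unlock(&state_mutex);
	}
	static DECLARE_DELAYED_WORK(shutdown_work, delayed_shutdown);

	static void example_set_params(void)
	{
		mutex_lock(&state_mutex);
		/* ... program the new parameters ... */
		mutex_unlock(&state_mutex);

		/* the handler takes state_mutex too, so only wait for it after
		 * the lock is released */
		cancel_delayed_work_sync(&shutdown_work);
	}
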
index 507d251916af66ff81e0812714528ae02640d69e..ff4b45a5d796f0f16dcd09b7611ad6dc3bda5bb0 100644 (file)
@@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
        val = val << shift;
 
        ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
-       if (ret != 0)
+       if (ret < 0)
                return ret;
 
        if (snd_soc_volsw_is_stereo(mc)) {
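
snd_soc_update_bits_locked(), like other update_bits-style helpers, returns 1 when the register value
actually changed, 0 when it was already correct, and a negative errno on failure; checking "ret != 0"
therefore treated a successful change as an error and bailed out before the second channel was written,
which is what the "ret < 0" test fixes. A hedged sketch of consuming such a return value (the wrapper is
illustrative):

	#include <sound/soc.h>

	static int example_write_channel(struct snd_soc_codec *codec,
					 unsigned short reg,
					 unsigned int mask, unsigned int val)
	{
		int ret;

		ret = snd_soc_update_bits_locked(codec, reg, mask, val);
		if (ret < 0)		/* negative errno: real I/O failure */
			return ret;

		/* ret == 1 (changed) and ret == 0 (no change) are both success */
		return 0;
	}
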
index c925ab0adeb6b0a2750106518f825cfd7b1ef864..5e2c55c5b255920ae26f38b705662f31b785d512 100644 (file)
@@ -43,8 +43,6 @@
 static const struct snd_pcm_hardware tegra_pcm_hardware = {
        .info                   = SNDRV_PCM_INFO_MMAP |
                                  SNDRV_PCM_INFO_MMAP_VALID |
-                                 SNDRV_PCM_INFO_PAUSE |
-                                 SNDRV_PCM_INFO_RESUME |
                                  SNDRV_PCM_INFO_INTERLEAVED,
        .formats                = SNDRV_PCM_FMTBIT_S16_LE,
        .channels_min           = 2,
@@ -127,26 +125,6 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
        return 0;
 }
 
-static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               return snd_dmaengine_pcm_trigger(substream,
-                                       SNDRV_PCM_TRIGGER_START);
-
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               return snd_dmaengine_pcm_trigger(substream,
-                                       SNDRV_PCM_TRIGGER_STOP);
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
 static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
                                struct vm_area_struct *vma)
 {
@@ -164,7 +142,7 @@ static struct snd_pcm_ops tegra_pcm_ops = {
        .ioctl          = snd_pcm_lib_ioctl,
        .hw_params      = tegra_pcm_hw_params,
        .hw_free        = tegra_pcm_hw_free,
-       .trigger        = tegra_pcm_trigger,
+       .trigger        = snd_dmaengine_pcm_trigger,
        .pointer        = snd_dmaengine_pcm_pointer,
        .mmap           = tegra_pcm_mmap,
 };
index 497d2741d1192f7b5b8eea43d9347023faa220f2..ebe91440a068a8f500901652892d5a3575b68e63 100644 (file)
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
        else
                ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
                                  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-                                 0, cpu_to_le16(wIndex),
+                                 0, wIndex,
                                  &tmp, sizeof(tmp), 1000);
        up_read(&mixer->chip->shutdown_rwsem);
 
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
        else
                ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
                                  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-                                 cpu_to_le16(wValue), cpu_to_le16(wIndex),
+                                 wValue, wIndex,
                                  NULL, 0, 1000);
        up_read(&mixer->chip->shutdown_rwsem);
 
index 5325a3869bb7fbbb714bbb7d6bc894062e00b2b7..9c5ab22358b159f919612fd20779dc81bb19b7d2 100644 (file)
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
 {
        int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                                  0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                                 cpu_to_le16(1), 0, NULL, 0, 1000);
+                                 1, 0, NULL, 0, 1000);
 
        if (ret < 0)
                return ret;