git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'block/for-next'
author		Stephen Rothwell <sfr@canb.auug.org.au>
		Thu, 5 Nov 2015 01:18:26 +0000 (12:18 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
		Thu, 5 Nov 2015 01:18:26 +0000 (12:18 +1100)
MAINTAINERS
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-sysfs.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/nvme/host/pci.c
fs/mpage.c

diff --combined MAINTAINERS
index baf1f077b25a25ce022c4558039a71172ca5abe3,d8be12c57f848255d55db7e18f6ab0057fe9811b..accfbfa673b1d78a628e4350a5c0ab2a25679f4e
@@@ -240,12 -240,6 +240,12 @@@ L:       lm-sensors@lm-sensors.or
  S:    Maintained
  F:    drivers/hwmon/abituguru3.c
  
 +ACCES 104-IDIO-16 GPIO DRIVER
 +M:    "William Breathitt Gray" <vilhelm.gray@gmail.com>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-104-idio-16.c
 +
  ACENIC DRIVER
  M:    Jes Sorensen <jes@trained-monkey.org>
  L:    linux-acenic@sunsite.dk
@@@ -660,6 -654,11 +660,6 @@@ F:        drivers/gpu/drm/radeon/radeon_kfd.
  F:    drivers/gpu/drm/radeon/radeon_kfd.h
  F:    include/uapi/linux/kfd_ioctl.h
  
 -AMD MICROCODE UPDATE SUPPORT
 -M:    Borislav Petkov <bp@alien8.de>
 -S:    Maintained
 -F:    arch/x86/kernel/cpu/microcode/amd*
 -
  AMD XGBE DRIVER
  M:    Tom Lendacky <thomas.lendacky@amd.com>
  L:    netdev@vger.kernel.org
@@@ -789,11 -788,6 +789,11 @@@ S:       Maintaine
  F:    drivers/net/appletalk/
  F:    net/appletalk/
  
 +APPLIED MICRO (APM) X-GENE DEVICE TREE SUPPORT
 +M:    Duc Dang <dhdang@apm.com>
 +S:    Supported
 +F:    arch/arm64/boot/dts/apm/
 +
  APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
  M:    Iyappan Subramanian <isubramanian@apm.com>
  M:    Keyur Chudgar <kchudgar@apm.com>
@@@ -828,13 -822,12 +828,13 @@@ F:      arch/arm/include/asm/floppy.
  
  ARM PMU PROFILING AND DEBUGGING
  M:    Will Deacon <will.deacon@arm.com>
 +R:    Mark Rutland <mark.rutland@arm.com>
  S:    Maintained
 -F:    arch/arm/kernel/perf_*
 +F:    arch/arm*/kernel/perf_*
  F:    arch/arm/oprofile/common.c
 -F:    arch/arm/kernel/hw_breakpoint.c
 -F:    arch/arm/include/asm/hw_breakpoint.h
 -F:    arch/arm/include/asm/perf_event.h
 +F:    arch/arm*/kernel/hw_breakpoint.c
 +F:    arch/arm*/include/asm/hw_breakpoint.h
 +F:    arch/arm*/include/asm/perf_event.h
  F:    drivers/perf/arm_pmu.c
  F:    include/linux/perf/arm_pmu.h
  
@@@ -901,12 -894,11 +901,12 @@@ M:      Lennert Buytenhek <kernel@wantstofly
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  
 -ARM/Allwinner A1X SoC support
 +ARM/Allwinner sunXi SoC support
  M:    Maxime Ripard <maxime.ripard@free-electrons.com>
 +M:    Chen-Yu Tsai <wens@csie.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 -N:    sun[x4567]i
 +N:    sun[x456789]i
  
  ARM/Allwinner SoC Clock Support
  M:    Emilio López <emilio@elopez.com.ar>
@@@ -925,7 -917,7 +925,7 @@@ M: Tsahee Zidenberg <tsahee@annapurnala
  S:    Maintained
  F:    arch/arm/mach-alpine/
  
 -ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
 +ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
  M:    Alexandre Belloni <alexandre.belloni@free-electrons.com>
  M:    Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
@@@ -1238,13 -1230,6 +1238,13 @@@ ARM/LPC18XX ARCHITECTUR
  M:    Joachim Eastwood <manabian@gmail.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 +F:    arch/arm/boot/dts/lpc43*
 +F:    drivers/clk/nxp/clk-lpc18xx*
 +F:    drivers/clocksource/time-lpc32xx.c
 +F:    drivers/i2c/busses/i2c-lpc2k.c
 +F:    drivers/memory/pl172.c
 +F:    drivers/mtd/spi-nor/nxp-spifi.c
 +F:    drivers/rtc/rtc-lpc24xx.c
  N:    lpc18xx
  
  ARM/MAGICIAN MACHINE SUPPORT
@@@ -1459,12 -1444,7 +1459,12 @@@ F:    arch/arm/mach-exynos*
  F:    drivers/*/*s3c2410*
  F:    drivers/*/*/*s3c2410*
  F:    drivers/spi/spi-s3c*
 +F:    drivers/soc/samsung/*
  F:    sound/soc/samsung/*
 +F:    Documentation/arm/Samsung/
 +F:    Documentation/devicetree/bindings/arm/samsung/
 +F:    Documentation/devicetree/bindings/sram/samsung-sram.txt
 +F:    Documentation/devicetree/bindings/power/pd-samsung.txt
  N:    exynos
  
  ARM/SAMSUNG MOBILE MACHINE SUPPORT
@@@ -1499,14 -1479,6 +1499,14 @@@ L:    linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/media/platform/s5p-tv/
  
 +ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
 +M:    Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 +M:    Jacek Anaszewski <j.anaszewski@samsung.com>
 +L:    linux-arm-kernel@lists.infradead.org
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    drivers/media/platform/s5p-jpeg/
 +
  ARM/SHMOBILE ARM ARCHITECTURE
  M:    Simon Horman <horms@verge.net.au>
  M:    Magnus Damm <magnus.damm@gmail.com>
@@@ -1519,6 -1491,8 +1519,6 @@@ F:      arch/arm/boot/dts/emev2
  F:    arch/arm/boot/dts/r7s*
  F:    arch/arm/boot/dts/r8a*
  F:    arch/arm/boot/dts/sh*
 -F:    arch/arm/configs/bockw_defconfig
 -F:    arch/arm/configs/marzen_defconfig
  F:    arch/arm/configs/shmobile_defconfig
  F:    arch/arm/include/debug/renesas-scif.S
  F:    arch/arm/mach-shmobile/
@@@ -1553,7 -1527,6 +1553,7 @@@ W:      http://www.stlinux.co
  S:    Maintained
  F:    arch/arm/mach-sti/
  F:    arch/arm/boot/dts/sti*
 +F:    drivers/char/hw_random/st-rng.c
  F:    drivers/clocksource/arm_global_timer.c
  F:    drivers/clocksource/clksrc_st_lpc.c
  F:    drivers/i2c/busses/i2c-st.c
@@@ -1633,10 -1606,7 +1633,10 @@@ M:    Masahiro Yamada <yamada.masahiro@soc
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/boot/dts/uniphier*
 +F:    arch/arm/include/asm/hardware/cache-uniphier.h
  F:    arch/arm/mach-uniphier/
 +F:    arch/arm/mm/cache-uniphier.c
 +F:    drivers/i2c/busses/i2c-uniphier*
  F:    drivers/pinctrl/uniphier/
  F:    drivers/tty/serial/8250/8250_uniphier.c
  N:    uniphier
@@@ -1809,14 -1779,6 +1809,14 @@@ S:    Supporte
  F:    Documentation/aoe/
  F:    drivers/block/aoe/
  
 +ATHEROS 71XX/9XXX GPIO DRIVER
 +M:    Alban Bedel <albeu@free.fr>
 +W:    https://github.com/AlbanBedel/linux
 +T:    git git://github.com/AlbanBedel/linux
 +S:    Maintained
 +F:    drivers/gpio/gpio-ath79.c
 +F:    Documentation/devicetree/bindings/gpio/gpio-ath79.txt
 +
  ATHEROS ATH GENERIC UTILITIES
  M:    "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
  L:    linux-wireless@vger.kernel.org
@@@ -2398,27 -2360,19 +2398,27 @@@ L:   linux-scsi@vger.kernel.or
  S:    Supported
  F:    drivers/scsi/bnx2i/
  
 -BROADCOM CYGNUS/IPROC ARM ARCHITECTURE
 +BROADCOM IPROC ARM ARCHITECTURE
  M:    Ray Jui <rjui@broadcom.com>
  M:    Scott Branden <sbranden@broadcom.com>
 +M:    Jon Mason <jonmason@broadcom.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    bcm-kernel-feedback-list@broadcom.com
  T:    git git://github.com/broadcom/cygnus-linux.git
  S:    Maintained
  N:    iproc
  N:    cygnus
 +N:    nsp
  N:    bcm9113*
  N:    bcm9583*
 -N:    bcm583*
 +N:    bcm9585*
 +N:    bcm9586*
 +N:    bcm988312
  N:    bcm113*
 +N:    bcm583*
 +N:    bcm585*
 +N:    bcm586*
 +N:    bcm88312
  
  BROADCOM BRCMSTB GPIO DRIVER
  M:    Gregory Fong <gregory.0xf0@gmail.com>
@@@ -2776,10 -2730,9 +2776,10 @@@ S:    Supporte
  F:    drivers/net/ethernet/cisco/enic/
  
  CISCO VIC LOW LATENCY NIC DRIVER
 -M:    Upinder Malhi <umalhi@cisco.com>
 +M:    Christian Benvenuti <benve@cisco.com>
 +M:    Dave Goodell <dgoodell@cisco.com>
  S:    Supported
 -F:    drivers/infiniband/hw/usnic
 +F:    drivers/infiniband/hw/usnic/
  
  CIRRUS LOGIC EP93XX ETHERNET DRIVER
  M:    Hartley Sweeten <hsweeten@visionengravers.com>
@@@ -3414,7 -3367,6 +3414,7 @@@ M:      Support Opensource <support.opensour
  W:    http://www.dialog-semiconductor.com/products
  S:    Supported
  F:    Documentation/hwmon/da90??
 +F:    Documentation/devicetree/bindings/sound/da[79]*.txt
  F:    drivers/gpio/gpio-da90??.c
  F:    drivers/hwmon/da90??-hwmon.c
  F:    drivers/iio/adc/da91??-*.c
@@@ -3632,7 -3584,6 +3632,7 @@@ M:      Daniel Vetter <daniel.vetter@intel.c
  M:    Jani Nikula <jani.nikula@linux.intel.com>
  L:    intel-gfx@lists.freedesktop.org
  L:    dri-devel@lists.freedesktop.org
 +W:    https://01.org/linuxgraphics/
  Q:    http://patchwork.freedesktop.org/project/intel-gfx/
  T:    git git://anongit.freedesktop.org/drm-intel
  S:    Supported
@@@ -3640,13 -3591,6 +3640,13 @@@ F:    drivers/gpu/drm/i915
  F:    include/drm/i915*
  F:    include/uapi/drm/i915*
  
 +DRM DRIVERS FOR ATMEL HLCDC
 +M:    Boris Brezillon <boris.brezillon@free-electrons.com>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Supported
 +F:    drivers/gpu/drm/atmel-hlcdc/
 +F:    Documentation/devicetree/bindings/drm/atmel/
 +
  DRM DRIVERS FOR EXYNOS
  M:    Inki Dae <inki.dae@samsung.com>
  M:    Joonyoung Shim <jy0922.shim@samsung.com>
@@@ -3675,14 -3619,6 +3675,14 @@@ S:    Maintaine
  F:    drivers/gpu/drm/imx/
  F:    Documentation/devicetree/bindings/drm/imx/
  
 +DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
 +M:    Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 +L:    dri-devel@lists.freedesktop.org
 +T:    git git://github.com/patjak/drm-gma500
 +S:    Maintained
 +F:    drivers/gpu/drm/gma500
 +F:    include/drm/gma500*
 +
  DRM DRIVERS FOR NVIDIA TEGRA
  M:    Thierry Reding <thierry.reding@gmail.com>
  M:    Terje Bergström <tbergstrom@nvidia.com>
@@@ -4067,7 -4003,7 +4067,7 @@@ S:      Maintaine
  F:    sound/usb/misc/ua101.c
  
  EXTENSIBLE FIRMWARE INTERFACE (EFI)
 -M:    Matt Fleming <matt.fleming@intel.com>
 +M:    Matt Fleming <matt@codeblueprint.co.uk>
  L:    linux-efi@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
  S:    Maintained
@@@ -4082,7 -4018,7 +4082,7 @@@ F:      include/linux/efi*.
  EFI VARIABLE FILESYSTEM
  M:    Matthew Garrett <matthew.garrett@nebula.com>
  M:    Jeremy Kerr <jk@ozlabs.org>
 -M:    Matt Fleming <matt.fleming@intel.com>
 +M:    Matt Fleming <matt@codeblueprint.co.uk>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
  L:    linux-efi@vger.kernel.org
  S:    Maintained
@@@ -4476,14 -4412,6 +4476,14 @@@ L:    linuxppc-dev@lists.ozlabs.or
  S:    Maintained
  F:    drivers/net/ethernet/freescale/ucc_geth*
  
 +FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
 +M:    Claudiu Manoil <claudiu.manoil@freescale.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/ethernet/freescale/gianfar*
 +X:    drivers/net/ethernet/freescale/gianfar_ptp.c
 +F:    Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
 +
  FREESCALE QUICC ENGINE UCC UART DRIVER
  M:    Timur Tabi <timur@tabi.org>
  L:    linuxppc-dev@lists.ozlabs.org
@@@ -5161,7 -5089,6 +5161,7 @@@ S:      Maintaine
  F:    Documentation/devicetree/bindings/i2c/
  F:    Documentation/i2c/
  F:    drivers/i2c/
 +F:    drivers/i2c/*/
  F:    include/linux/i2c.h
  F:    include/linux/i2c-*.h
  F:    include/uapi/linux/i2c.h
@@@ -5503,6 -5430,12 +5503,6 @@@ W:     https://01.org/linux-acp
  S:    Supported
  F:    drivers/platform/x86/intel_menlow.c
  
 -INTEL IA32 MICROCODE UPDATE SUPPORT
 -M:    Borislav Petkov <bp@alien8.de>
 -S:    Maintained
 -F:    arch/x86/kernel/cpu/microcode/core*
 -F:    arch/x86/kernel/cpu/microcode/intel*
 -
  INTEL I/OAT DMA DRIVER
  M:    Dave Jiang <dave.jiang@intel.com>
  R:    Dan Williams <dan.j.williams@intel.com>
@@@ -5613,7 -5546,7 +5613,7 @@@ F:      drivers/net/wireless/iwlegacy
  INTEL WIRELESS WIFI LINK (iwlwifi)
  M:    Johannes Berg <johannes.berg@intel.com>
  M:    Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 -M:    Intel Linux Wireless <ilw@linux.intel.com>
 +M:    Intel Linux Wireless <linuxwifi@intel.com>
  L:    linux-wireless@vger.kernel.org
  W:    http://intellinuxwireless.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
@@@ -6160,13 -6093,6 +6160,13 @@@ F:    Documentation/auxdisplay/ks010
  F:    drivers/auxdisplay/ks0108.c
  F:    include/linux/ks0108.h
  
 +L3MDEV
 +M:    David Ahern <dsa@cumulusnetworks.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    net/l3mdev
 +F:    include/net/l3mdev.h
 +
  LAPB module
  L:    linux-x25@vger.kernel.org
  S:    Orphan
@@@ -6317,6 -6243,14 +6317,14 @@@ F:    drivers/nvdimm/pmem.
  F:    include/linux/pmem.h
  F:    arch/*/include/asm/pmem.h
  
+ LIGHTNVM PLATFORM SUPPORT
+ M:    Matias Bjorling <mb@lightnvm.io>
+ W:    http://github.com/OpenChannelSSD
+ S:    Maintained
+ F:    drivers/lightnvm/
+ F:    include/linux/lightnvm.h
+ F:    include/uapi/linux/lightnvm.h
+ 
  LINUX FOR IBM pSERIES (RS/6000)
  M:    Paul Mackerras <paulus@au.ibm.com>
  W:    http://www.ibm.com/linux/ltc/projects/ppc
@@@ -6634,13 -6568,6 +6642,13 @@@ M:    Guenter Roeck <linux@roeck-us.net
  S:    Maintained
  F:    drivers/net/dsa/mv88e6352.c
  
 +MARVELL CRYPTO DRIVER
 +M:    Boris Brezillon <boris.brezillon@free-electrons.com>
 +M:    Arnaud Ebalard <arno@natisbad.org>
 +F:    drivers/crypto/marvell/
 +S:    Maintained
 +L:    linux-crypto@vger.kernel.org
 +
  MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
  M:    Mirko Lindner <mlindner@marvell.com>
  M:    Stephen Hemminger <stephen@networkplumber.org>
@@@ -6859,6 -6786,7 +6867,6 @@@ F:      drivers/scsi/megaraid
  
  MELLANOX ETHERNET DRIVER (mlx4_en)
  M:    Amir Vadai <amirv@mellanox.com>
 -M:    Ido Shamay <idos@mellanox.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  W:    http://www.mellanox.com
@@@ -7051,7 -6979,6 +7059,7 @@@ M:      Alan Ott <alan@signal11.us
  L:    linux-wpan@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ieee802154/mrf24j40.c
 +F:    Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
  
  MSI LAPTOP SUPPORT
  M:    "Lee, Chun-Yi" <jlee@suse.com>
@@@ -7386,6 -7313,7 +7394,6 @@@ S:      Odd Fixe
  F:    drivers/net/
  F:    include/linux/if_*
  F:    include/linux/netdevice.h
 -F:    include/linux/arcdevice.h
  F:    include/linux/etherdevice.h
  F:    include/linux/fcdevice.h
  F:    include/linux/fddidevice.h
@@@ -7524,11 -7452,13 +7532,13 @@@ F:   drivers/video/fbdev/riva
  F:    drivers/video/fbdev/nvidia/
  
  NVM EXPRESS DRIVER
- M:    Matthew Wilcox <willy@linux.intel.com>
+ M:    Keith Busch <keith.busch@intel.com>
+ M:    Jens Axboe <axboe@fb.com>
  L:    linux-nvme@lists.infradead.org
- T:    git git://git.infradead.org/users/willy/linux-nvme.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
+ W:    https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
  S:    Supported
- F:    drivers/block/nvme*
+ F:    drivers/nvme/host/
  F:    include/linux/nvme.h
  
  NVMEM FRAMEWORK
@@@ -8023,14 -7953,6 +8033,14 @@@ F:    include/linux/pci
  F:    arch/x86/pci/
  F:    arch/x86/kernel/quirks.c
  
 +PCI DRIVER FOR ALTERA PCIE IP
 +M:    Ley Foon Tan <lftan@altera.com>
 +L:    rfi@lists.rocketboards.org (moderated for non-subscribers)
 +L:    linux-pci@vger.kernel.org
 +S:    Supported
 +F:    Documentation/devicetree/bindings/pci/altera-pcie.txt
 +F:    drivers/pci/host/pcie-altera.c
 +
  PCI DRIVER FOR ARM VERSATILE PLATFORM
  M:    Rob Herring <robh@kernel.org>
  L:    linux-pci@vger.kernel.org
@@@ -8132,14 -8054,6 +8142,14 @@@ L:    linux-pci@vger.kernel.or
  S:    Maintained
  F:    drivers/pci/host/*spear*
  
 +PCI MSI DRIVER FOR ALTERA MSI IP
 +M:    Ley Foon Tan <lftan@altera.com>
 +L:    rfi@lists.rocketboards.org (moderated for non-subscribers)
 +L:    linux-pci@vger.kernel.org
 +S:    Supported
 +F:    Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
 +F:    drivers/pci/host/pcie-altera-msi.c
 +
  PCI MSI DRIVER FOR APPLIEDMICRO XGENE
  M:    Duc Dang <dhdang@apm.com>
  L:    linux-pci@vger.kernel.org
@@@ -8148,13 -8062,6 +8158,13 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
  F:    drivers/pci/host/pci-xgene-msi.c
  
 +PCIE DRIVER FOR HISILICON
 +M:    Zhou Wang <wangzhou1@hisilicon.com>
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
 +F:    drivers/pci/host/pcie-hisi.c
 +
  PCMCIA SUBSYSTEM
  P:    Linux PCMCIA Team
  L:    linux-pcmcia@lists.infradead.org
@@@ -8261,13 -8168,6 +8271,13 @@@ L:    linux-arm-kernel@lists.infradead.or
  S:    Maintained
  F:    drivers/pinctrl/pinctrl-at91.*
  
 +PIN CONTROLLER - ATMEL AT91 PIO4
 +M:    Ludovic Desroches <ludovic.desroches@atmel.com>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +L:    linux-gpio@vger.kernel.org
 +S:    Supported
 +F:    drivers/pinctrl/pinctrl-at91-pio4.*
 +
  PIN CONTROLLER - INTEL
  M:    Mika Westerberg <mika.westerberg@linux.intel.com>
  M:    Heikki Krogerus <heikki.krogerus@linux.intel.com>
@@@ -8371,6 -8271,12 +8381,6 @@@ M:     "Rafael J. Wysocki" <rafael.j.wysock
  S:    Maintained
  F:    drivers/pnp/
  
 -PNXxxxx I2C DRIVER
 -M:    Vitaly Wool <vitalywool@gmail.com>
 -L:    linux-i2c@vger.kernel.org
 -S:    Maintained
 -F:    drivers/i2c/busses/i2c-pnx.c
 -
  PPP PROTOCOL DRIVERS AND COMPRESSORS
  M:    Paul Mackerras <paulus@samba.org>
  L:    linux-ppp@vger.kernel.org
@@@ -8623,16 -8529,6 +8633,16 @@@ L:    netdev@vger.kernel.or
  S:    Supported
  F:    drivers/net/ethernet/qlogic/qlge/
  
 +QLOGIC QL4xxx ETHERNET DRIVER
 +M:    Yuval Mintz <Yuval.Mintz@qlogic.com>
 +M:    Ariel Elior <Ariel.Elior@qlogic.com>
 +M:    everest-linux-l2@qlogic.com
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/ethernet/qlogic/qed/
 +F:    include/linux/qed/
 +F:    drivers/net/ethernet/qlogic/qede/
 +
  QNX4 FILESYSTEM
  M:    Anders Larsen <al@alarsen.net>
  W:    http://www.alarsen.net/linux/qnx4fs/
@@@ -8984,13 -8880,6 +8994,13 @@@ S:    Maintaine
  F:    drivers/net/wireless/rtlwifi/
  F:    drivers/net/wireless/rtlwifi/rtl8192ce/
  
 +RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
 +M:    Jes Sorensen <Jes.Sorensen@redhat.com>
 +L:    linux-wireless@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211
 +S:    Maintained
 +F:    drivers/net/wireless/realtek/rtl8xxxu/
 +
  S3 SAVAGE FRAMEBUFFER DRIVER
  M:    Antonino Daplas <adaplas@gmail.com>
  L:    linux-fbdev@vger.kernel.org
@@@ -9222,15 -9111,6 +9232,15 @@@ S: Supporte
  F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
  F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
  
 +SYNOPSYS DESIGNWARE I2C DRIVER
 +M:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 +M:    Jarkko Nikula <jarkko.nikula@linux.intel.com>
 +M:    Mika Westerberg <mika.westerberg@linux.intel.com>
 +L:    linux-i2c@vger.kernel.org
 +S:    Maintained
 +F:    drivers/i2c/busses/i2c-designware-*
 +F:    include/linux/platform_data/i2c-designware.h
 +
  SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
  M:    Seungwon Jeon <tgih.jun@samsung.com>
  M:    Jaehoon Chung <jh80.chung@samsung.com>
@@@ -9283,16 -9163,6 +9293,16 @@@ W:    http://www.sunplus.co
  S:    Supported
  F:    arch/score/
  
 +SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
 +M:    Sudeep Holla <sudeep.holla@arm.com>
 +L:    linux-arm-kernel@lists.infradead.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/arm/arm,scpi.txt
 +F:    drivers/clk/clk-scpi.c
 +F:    drivers/cpufreq/scpi-cpufreq.c
 +F:    drivers/firmware/arm_scpi.c
 +F:    include/linux/scpi_protocol.h
 +
  SCSI CDROM DRIVER
  M:    Jens Axboe <axboe@kernel.dk>
  L:    linux-scsi@vger.kernel.org
@@@ -10054,6 -9924,7 +10064,6 @@@ S:    Maintaine
  F:    drivers/staging/lustre
  
  STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
 -M:    Julian Andres Klode <jak@jak-linux.org>
  M:    Marc Dietrich <marvin24@gmx.de>
  L:    ac100@lists.launchpad.net (moderated for non-subscribers)
  L:    linux-tegra@vger.kernel.org
@@@ -10208,7 -10079,6 +10218,7 @@@ F:   include/net/switchdev.
  
  SYNOPSYS ARC ARCHITECTURE
  M:    Vineet Gupta <vgupta@synopsys.com>
 +L:    linux-snps-arc@lists.infradead.org
  S:    Supported
  F:    arch/arc/
  F:    Documentation/devicetree/bindings/arc/*
@@@ -11210,12 -11080,6 +11220,12 @@@ S: Maintaine
  F:    Documentation/fb/uvesafb.txt
  F:    drivers/video/fbdev/uvesafb.*
  
 +VF610 NAND DRIVER
 +M:    Stefan Agner <stefan@agner.ch>
 +L:    linux-mtd@lists.infradead.org
 +S:    Supported
 +F:    drivers/mtd/nand/vf610_nfc.c
 +
  VFAT/FAT/MSDOS FILESYSTEM
  M:    OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
  S:    Maintained
@@@ -11246,12 -11110,6 +11256,12 @@@ S: Maintaine
  F:    drivers/media/v4l2-core/videobuf2-*
  F:    include/media/videobuf2-*
  
 +VIRTUAL SERIO DEVICE DRIVER
 +M:    Stephen Chandler Paul <thatslyude@gmail.com>
 +S:    Maintained
 +F:    drivers/input/serio/userio.c
 +F:    include/uapi/linux/userio.h
 +
  VIRTIO CONSOLE DRIVER
  M:    Amit Shah <amit.shah@redhat.com>
  L:    virtualization@lists.linux-foundation.org
@@@ -11417,6 -11275,7 +11427,6 @@@ M:   Shrijeet Mukherjee <shm@cumulusnetwo
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/vrf.c
 -F:    include/net/vrf.h
  F:    Documentation/networking/vrf.txt
  
  VT1211 HARDWARE MONITOR DRIVER
@@@ -11529,6 -11388,15 +11539,6 @@@ W:  http://oops.ghostprotocols.net:81/bl
  S:    Maintained
  F:    drivers/net/wireless/wl3501*
  
 -WM97XX TOUCHSCREEN DRIVERS
 -M:    Mark Brown <broonie@kernel.org>
 -M:    Liam Girdwood <lrg@slimlogic.co.uk>
 -L:    linux-input@vger.kernel.org
 -W:    https://github.com/CirrusLogic/linux-drivers/wiki
 -S:    Supported
 -F:    drivers/input/touchscreen/*wm97*
 -F:    include/linux/wm97xx.h
 -
  WOLFSON MICROELECTRONICS DRIVERS
  L:    patches@opensource.wolfsonmicro.com
  T:    git https://github.com/CirrusLogic/linux-drivers.git
@@@ -11603,11 -11471,6 +11613,11 @@@ L: linux-edac@vger.kernel.or
  S:    Maintained
  F:    arch/x86/kernel/cpu/mcheck/*
  
 +X86 MICROCODE UPDATE SUPPORT
 +M:    Borislav Petkov <bp@alien8.de>
 +S:    Maintained
 +F:    arch/x86/kernel/cpu/microcode/*
 +
  X86 VDSO
  M:    Andy Lutomirski <luto@amacapital.net>
  L:    linux-kernel@vger.kernel.org
@@@ -11808,7 -11671,6 +11818,7 @@@ F:   drivers/tty/serial/zs.
  ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
  M:    Minchan Kim <minchan@kernel.org>
  M:    Nitin Gupta <ngupta@vflare.org>
 +R:    Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    mm/zsmalloc.c
diff --combined block/blk-core.c
index 18e92a6645e24741b786bc35f14b9d06f1355569,16bb626ff8c849b3f15886bdc26889beea3b3892..89eec79658702a7e53712bc52178dae25bddcc22
@@@ -554,29 -554,30 +554,30 @@@ void blk_cleanup_queue(struct request_q
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       if (q->mq_ops) {
-               blk_mq_freeze_queue(q);
-               spin_lock_irq(lock);
-       } else {
-               spin_lock_irq(lock);
+       blk_freeze_queue(q);
+       spin_lock_irq(lock);
+       if (!q->mq_ops)
                __blk_drain_queue(q, true);
-       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
  
+       /* for synchronous bio-based driver finish in-flight integrity i/o */
+       blk_flush_integrity();
+ 
        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
  
        if (q->mq_ops)
                blk_mq_free_queue(q);
+       percpu_ref_exit(&q->q_usage_counter);
  
        spin_lock_irq(lock);
        if (q->queue_lock != &q->__queue_lock)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);
  
 -      bdi_destroy(&q->backing_dev_info);
 +      bdi_unregister(&q->backing_dev_info);
  
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
@@@ -629,6 -630,40 +630,40 @@@ struct request_queue *blk_alloc_queue(g
  }
  EXPORT_SYMBOL(blk_alloc_queue);
  
+ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+ {
+       while (true) {
+               int ret;
+ 
+               if (percpu_ref_tryget_live(&q->q_usage_counter))
+                       return 0;
+ 
+               if (!(gfp & __GFP_WAIT))
+                       return -EBUSY;
+ 
+               ret = wait_event_interruptible(q->mq_freeze_wq,
+                               !atomic_read(&q->mq_freeze_depth) ||
+                               blk_queue_dying(q));
+               if (blk_queue_dying(q))
+                       return -ENODEV;
+               if (ret)
+                       return ret;
+       }
+ }
+ 
+ void blk_queue_exit(struct request_queue *q)
+ {
+       percpu_ref_put(&q->q_usage_counter);
+ }
+ 
+ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+ {
+       struct request_queue *q =
+               container_of(ref, struct request_queue, q_usage_counter);
+ 
+       wake_up_all(&q->mq_freeze_wq);
+ }
+ 
  struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
  {
        struct request_queue *q;
  
        init_waitqueue_head(&q->mq_freeze_wq);
  
-       if (blkcg_init_queue(q))
+       /*
+        * Init percpu_ref in atomic mode so that it's faster to shutdown.
+        * See blk_register_queue() for details.
+        */
+       if (percpu_ref_init(&q->q_usage_counter,
+                               blk_queue_usage_counter_release,
+                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_bdi;
  
+       if (blkcg_init_queue(q))
+               goto fail_ref;
+ 
        return q;
  
+ fail_ref:
+       percpu_ref_exit(&q->q_usage_counter);
  fail_bdi:
        bdi_destroy(&q->backing_dev_info);
  fail_split:
        return ret;
  }
  
+ unsigned int blk_plug_queued_count(struct request_queue *q)
+ {
+       struct blk_plug *plug;
+       struct request *rq;
+       struct list_head *plug_list;
+       unsigned int ret = 0;
+ 
+       plug = current->plug;
+       if (!plug)
+               goto out;
+ 
+       if (q->mq_ops)
+               plug_list = &plug->mq_list;
+       else
+               plug_list = &plug->list;
+ 
+       list_for_each_entry(rq, plug_list, queuelist) {
+               if (rq->q == q)
+                       ret++;
+       }
+ out:
+       return ret;
+ }
+ 
  void init_request_from_bio(struct request *req, struct bio *bio)
  {
        req->cmd_type = REQ_TYPE_FS;
@@@ -1641,9 -1711,11 +1711,11 @@@ static void blk_queue_bio(struct reques
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-       if (!blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return;
+       if (!blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
  
        spin_lock_irq(q->queue_lock);
  
@@@ -1966,9 -2038,19 +2038,19 @@@ void generic_make_request(struct bio *b
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
  
-               q->make_request_fn(q, bio);
+               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+                       q->make_request_fn(q, bio);
+                       blk_queue_exit(q);
  
-               bio = bio_list_pop(current->bio_list);
+                       bio = bio_list_pop(current->bio_list);
+               } else {
+                       struct bio *bio_next = bio_list_pop(current->bio_list);
+                       bio_io_error(bio);
+                       bio = bio_next;
+               }
        } while (bio);
        current->bio_list = NULL; /* deactivate */
  }
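
A note on the change above: generic_make_request() now brackets every ->make_request_fn() call with blk_queue_enter()/blk_queue_exit(), so bio-based queues get the same freeze/drain protection that blk-mq queues already had through the old mq-only gate. A minimal sketch of the contract a submitter follows (hypothetical caller, not part of this diff; error behavior per the blk_queue_enter() hunk above):

    /* Hedged sketch: how a submission path is expected to use the gate. */
    static void example_submit(struct request_queue *q, struct bio *bio)
    {
            /* May sleep with __GFP_WAIT; fails (e.g. -ENODEV) once the queue is dying. */
            if (blk_queue_enter(q, __GFP_WAIT)) {
                    bio_io_error(bio);              /* queue is going away */
                    return;
            }
            q->make_request_fn(q, bio);             /* q_usage_counter held here */
            blk_queue_exit(q);                      /* drop the percpu reference */
    }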
diff --combined block/blk-mq-tag.c
index ec2d11915142a8f9b7a49e839e41a2f54a55aa09,7a6b6e27fc26faca87db30c43b7caa6bd179b230..60ac684c8b8c52f26fe8a83d290fbd8f75f76581
@@@ -75,6 -75,10 +75,10 @@@ void blk_mq_tag_wakeup_all(struct blk_m
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
  
+       /*
+        * Make sure all changes prior to this are visible from other CPUs.
+        */
+       smp_mb();
        bt = &tags->bitmap_tags;
        wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
@@@ -641,7 -645,6 +645,7 @@@ void blk_mq_free_tags(struct blk_mq_tag
  {
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
 +      free_cpumask_var(tags->cpumask);
        kfree(tags);
  }
  
diff --combined block/blk-mq.c
index 85f014327342efc775c31833a52f531b69a66329,70819b7b021161a4a806ac1dd1c191544061d340..1c27b3eaef645ab1ff12d93ac400fcbdd3ebb655
@@@ -9,6 -9,7 +9,7 @@@
  #include <linux/backing-dev.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
+ #include <linux/kmemleak.h>
  #include <linux/mm.h>
  #include <linux/init.h>
  #include <linux/slab.h>
@@@ -77,47 -78,13 +78,13 @@@ static void blk_mq_hctx_clear_pending(s
        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
  }
  
- static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
- {
-       while (true) {
-               int ret;
- 
-               if (percpu_ref_tryget_live(&q->mq_usage_counter))
-                       return 0;
- 
-               if (!(gfp & __GFP_WAIT))
-                       return -EBUSY;
- 
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
-                               blk_queue_dying(q));
-               if (blk_queue_dying(q))
-                       return -ENODEV;
-               if (ret)
-                       return ret;
-       }
- }
- 
- static void blk_mq_queue_exit(struct request_queue *q)
- {
-       percpu_ref_put(&q->mq_usage_counter);
- }
- 
- static void blk_mq_usage_counter_release(struct percpu_ref *ref)
- {
-       struct request_queue *q =
-               container_of(ref, struct request_queue, mq_usage_counter);
- 
-       wake_up_all(&q->mq_freeze_wq);
- }
- 
  void blk_mq_freeze_queue_start(struct request_queue *q)
  {
        int freeze_depth;
  
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
  }
@@@ -125,18 -92,34 +92,34 @@@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_s
  
  static void blk_mq_freeze_queue_wait(struct request_queue *q)
  {
-       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
  }
  
  /*
   * Guarantee no request is in use, so we can change any data structure of
   * the queue afterward.
   */
- void blk_mq_freeze_queue(struct request_queue *q)
+ void blk_freeze_queue(struct request_queue *q)
  {
+       /*
+        * In the !blk_mq case we are only calling this to kill the
+        * q_usage_counter, otherwise this increases the freeze depth
+        * and waits for it to return to zero.  For this reason there is
+        * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+        * exported to drivers as the only user for unfreeze is blk_mq.
+        */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
  }
+ 
+ void blk_mq_freeze_queue(struct request_queue *q)
+ {
+       /*
+        * ...just an alias to keep freeze and unfreeze actions balanced
+        * in the blk_mq_* namespace
+        */
+       blk_freeze_queue(q);
+ }
  EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
  
  void blk_mq_unfreeze_queue(struct request_queue *q)
  {
        int freeze_depth;
  
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
-               percpu_ref_reinit(&q->mq_usage_counter);
+               percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
  }
@@@ -255,7 -238,7 +238,7 @@@ struct request *blk_mq_alloc_request(st
        struct blk_mq_alloc_data alloc_data;
        int ret;
  
-       ret = blk_mq_queue_enter(q, gfp);
+       ret = blk_queue_enter(q, gfp);
        if (ret)
                return ERR_PTR(ret);
  
        }
        blk_mq_put_ctx(ctx);
        if (!rq) {
-               blk_mq_queue_exit(q);
+               blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }
        return rq;
@@@ -297,7 -280,7 +280,7 @@@ static void __blk_mq_free_request(struc
  
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-       blk_mq_queue_exit(q);
+       blk_queue_exit(q);
  }
  
  void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@@ -989,18 -972,25 +972,25 @@@ void blk_mq_delay_queue(struct blk_mq_h
  }
  EXPORT_SYMBOL(blk_mq_delay_queue);
  
- static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-                                   struct request *rq, bool at_head)
+ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+                                           struct blk_mq_ctx *ctx,
+                                           struct request *rq,
+                                           bool at_head)
  {
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
        trace_block_rq_insert(hctx->queue, rq);
  
        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
+ }
+ 
+ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+                                   struct request *rq, bool at_head)
+ {
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
  
+       __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
  }
  
@@@ -1056,8 -1046,9 +1046,9 @@@ static void blk_mq_insert_requests(stru
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
-               __blk_mq_insert_request(hctx, rq, false);
+               __blk_mq_insert_req_list(hctx, ctx, rq, false);
        }
+       blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
  
        blk_mq_run_hw_queue(hctx, from_schedule);
@@@ -1139,7 -1130,7 +1130,7 @@@ static inline bool blk_mq_merge_queue_i
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
  {
-       if (!hctx_allow_merges(hctx)) {
+       if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
  insert_rq:
@@@ -1176,11 -1167,7 +1167,7 @@@ static struct request *blk_mq_map_reque
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;
  
-       if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-               bio_io_error(bio);
-               return NULL;
-       }
+       blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
  
@@@ -1267,9 -1254,12 +1254,12 @@@ static void blk_mq_make_request(struct 
  
        blk_queue_split(q, &bio, q->bio_split);
  
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-               return;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count,
+                                          &same_queue_rq))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
  
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@@ -1376,7 -1366,7 +1366,7 @@@ static void blk_sq_make_request(struct 
        plug = current->plug;
        if (plug) {
                blk_mq_bio_to_request(rq, bio);
-               if (list_empty(&plug->mq_list))
+               if (!request_count)
                        trace_block_plug(q);
                else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                        blk_flush_plug_list(plug, false);
@@@ -1430,6 -1420,11 +1420,11 @@@ static void blk_mq_free_rq_map(struct b
        while (!list_empty(&tags->page_list)) {
                page = list_first_entry(&tags->page_list, struct page, lru);
                list_del_init(&page->lru);
+               /*
+                * Remove kmemleak object previously allocated in
+                * blk_mq_init_rq_map().
+                */
+               kmemleak_free(page_address(page));
                __free_pages(page, page->private);
        }
  
@@@ -1502,6 -1497,11 +1497,11 @@@ static struct blk_mq_tags *blk_mq_init_
                list_add_tail(&page->lru, &tags->page_list);
  
                p = page_address(page);
+               /*
+                * Allow kmemleak to scan these pages as they contain pointers
+                * to additional allocations like via ops->init_request().
+                */
+               kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
                entries_per_page = order_to_size(this_order) / rq_size;
                to_do = min(entries_per_page, set->queue_depth - i);
                left -= to_do * rq_size;
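
The kmemleak_alloc()/kmemleak_free() pair added in this file fixes a false positive: request pages come from __get_free_pages(), which kmemleak does not track, yet they hold the only pointers to objects allocated via ops->init_request(). A condensed sketch of the annotation pattern (assuming the standard kmemleak API; min_count = 1 means one reference to the region is expected):

    #include <linux/kmemleak.h>

    void *p = page_address(page);
    /* Let kmemleak scan this page for pointers it would otherwise miss. */
    kmemleak_alloc(p, PAGE_SIZE << order, 1, GFP_KERNEL);
    /* ... use the page ... */
    kmemleak_free(p);               /* must precede __free_pages() in teardown */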
@@@ -1673,7 -1673,7 +1673,7 @@@ static int blk_mq_init_hctx(struct requ
        INIT_LIST_HEAD(&hctx->dispatch);
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
-       hctx->flags = set->flags;
+       hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
  
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
@@@ -1860,27 -1860,26 +1860,26 @@@ static void blk_mq_map_swqueue(struct r
        }
  }
  
- static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
  {
        struct blk_mq_hw_ctx *hctx;
-       struct request_queue *q;
-       bool shared;
        int i;
  
-       if (set->tag_list.next == set->tag_list.prev)
-               shared = false;
-       else
-               shared = true;
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (shared)
+                       hctx->flags |= BLK_MQ_F_TAG_SHARED;
+               else
+                       hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+       }
+ }
+ 
+ static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+ {
+       struct request_queue *q;
  
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_freeze_queue(q);
-               queue_for_each_hw_ctx(q, hctx, i) {
-                       if (shared)
-                               hctx->flags |= BLK_MQ_F_TAG_SHARED;
-                       else
-                               hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
-               }
+               queue_set_hctx_shared(q, shared);
                blk_mq_unfreeze_queue(q);
        }
  }
@@@ -1891,7 -1890,12 +1890,12 @@@ static void blk_mq_del_queue_tag_set(st
  
        mutex_lock(&set->tag_list_lock);
        list_del_init(&q->tag_set_list);
-       blk_mq_update_tag_set_depth(set);
+       if (list_is_singular(&set->tag_list)) {
+               /* just transitioned to unshared */
+               set->flags &= ~BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, false);
+       }
        mutex_unlock(&set->tag_list_lock);
  }
  
@@@ -1901,8 -1905,17 +1905,17 @@@ static void blk_mq_add_queue_tag_set(st
        q->tag_set = set;
  
        mutex_lock(&set->tag_list_lock);
+       /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+       if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+               set->flags |= BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, true);
+       }
+       if (set->flags & BLK_MQ_F_TAG_SHARED)
+               queue_set_hctx_shared(q, true);
        list_add_tail(&q->tag_set_list, &set->tag_list);
-       blk_mq_update_tag_set_depth(set);
        mutex_unlock(&set->tag_list_lock);
  }
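
Taken with the blk_mq_del_queue_tag_set() hunk above, BLK_MQ_F_TAG_SHARED now flips only on the one-to-two and two-to-one queue transitions, and blk_mq_update_tag_set_depth() freezes only the queues that existed before the transition; the incoming queue is labeled directly, unfrozen, via queue_set_hctx_shared(). A condensed timeline of the add path (a summary of the hunk, not new behavior):

    /* Tag set gains a second queue:
     *   set->flags     : BLK_MQ_F_TAG_SHARED set once
     *   existing queues: frozen, hctx flags updated, unfrozen
     *   new queue      : queue_set_hctx_shared(q, true) before list_add_tail()
     */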
  
@@@ -1989,14 -2002,6 +2002,6 @@@ struct request_queue *blk_mq_init_alloc
                hctxs[i]->queue_num = i;
        }
  
-       /*
-        * Init percpu_ref in atomic mode so that it's faster to shutdown.
-        * See blk_register_queue() for details.
-        */
-       if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-                           PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_hctxs;
- 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
  
@@@ -2077,8 -2082,6 +2082,6 @@@ void blk_mq_free_queue(struct request_q
  
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
-       percpu_ref_exit(&q->mq_usage_counter);
  }
  
  /* Basically redo blk_mq_init_queue with queue frozen */
@@@ -2296,8 -2299,10 +2299,8 @@@ void blk_mq_free_tag_set(struct blk_mq_
        int i;
  
        for (i = 0; i < set->nr_hw_queues; i++) {
 -              if (set->tags[i]) {
 +              if (set->tags[i])
                        blk_mq_free_rq_map(set, set->tags[i], i);
 -                      free_cpumask_var(set->tags[i]->cpumask);
 -              }
        }
  
        kfree(set->tags);
diff --combined block/blk-sysfs.c
index 07b42f5ad797b7b5e6367d5c70051d3b34fca55c,61fc2633bbeabf25cb3292290f79c9d16fb089f3..31849e328b452a8fdd26d202a30edc86e6d451df
@@@ -540,7 -540,6 +540,7 @@@ static void blk_release_queue(struct ko
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
  
 +      bdi_exit(&q->backing_dev_info);
        blkcg_exit_queue(q);
  
        if (q->elevator) {
@@@ -600,9 -599,8 +600,8 @@@ int blk_register_queue(struct gendisk *
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
-               if (q->mq_ops)
-                       blk_mq_finish_init(q);
        }
  
        ret = blk_trace_init_sysfs(dev);
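
The percpu_ref_switch_to_percpu() call replaces blk_mq_finish_init(): q_usage_counter is created in atomic mode (see the blk_alloc_queue_node() hunk in blk-core.c above) so a queue that dies during setup is cheap to tear down, and only goes percpu once registration completes. A minimal sketch of that lifecycle (illustrative; release_fn is a placeholder callback, not a kernel symbol):

    #include <linux/percpu-refcount.h>

    /* Setup: atomic mode keeps early teardown cheap. */
    percpu_ref_init(&ref, release_fn, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);

    /* Once live: switch to percpu mode for fast get/put in the hot path. */
    percpu_ref_switch_to_percpu(&ref);

    /* Freeze: kill reverts to atomic mode and drops the base reference; */
    percpu_ref_kill(&ref);
    /* unfreeze re-arms the counter. */
    percpu_ref_reinit(&ref);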
diff --combined drivers/md/dm.c
index 1b5c6047e4f19882fbbe9facbc29aeee54dc8723,f4d953e10e2f03b858b38a91ed3a48142205a54f..485760ebba762f41f4cf526cd880da93848bc47a
@@@ -1001,7 -1001,6 +1001,7 @@@ static void end_clone_bio(struct bio *c
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
 +      int error = clone->bi_error;
  
        bio_put(clone);
  
                 * the remainder.
                 */
                return;
 -      else if (bio->bi_error) {
 +      else if (error) {
                /*
                 * Don't notice the error to the upper layer yet.
                 * The error handling decision is made by the target driver,
                 * when the request is completed.
                 */
 -              tio->error = bio->bi_error;
 +              tio->error = error;
                return;
        }
  
@@@ -2234,8 -2233,6 +2234,6 @@@ static void cleanup_mapped_device(struc
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
                spin_unlock(&_minor_lock);
-               if (blk_get_integrity(md->disk))
-                       blk_integrity_unregister(md->disk);
                del_gendisk(md->disk);
                put_disk(md->disk);
        }
@@@ -2838,6 -2835,8 +2836,6 @@@ static void __dm_destroy(struct mapped_
  
        might_sleep();
  
 -      map = dm_get_live_table(md, &srcu_idx);
 -
        spin_lock(&_minor_lock);
        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
        set_bit(DMF_FREEING, &md->flags);
         * do not race with internal suspend.
         */
        mutex_lock(&md->suspend_lock);
 +      map = dm_get_live_table(md, &srcu_idx);
        if (!dm_suspended_md(md)) {
                dm_table_presuspend_targets(map);
                dm_table_postsuspend_targets(map);
        }
 -      mutex_unlock(&md->suspend_lock);
 -
        /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
        dm_put_live_table(md, srcu_idx);
 +      mutex_unlock(&md->suspend_lock);
  
        /*
         * Rare, but there may be I/O requests still going to complete,
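
The __dm_destroy() hunks above are an ordering fix: the live table is now looked up after md->suspend_lock is taken, so the lookup cannot race with an internal suspend swapping the table, and the SRCU reference is dropped before the mutex is released. Condensed, the corrected sequence is:

    mutex_lock(&md->suspend_lock);
    map = dm_get_live_table(md, &srcu_idx);     /* SRCU read-side begins */
    /* presuspend/postsuspend the targets if not already suspended */
    dm_put_live_table(md, srcu_idx);            /* read-side ends before any msleep */
    mutex_unlock(&md->suspend_lock);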
diff --combined drivers/md/md.c
index 3fe3d04a968ad1ae5e148abbc93cde43be576104,714aa92db174b9457f2011fd220327d5a99ee67d..740ee9bc5ad34c7a00ea719c7ff29ae3a4052d9b
@@@ -1962,12 -1962,9 +1962,9 @@@ int md_integrity_register(struct mddev 
         * All component devices are integrity capable and have matching
         * profiles, register the common profile for the md device.
         */
-       if (blk_integrity_register(mddev->gendisk,
-                       bdev_get_integrity(reference->bdev)) != 0) {
-               printk(KERN_ERR "md: failed to register integrity for %s\n",
-                       mdname(mddev));
-               return -EINVAL;
-       }
+       blk_integrity_register(mddev->gendisk,
+                              bdev_get_integrity(reference->bdev));
        printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
        if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
                printk(KERN_ERR "md: failed to create integrity pool for %s\n",
@@@ -1997,6 -1994,7 +1994,7 @@@ void md_integrity_add_rdev(struct md_rd
        if (bi_rdev && blk_integrity_compare(mddev->gendisk,
                                             rdev->bdev->bd_disk) >= 0)
                return;
+       WARN_ON_ONCE(!mddev->suspended);
        printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
        blk_integrity_unregister(mddev->gendisk);
  }
@@@ -5542,7 -5540,6 +5540,6 @@@ static int do_md_stop(struct mddev *mdd
                if (mddev->hold_active == UNTIL_STOP)
                        mddev->hold_active = 0;
        }
-       blk_integrity_unregister(disk);
        md_new_event(mddev);
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        return 0;
@@@ -8040,7 -8037,8 +8037,7 @@@ static int remove_and_add_spares(struc
                       !test_bit(Bitmap_sync, &rdev->flags)))
                        continue;
  
 -              if (rdev->saved_raid_disk < 0)
 -                      rdev->recovery_offset = 0;
 +              rdev->recovery_offset = 0;
                if (mddev->pers->
                    hot_add_disk(mddev, rdev) == 0) {
                        if (sysfs_link_rdev(mddev, rdev))
diff --combined drivers/md/raid1.c
index d9d031ede4bf5d73993a0fc607fab4274627c890,a881b111fa35d126b93142cc70cccceec6661dbd..33e59876678baab234174cf887ec7edcc7bbe5c7
@@@ -1621,7 -1621,9 +1621,9 @@@ static int raid1_add_disk(struct mddev 
                        break;
                }
        }
+       mddev_suspend(mddev);
        md_integrity_add_rdev(rdev, mddev);
+       mddev_resume(mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        print_conf(conf);
@@@ -2195,7 -2197,7 +2197,7 @@@ static int narrow_write_error(struct r1
                bio_trim(wbio, sector - r1_bio->sector, sectors);
                wbio->bi_iter.bi_sector += rdev->data_offset;
                wbio->bi_bdev = rdev->bdev;
 -              if (submit_bio_wait(WRITE, wbio) == 0)
 +              if (submit_bio_wait(WRITE, wbio) < 0)
                        /* failure! */
                        ok = rdev_set_badblocks(rdev, sector,
                                                sectors, 0)
@@@ -2258,16 -2260,15 +2260,16 @@@ static void handle_write_finished(struc
                        rdev_dec_pending(conf->mirrors[m].rdev,
                                         conf->mddev);
                }
 -      if (test_bit(R1BIO_WriteError, &r1_bio->state))
 -              close_write(r1_bio);
        if (fail) {
                spin_lock_irq(&conf->device_lock);
                list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
                spin_unlock_irq(&conf->device_lock);
                md_wakeup_thread(conf->mddev->thread);
 -      } else
 +      } else {
 +              if (test_bit(R1BIO_WriteError, &r1_bio->state))
 +                      close_write(r1_bio);
                raid_end_bio_io(r1_bio);
 +      }
  }
  
  static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@@ -2383,13 -2384,9 +2385,13 @@@ static void raid1d(struct md_thread *th
                }
                spin_unlock_irqrestore(&conf->device_lock, flags);
                while (!list_empty(&tmp)) {
 -                      r1_bio = list_first_entry(&conf->bio_end_io_list,
 -                                                struct r1bio, retry_list);
 +                      r1_bio = list_first_entry(&tmp, struct r1bio,
 +                                                retry_list);
                        list_del(&r1_bio->retry_list);
 +                      if (mddev->degraded)
 +                              set_bit(R1BIO_Degraded, &r1_bio->state);
 +                      if (test_bit(R1BIO_WriteError, &r1_bio->state))
 +                              close_write(r1_bio);
                        raid_end_bio_io(r1_bio);
                }
        }
diff --combined drivers/md/raid10.c
index 96f36596830696c2f1ba1bd8bc6fbb24d30a43d8,6f0ec107996a063f0220e27a23daf77026dea0d9..826210f095be8f1ce968f707e4d8cb00c01a0083
@@@ -39,7 -39,6 +39,7 @@@
   *    far_copies (stored in second byte of layout)
   *    far_offset (stored in bit 16 of layout )
   *    use_far_sets (stored in bit 17 of layout )
 + *    use_far_sets_bugfixed (stored in bit 18 of layout )
   *
   * The data to be stored is divided into chunks using chunksize.  Each device
   * is divided into far_copies sections.   In each section, chunks are laid out
@@@ -1498,8 -1497,6 +1498,8 @@@ static void status(struct seq_file *seq
                        seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
                else
                        seq_printf(seq, " %d far-copies", conf->geo.far_copies);
 +              if (conf->geo.far_set_size != conf->geo.raid_disks)
 +                      seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
        }
        seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
                                        conf->geo.raid_disks - mddev->degraded);
@@@ -1739,7 -1736,9 +1739,9 @@@ static int raid10_add_disk(struct mdde
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }
+       mddev_suspend(mddev);
        md_integrity_add_rdev(rdev, mddev);
+       mddev_resume(mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
  
@@@ -2470,7 -2469,7 +2472,7 @@@ static int narrow_write_error(struct r1
                                   choose_data_offset(r10_bio, rdev) +
                                   (sector - r10_bio->sector));
                wbio->bi_bdev = rdev->bdev;
 -              if (submit_bio_wait(WRITE, wbio) == 0)
 +              if (submit_bio_wait(WRITE, wbio) < 0)
                        /* Failure! */
                        ok = rdev_set_badblocks(rdev, sector,
                                                sectors, 0)
@@@ -2657,17 -2656,16 +2659,17 @@@ static void handle_write_completed(stru
                                rdev_dec_pending(rdev, conf->mddev);
                        }
                }
 -              if (test_bit(R10BIO_WriteError,
 -                           &r10_bio->state))
 -                      close_write(r10_bio);
                if (fail) {
                        spin_lock_irq(&conf->device_lock);
                        list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
                        spin_unlock_irq(&conf->device_lock);
                        md_wakeup_thread(conf->mddev->thread);
 -              } else
 +              } else {
 +                      if (test_bit(R10BIO_WriteError,
 +                                   &r10_bio->state))
 +                              close_write(r10_bio);
                        raid_end_bio_io(r10_bio);
 +              }
        }
  }
  
@@@ -2692,15 -2690,9 +2694,15 @@@ static void raid10d(struct md_thread *t
                }
                spin_unlock_irqrestore(&conf->device_lock, flags);
                while (!list_empty(&tmp)) {
 -                      r10_bio = list_first_entry(&conf->bio_end_io_list,
 -                                                struct r10bio, retry_list);
 +                      r10_bio = list_first_entry(&tmp, struct r10bio,
 +                                                 retry_list);
                        list_del(&r10_bio->retry_list);
 +                      if (mddev->degraded)
 +                              set_bit(R10BIO_Degraded, &r10_bio->state);
 +
 +                      if (test_bit(R10BIO_WriteError,
 +                                   &r10_bio->state))
 +                              close_write(r10_bio);
                        raid_end_bio_io(r10_bio);
                }
        }
@@@ -3397,7 -3389,7 +3399,7 @@@ static int setup_geo(struct geom *geo, 
                disks = mddev->raid_disks + mddev->delta_disks;
                break;
        }
 -      if (layout >> 18)
 +      if (layout >> 19)
                return -1;
        if (chunk < (PAGE_SIZE >> 9) ||
            !is_power_of_2(chunk))
        geo->near_copies = nc;
        geo->far_copies = fc;
        geo->far_offset = fo;
 -      geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
 +      switch (layout >> 17) {
 +      case 0: /* original layout.  simple but not always optimal */
 +              geo->far_set_size = disks;
 +              break;
 +      case 1: /* "improved" layout which was buggy.  Hopefully no-one is
 +               * actually using this, but leave code here just in case.*/
 +              geo->far_set_size = disks/fc;
 +              WARN(geo->far_set_size < fc,
 +                   "This RAID10 layout does not provide data safety - please backup and create new array\n");
 +              break;
 +      case 2: /* "improved" layout fixed to match documentation */
 +              geo->far_set_size = fc * nc;
 +              break;
 +      default: /* Not a valid layout */
 +              return -1;
 +      }
        geo->chunk_mask = chunk - 1;
        geo->chunk_shift = ffz(~chunk);
        return nc*fc;
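
The setup_geo() rework above turns bit 17 of the layout into a two-bit field (bits 18:17) selecting how far_set_size is derived, which is why the validity guard moves from layout >> 18 to layout >> 19. A worked example, assuming a hypothetical array with disks = 6, fc = 2 far copies and nc = 1 near copies:

    /* layout bits 18:17 = 0: far_set_size = disks    = 6  (original layout)    */
    /* layout bits 18:17 = 1: far_set_size = disks/fc = 3  (buggy "improved")   */
    /* layout bits 18:17 = 2: far_set_size = fc * nc  = 2  (fixed per the docs) */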
diff --combined drivers/nvme/host/pci.c
index a2b3e40f1dc5a562ab512d044a8d72f068ae8c09,264716effc6ce5d449a794f4d4effec058351c15..9f4fe3a5f41e459624ee245874c9b2d50bad2404
@@@ -12,7 -12,6 +12,6 @@@
   * more details.
   */
  
- #include <linux/nvme.h>
  #include <linux/bitops.h>
  #include <linux/blkdev.h>
  #include <linux/blk-mq.h>
  #include <linux/slab.h>
  #include <linux/t10-pi.h>
  #include <linux/types.h>
+ #include <linux/pr.h>
  #include <scsi/sg.h>
 -#include <asm-generic/io-64-nonatomic-lo-hi.h>
 +#include <linux/io-64-nonatomic-lo-hi.h>
+ #include <asm/unaligned.h>
+ #include <uapi/linux/nvme_ioctl.h>
+ #include "nvme.h"
  
  #define NVME_MINORS           (1U << MINORBITS)
  #define NVME_Q_DEPTH          1024
@@@ -84,9 -88,10 +88,10 @@@ static wait_queue_head_t nvme_kthread_w
  
  static struct class *nvme_class;
  
- static void nvme_reset_failed_dev(struct work_struct *ws);
+ static int __nvme_reset(struct nvme_dev *dev);
  static int nvme_reset(struct nvme_dev *dev);
  static int nvme_process_cq(struct nvme_queue *nvmeq);
+ static void nvme_dead_ctrl(struct nvme_dev *dev);
  
  struct async_cmd_info {
        struct kthread_work work;
@@@ -535,7 -540,7 +540,7 @@@ static void nvme_dif_remap(struct reque
        virt = bip_get_seed(bip);
        phys = nvme_block_nr(ns, blk_rq_pos(req));
        nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-       ts = ns->disk->integrity->tuple_size;
+       ts = ns->disk->queue->integrity.tuple_size;
  
        for (i = 0; i < nlb; i++, virt++, phys++) {
                pi = (struct t10_pi_tuple *)p;
        kunmap_atomic(pmap);
  }
  
- static int nvme_noop_verify(struct blk_integrity_iter *iter)
- {
-       return 0;
- }
- 
- static int nvme_noop_generate(struct blk_integrity_iter *iter)
- {
-       return 0;
- }
- 
- struct blk_integrity nvme_meta_noop = {
-       .name                   = "NVME_META_NOOP",
-       .generate_fn            = nvme_noop_generate,
-       .verify_fn              = nvme_noop_verify,
- };
- 
  static void nvme_init_integrity(struct nvme_ns *ns)
  {
        struct blk_integrity integrity;
  
        switch (ns->pi_type) {
        case NVME_NS_DPS_PI_TYPE3:
-               integrity = t10_pi_type3_crc;
+               integrity.profile = &t10_pi_type3_crc;
                break;
        case NVME_NS_DPS_PI_TYPE1:
        case NVME_NS_DPS_PI_TYPE2:
-               integrity = t10_pi_type1_crc;
+               integrity.profile = &t10_pi_type1_crc;
                break;
        default:
-               integrity = nvme_meta_noop;
+               integrity.profile = NULL;
                break;
        }
        integrity.tuple_size = ns->ms;
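
With the block-integrity rework this merge pulls in, a struct blk_integrity no longer carries its own generate/verify callbacks: it points at a shared struct blk_integrity_profile (here one of the canned T10-PI CRC profiles), and "no protection" is simply a NULL profile. A hedged sketch of how a driver consumes the new API, assuming the 4.4-era blk_integrity_register(disk, bi) signature; this is not the full nvme_init_integrity(), whose tail is outside this hunk:

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/string.h>
#include <linux/t10-pi.h>

/* Sketch only: register T10 Type 1 protection (or none) on a disk. */
static void example_register_pi(struct gendisk *disk, bool use_pi,
				unsigned int tuple_size)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	integrity.profile = use_pi ? &t10_pi_type1_crc : NULL;
	integrity.tuple_size = tuple_size;
	blk_integrity_register(disk, &integrity);
}
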
@@@ -604,7 -593,6 +593,7 @@@ static void req_completion(struct nvme_
        struct request *req = iod_get_private(iod);
        struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
        u16 status = le16_to_cpup(&cqe->status) >> 1;
 +      bool requeue = false;
        int error = 0;
  
        if (unlikely(status)) {
                    && (jiffies - req->start_time) < req->timeout) {
                        unsigned long flags;
  
 +                      requeue = true;
                        blk_mq_requeue_request(req);
                        spin_lock_irqsave(req->q->queue_lock, flags);
                        if (!blk_queue_stopped(req->q))
                                blk_mq_kick_requeue_list(req->q);
                        spin_unlock_irqrestore(req->q->queue_lock, flags);
 -                      return;
 +                      goto release_iod;
                }
  
                if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                        "completing aborted command with status:%04x\n",
                        error);
  
 +release_iod:
        if (iod->nents) {
                dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
                        rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        }
        nvme_free_iod(nvmeq->dev, iod);
  
 -      blk_mq_complete_request(req, error);
 +      if (likely(!requeue))
 +              blk_mq_complete_request(req, error);
  }
  
  /* length is in bytes.  gfp flags indicates whether we may sleep. */
@@@ -1283,18 -1268,13 +1272,13 @@@ static void nvme_abort_req(struct reque
        struct nvme_command cmd;
  
        if (!nvmeq->qid || cmd_rq->aborted) {
-               unsigned long flags;
-               spin_lock_irqsave(&dev_list_lock, flags);
-               if (work_busy(&dev->reset_work))
-                       goto out;
-               list_del_init(&dev->node);
-               dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
-                                                       req->tag, nvmeq->qid);
-               dev->reset_workfn = nvme_reset_failed_dev;
-               queue_work(nvme_workq, &dev->reset_work);
-  out:
-               spin_unlock_irqrestore(&dev_list_lock, flags);
+               spin_lock(&dev_list_lock);
+               if (!__nvme_reset(dev)) {
+                       dev_warn(dev->dev,
+                                "I/O %d QID %d timeout, reset controller\n",
+                                req->tag, nvmeq->qid);
+               }
+               spin_unlock(&dev_list_lock);
                return;
        }
  
@@@ -1949,6 -1929,23 +1933,23 @@@ static int nvme_compat_ioctl(struct blo
  #define nvme_compat_ioctl     NULL
  #endif
  
+ static void nvme_free_dev(struct kref *kref);
+ static void nvme_free_ns(struct kref *kref)
+ {
+       struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
+ 
+       if (ns->type == NVME_NS_LIGHTNVM)
+               nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
+ 
+       spin_lock(&dev_list_lock);
+       ns->disk->private_data = NULL;
+       spin_unlock(&dev_list_lock);
+ 
+       kref_put(&ns->dev->kref, nvme_free_dev);
+       put_disk(ns->disk);
+       kfree(ns);
+ }
  static int nvme_open(struct block_device *bdev, fmode_t mode)
  {
        int ret = 0;
        ns = bdev->bd_disk->private_data;
        if (!ns)
                ret = -ENXIO;
-       else if (!kref_get_unless_zero(&ns->dev->kref))
+       else if (!kref_get_unless_zero(&ns->kref))
                ret = -ENXIO;
        spin_unlock(&dev_list_lock);
  
        return ret;
  }
  
- static void nvme_free_dev(struct kref *kref);
  static void nvme_release(struct gendisk *disk, fmode_t mode)
  {
        struct nvme_ns *ns = disk->private_data;
-       struct nvme_dev *dev = ns->dev;
-       kref_put(&dev->kref, nvme_free_dev);
+       kref_put(&ns->kref, nvme_free_ns);
  }
  
  static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
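
The namespace now has its own reference count: nvme_open() only succeeds while the count is still nonzero, nvme_release() drops it, and the final put runs nvme_free_ns(), which in turn drops the device reference. The underlying pattern, reduced to a generic sketch (the "foo" names are placeholders, not nvme code):

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
};

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, kref));
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->kref);	/* count starts at 1 */
	return f;
}

static struct foo *foo_open(struct foo *f)
{
	/* Fails once the last put has started tearing the object down. */
	return kref_get_unless_zero(&f->kref) ? f : NULL;
}

static void foo_close(struct foo *f)
{
	kref_put(&f->kref, foo_release);
}
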
@@@ -2013,6 -2006,16 +2010,16 @@@ static int nvme_revalidate_disk(struct 
                return -ENODEV;
        }
  
+       if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
+               if (nvme_nvm_register(ns->queue, disk->disk_name)) {
+                       dev_warn(dev->dev,
+                               "%s: LightNVM init failure\n", __func__);
+                       kfree(id);
+                       return -ENODEV;
+               }
+               ns->type = NVME_NS_LIGHTNVM;
+       }
        old_ms = ns->ms;
        lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
        ns->lba_shift = id->lbaf[lbaf].ds;
        pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
                                        id->dps & NVME_NS_DPS_PI_MASK : 0;
  
+       blk_mq_freeze_queue(disk->queue);
        if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
                                ns->ms != old_ms ||
                                bs != queue_logical_block_size(disk->queue) ||
        ns->pi_type = pi_type;
        blk_queue_logical_block_size(ns->queue, bs);
  
-       if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
-                                                               !ns->ext)
+       if (ns->ms && !ns->ext)
                nvme_init_integrity(ns);
  
-       if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
+       if ((ns->ms && !(ns->ms == 8 && ns->pi_type) &&
+                                               !blk_get_integrity(disk)) ||
+                                               ns->type == NVME_NS_LIGHTNVM)
                set_capacity(disk, 0);
        else
                set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
  
        if (dev->oncs & NVME_CTRL_ONCS_DSM)
                nvme_config_discard(ns);
+       blk_mq_unfreeze_queue(disk->queue);
  
        kfree(id);
        return 0;
  }
  
+ static char nvme_pr_type(enum pr_type type)
+ {
+       switch (type) {
+       case PR_WRITE_EXCLUSIVE:
+               return 1;
+       case PR_EXCLUSIVE_ACCESS:
+               return 2;
+       case PR_WRITE_EXCLUSIVE_REG_ONLY:
+               return 3;
+       case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+               return 4;
+       case PR_WRITE_EXCLUSIVE_ALL_REGS:
+               return 5;
+       case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+               return 6;
+       default:
+               return 0;
+       }
+ }
+ 
+ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
+                               u64 key, u64 sa_key, u8 op)
+ {
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+       struct nvme_command c;
+       u8 data[16] = { 0, };
+ 
+       put_unaligned_le64(key, &data[0]);
+       put_unaligned_le64(sa_key, &data[8]);
+ 
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = op;
+       c.common.nsid = cpu_to_le32(ns->ns_id);
+       c.common.cdw10[0] = cpu_to_le32(cdw10);
+ 
+       return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+ }
+ 
+ static int nvme_pr_register(struct block_device *bdev, u64 old,
+               u64 new, unsigned flags)
+ {
+       u32 cdw10;
+ 
+       if (flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+       cdw10 = old ? 2 : 0;
+       cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
+       cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
+       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
+ }
+ 
+ static int nvme_pr_reserve(struct block_device *bdev, u64 key,
+               enum pr_type type, unsigned flags)
+ {
+       u32 cdw10;
+ 
+       if (flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+       cdw10 = nvme_pr_type(type) << 8;
+       cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
+ }
+ 
+ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
+               enum pr_type type, bool abort)
+ {
+       /* "|" binds tighter than "?:", so the ternary needs parentheses */
+       u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);
+       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
+ }
+ 
+ static int nvme_pr_clear(struct block_device *bdev, u64 key)
+ {
+       u32 cdw10 = 1 | (key ? 1 << 3 : 0);
+ 
+       /* Clear is an action of Reservation Release (RRELA=1), not Register */
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+ }
+ 
+ static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+ {
+       u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+ }
+ 
+ static const struct pr_ops nvme_pr_ops = {
+       .pr_register    = nvme_pr_register,
+       .pr_reserve     = nvme_pr_reserve,
+       .pr_release     = nvme_pr_release,
+       .pr_preempt     = nvme_pr_preempt,
+       .pr_clear       = nvme_pr_clear,
+ };
  static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
        .release        = nvme_release,
        .getgeo         = nvme_getgeo,
        .revalidate_disk= nvme_revalidate_disk,
+       .pr_ops         = &nvme_pr_ops,
  };
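
With .pr_ops wired into nvme_fops, userspace can drive persistent reservations through the generic block-layer ioctls this series introduces in <linux/pr.h>. A hedged sketch assuming that uapi; the device path and key value are made up:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg;
	struct pr_reservation rsv;
	int fd = open("/dev/nvme0n1", O_RDWR);	/* example device */

	if (fd < 0)
		return 1;

	/* Register our key with the namespace... */
	memset(&reg, 0, sizeof(reg));
	reg.new_key = 0xdeadbeef;		/* made-up key */
	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");

	/* ...then take a write-exclusive reservation with it. */
	memset(&rsv, 0, sizeof(rsv));
	rsv.key = 0xdeadbeef;
	rsv.type = PR_WRITE_EXCLUSIVE;
	if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("IOC_PR_RESERVE");

	close(fd);
	return 0;
}
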
  
  static int nvme_kthread(void *data)
  
                        if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
                                                        csts & NVME_CSTS_CFS) {
-                               if (work_busy(&dev->reset_work))
-                                       continue;
-                               list_del_init(&dev->node);
-                               dev_warn(dev->dev,
-                                       "Failed status: %x, reset controller\n",
-                                       readl(&dev->bar->csts));
-                               dev->reset_workfn = nvme_reset_failed_dev;
-                               queue_work(nvme_workq, &dev->reset_work);
+                               if (!__nvme_reset(dev)) {
+                                       dev_warn(dev->dev,
+                                               "Failed status: %x, reset controller\n",
+                                               readl(&dev->bar->csts));
+                               }
                                continue;
                        }
                        for (i = 0; i < dev->queue_count; i++) {
@@@ -2132,6 -2228,7 +2232,7 @@@ static void nvme_alloc_ns(struct nvme_d
        if (!disk)
                goto out_free_queue;
  
+       kref_init(&ns->kref);
        ns->ns_id = nsid;
        ns->disk = disk;
        ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
        if (nvme_revalidate_disk(ns->disk))
                goto out_free_disk;
  
-       add_disk(ns->disk);
-       if (ns->ms) {
-               struct block_device *bd = bdget_disk(ns->disk, 0);
-               if (!bd)
-                       return;
-               if (blkdev_get(bd, FMODE_READ, NULL)) {
-                       bdput(bd);
-                       return;
+       kref_get(&dev->kref);
+       if (ns->type != NVME_NS_LIGHTNVM) {
+               add_disk(ns->disk);
+               if (ns->ms) {
+                       struct block_device *bd = bdget_disk(ns->disk, 0);
+                       if (!bd)
+                               return;
+                       if (blkdev_get(bd, FMODE_READ, NULL)) {
+                               bdput(bd);
+                               return;
+                       }
+                       blkdev_reread_part(bd);
+                       blkdev_put(bd, FMODE_READ);
                }
-               blkdev_reread_part(bd);
-               blkdev_put(bd, FMODE_READ);
        }
        return;
   out_free_disk:
        kfree(ns);
  }
  
+ /*
+  * Create I/O queues.  Failing to create an I/O queue is not fatal: we
+  * can continue with fewer queues than desired, and even a controller
+  * without any I/O queues can still be used to issue admin commands.
+  * This might be useful, for example, to upgrade buggy firmware.
+  */
  static void nvme_create_io_queues(struct nvme_dev *dev)
  {
        unsigned i;
                        break;
  
        for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
-               if (nvme_create_queue(dev->queues[i], i))
+               if (nvme_create_queue(dev->queues[i], i)) {
+                       nvme_free_queues(dev, i);
                        break;
+               }
  }
  
  static int set_queue_count(struct nvme_dev *dev, int count)
@@@ -2363,18 -2472,6 +2476,6 @@@ static int nvme_setup_io_queues(struct 
        return result;
  }
  
- static void nvme_free_namespace(struct nvme_ns *ns)
- {
-       list_del(&ns->list);
-       spin_lock(&dev_list_lock);
-       ns->disk->private_data = NULL;
-       spin_unlock(&dev_list_lock);
-       put_disk(ns->disk);
-       kfree(ns);
- }
  static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
  {
        struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
@@@ -2408,15 -2505,14 +2509,14 @@@ static void nvme_ns_remove(struct nvme_
  
        if (kill)
                blk_set_queue_dying(ns->queue);
-       if (ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
+       if (ns->disk->flags & GENHD_FL_UP)
                del_gendisk(ns->disk);
-       }
        if (kill || !blk_queue_dying(ns->queue)) {
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
-         }
+       }
+       list_del_init(&ns->list);
+       kref_put(&ns->kref, nvme_free_ns);
  }
  
  static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
        for (i = 1; i <= nn; i++) {
                ns = nvme_find_ns(dev, i);
                if (ns) {
-                       if (revalidate_disk(ns->disk)) {
+                       if (revalidate_disk(ns->disk))
                                nvme_ns_remove(ns);
-                               nvme_free_namespace(ns);
-                       }
                } else
                        nvme_alloc_ns(dev, i);
        }
        list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-               if (ns->ns_id > nn) {
+               if (ns->ns_id > nn)
                        nvme_ns_remove(ns);
-                       nvme_free_namespace(ns);
-               }
        }
        list_sort(NULL, &dev->namespaces, ns_cmp);
  }
@@@ -2828,9 -2920,9 +2924,9 @@@ static void nvme_dev_shutdown(struct nv
  
  static void nvme_dev_remove(struct nvme_dev *dev)
  {
-       struct nvme_ns *ns;
+       struct nvme_ns *ns, *next;
  
-       list_for_each_entry(ns, &dev->namespaces, list)
+       list_for_each_entry_safe(ns, next, &dev->namespaces, list)
                nvme_ns_remove(ns);
  }
  
@@@ -2886,21 -2978,12 +2982,12 @@@ static void nvme_release_instance(struc
        spin_unlock(&dev_list_lock);
  }
  
- static void nvme_free_namespaces(struct nvme_dev *dev)
- {
-       struct nvme_ns *ns, *next;
-       list_for_each_entry_safe(ns, next, &dev->namespaces, list)
-               nvme_free_namespace(ns);
- }
  static void nvme_free_dev(struct kref *kref)
  {
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
  
        put_device(dev->dev);
        put_device(dev->device);
-       nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        if (dev->tagset.tags)
                blk_mq_free_tag_set(&dev->tagset);
@@@ -2974,14 -3057,15 +3061,15 @@@ static const struct file_operations nvm
        .compat_ioctl   = nvme_dev_ioctl,
  };
  
- static int nvme_dev_start(struct nvme_dev *dev)
+ static void nvme_probe_work(struct work_struct *work)
  {
-       int result;
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
        bool start_thread = false;
+       int result;
  
        result = nvme_dev_map(dev);
        if (result)
-               return result;
+               goto out;
  
        result = nvme_configure_admin_queue(dev);
        if (result)
                goto free_tags;
  
        dev->event_limit = 1;
-       return result;
+       /*
+        * Keep the controller around but remove all namespaces if we don't have
+        * any working I/O queue.
+        */
+       if (dev->online_queues < 2) {
+               dev_warn(dev->dev, "IO queues not created\n");
+               nvme_dev_remove(dev);
+       } else {
+               nvme_unfreeze_queues(dev);
+               nvme_dev_add(dev);
+       }
+       return;
  
   free_tags:
        nvme_dev_remove_admin(dev);
        nvme_dev_list_remove(dev);
   unmap:
        nvme_dev_unmap(dev);
-       return result;
+  out:
+       if (!work_busy(&dev->reset_work))
+               nvme_dead_ctrl(dev);
  }
  
  static int nvme_remove_dead_ctrl(void *arg)
        return 0;
  }
  
- static void nvme_remove_disks(struct work_struct *ws)
- {
-       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
-       nvme_free_queues(dev, 1);
-       nvme_dev_remove(dev);
- }
- static int nvme_dev_resume(struct nvme_dev *dev)
- {
-       int ret;
-       ret = nvme_dev_start(dev);
-       if (ret)
-               return ret;
-       if (dev->online_queues < 2) {
-               spin_lock(&dev_list_lock);
-               dev->reset_workfn = nvme_remove_disks;
-               queue_work(nvme_workq, &dev->reset_work);
-               spin_unlock(&dev_list_lock);
-       } else {
-               nvme_unfreeze_queues(dev);
-               nvme_dev_add(dev);
-       }
-       return 0;
- }
  static void nvme_dead_ctrl(struct nvme_dev *dev)
  {
        dev_warn(dev->dev, "Device failed to resume\n");
        }
  }
  
- static void nvme_dev_reset(struct nvme_dev *dev)
+ static void nvme_reset_work(struct work_struct *ws)
  {
+       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
        bool in_probe = work_busy(&dev->probe_work);
  
        nvme_dev_shutdown(dev);
        schedule_work(&dev->probe_work);
  }
  
- static void nvme_reset_failed_dev(struct work_struct *ws)
+ static int __nvme_reset(struct nvme_dev *dev)
  {
-       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
-       nvme_dev_reset(dev);
- }
- static void nvme_reset_workfn(struct work_struct *work)
- {
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
-       dev->reset_workfn(work);
+       if (work_pending(&dev->reset_work))
+               return -EBUSY;
+       list_del_init(&dev->node);
+       queue_work(nvme_workq, &dev->reset_work);
+       return 0;
  }
  
  static int nvme_reset(struct nvme_dev *dev)
  {
-       int ret = -EBUSY;
+       int ret;
  
        if (!dev->admin_q || blk_queue_dying(dev->admin_q))
                return -ENODEV;
  
        spin_lock(&dev_list_lock);
-       if (!work_pending(&dev->reset_work)) {
-               dev->reset_workfn = nvme_reset_failed_dev;
-               queue_work(nvme_workq, &dev->reset_work);
-               ret = 0;
-       }
+       ret = __nvme_reset(dev);
        spin_unlock(&dev_list_lock);
  
        if (!ret) {
@@@ -3153,7 -3219,6 +3223,6 @@@ static ssize_t nvme_sysfs_reset(struct 
  }
  static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
  
- static void nvme_async_probe(struct work_struct *work);
  static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
        int node, result = -ENOMEM;
                goto free;
  
        INIT_LIST_HEAD(&dev->namespaces);
-       dev->reset_workfn = nvme_reset_failed_dev;
-       INIT_WORK(&dev->reset_work, nvme_reset_workfn);
+       INIT_WORK(&dev->reset_work, nvme_reset_work);
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
        result = nvme_set_instance(dev);
  
        INIT_LIST_HEAD(&dev->node);
        INIT_WORK(&dev->scan_work, nvme_dev_scan);
-       INIT_WORK(&dev->probe_work, nvme_async_probe);
+       INIT_WORK(&dev->probe_work, nvme_probe_work);
        schedule_work(&dev->probe_work);
        return 0;
  
        return result;
  }
  
- static void nvme_async_probe(struct work_struct *work)
- {
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
-       if (nvme_dev_resume(dev) && !work_busy(&dev->reset_work))
-               nvme_dead_ctrl(dev);
- }
  static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
  {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
        if (prepare)
                nvme_dev_shutdown(dev);
        else
-               nvme_dev_resume(dev);
+               schedule_work(&dev->probe_work);
  }
  
  static void nvme_shutdown(struct pci_dev *pdev)
@@@ -3294,10 -3350,7 +3354,7 @@@ static int nvme_resume(struct device *d
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
  
-       if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
-               ndev->reset_workfn = nvme_reset_failed_dev;
-               queue_work(nvme_workq, &ndev->reset_work);
-       }
+       schedule_work(&ndev->probe_work);
        return 0;
  }
  #endif
diff --combined fs/mpage.c
index a7c34274f2076bc36e9cb9797f682988e617417f,2ebf91652ecbeba44578d30479a4c17d49f54231..09abba7653aa8db8189d05d7c2094b77ef1998a9
@@@ -139,8 -139,7 +139,8 @@@ map_buffer_to_page(struct page *page, s
  static struct bio *
  do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                sector_t *last_block_in_bio, struct buffer_head *map_bh,
 -              unsigned long *first_logical_block, get_block_t get_block)
 +              unsigned long *first_logical_block, get_block_t get_block,
 +              gfp_t gfp)
  {
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
@@@ -278,7 -277,8 +278,7 @@@ alloc_new
                                goto out;
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
 -                              min_t(int, nr_pages, BIO_MAX_PAGES),
 -                              GFP_KERNEL);
 +                              min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
                if (bio == NULL)
                        goto confused;
        }
@@@ -361,7 -361,6 +361,7 @@@ mpage_readpages(struct address_space *m
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
 +      gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
  
        map_bh.b_state = 0;
        map_bh.b_size = 0;
                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
 -                                      page->index, GFP_KERNEL)) {
 +                                      page->index,
 +                                      gfp)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
                                        &first_logical_block,
 -                                      get_block);
 +                                      get_block, gfp);
                }
                page_cache_release(page);
        }
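
do_mpage_readpage() now takes its allocation mask from the mapping (GFP_KERNEL masked by mapping_gfp_mask()) instead of a bare GFP_KERNEL, for both the page-cache insertion and the bio. That lets a filesystem clamp, at the mapping, every allocation made on its behalf, for example so readahead never re-enters the filesystem through reclaim. A hedged sketch; the function name is made up:

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Sketch: keep page-cache and bio allocations for this inode's mapping
 * from recursing into the filesystem (clears __GFP_FS).
 */
static void example_fs_init_inode(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
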
@@@ -397,12 -395,11 +397,12 @@@ int mpage_readpage(struct page *page, g
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
 +      gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
  
        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
 -                      &map_bh, &first_logical_block, get_block);
 +                      &map_bh, &first_logical_block, get_block, gfp);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
@@@ -485,6 -482,7 +485,7 @@@ static int __mpage_writepage(struct pag
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;
+       int wr = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
  
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
@@@ -593,7 -591,7 +594,7 @@@ page_is_mapped
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && mpd->last_block_in_bio != blocks[0] - 1)
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
  
  alloc_new:
        if (bio == NULL) {
        wbc_account_io(wbc, page, PAGE_SIZE);
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
                goto alloc_new;
        }
  
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
  
  confused:
        if (bio)
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
  
        if (mpd->use_writepage) {
                ret = mapping->a_ops->writepage(page, wbc);
@@@ -698,8 -696,11 +699,11 @@@ mpage_writepages(struct address_space *
                };
  
                ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
-               if (mpd.bio)
-                       mpage_bio_submit(WRITE, mpd.bio);
+               if (mpd.bio) {
+                       int wr = (wbc->sync_mode == WB_SYNC_ALL ?
+                                 WRITE_SYNC : WRITE);
+                       mpage_bio_submit(wr, mpd.bio);
+               }
        }
        blk_finish_plug(&plug);
        return ret;
@@@ -716,8 -717,11 +720,11 @@@ int mpage_writepage(struct page *page, 
                .use_writepage = 0,
        };
        int ret = __mpage_writepage(page, wbc, &mpd);
-       if (mpd.bio)
-               mpage_bio_submit(WRITE, mpd.bio);
+       if (mpd.bio) {
+               int wr = (wbc->sync_mode == WB_SYNC_ALL ?
+                         WRITE_SYNC : WRITE);
+               mpage_bio_submit(wr, mpd.bio);
+       }
        return ret;
  }
  EXPORT_SYMBOL(mpage_writepage);
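
All three writeback paths above now submit their bios as WRITE_SYNC rather than plain WRITE whenever the caller asked for data-integrity writeback, so the I/O scheduler can treat fsync-style writeout as synchronous while background flushing stays cheap. A caller opts in through writeback_control; a minimal sketch with a placeholder get_block (a real filesystem supplies its own, and passing NULL would fall back to generic_writepages() instead):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mpage.h>
#include <linux/writeback.h>

/* Placeholder block mapper, not a real function. */
extern int my_get_block(struct inode *inode, sector_t block,
			struct buffer_head *bh, int create);

/* WB_SYNC_ALL makes the bios submitted above WRITE_SYNC. */
static int example_flush(struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
	};

	return mpage_writepages(mapping, &wbc, my_get_block);
}
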