Merge tag 'fbdev-fixes-for-3.6-1' of git://github.com/schandinat/linux-2.6
author	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 6 Sep 2012 01:38:02 +0000 (18:38 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 6 Sep 2012 01:38:02 +0000 (18:38 -0700)
Pull fbdev fixes from Florian Tobias Schandinat:
 - a fix by Paul Cercueil to prevent a possible buffer overflow
 - a fix by Bruno Prémont to prevent a rare sleep in invalid context
 - a fix by Julia Lawall for a double free in auo_k190x
 - a fix by Dan Carpenter to prevent a division by zero in mb862xxfb
 - a regression fix by Tomi Valkeinen for the SDI output in OMAP
 - a fix by Grazvydas Ignotas for the framebuffer console colors in OMAP

* tag 'fbdev-fixes-for-3.6-1' of git://github.com/schandinat/linux-2.6:
  OMAPFB: fix framebuffer console colors
  OMAPDSS: Fix SDI PLL locking
  video: mb862xxfb: prevent divide by zero bug
  drivers/video/auo_k190x.c: drop kfree of devm_kzalloc's data
  fbcon: Fix bit_putcs() call to kmalloc(s, GFP_KERNEL)
  fbcon: prevent possible buffer overflow.

286 files changed:
Documentation/block/00-INDEX
Documentation/block/cfq-iosched.txt
Documentation/block/queue-sysfs.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/watchdog/src/watchdog-test.c
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/kirkwood-iconnect.dts
arch/arm/boot/dts/twl6030.dtsi
arch/arm/configs/u8500_defconfig
arch/arm/mach-dove/common.c
arch/arm/mach-exynos/mach-origen.c
arch/arm/mach-exynos/mach-smdkv310.c
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/headsmp.S [moved from arch/arm/mach-imx/head-v7.S with 100% similarity]
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-kirkwood/Makefile.boot
arch/arm/mach-kirkwood/common.c
arch/arm/mach-mmp/sram.c
arch/arm/mach-mv78xx0/addr-map.c
arch/arm/mach-mv78xx0/common.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/common-board-devices.c
arch/arm/mach-omap2/common-board-devices.h
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/mux.h
arch/arm/mach-omap2/opp4xxx_data.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/sleep44xx.S
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-s3c24xx/include/mach/dma.h
arch/arm/mach-ux500/Kconfig
arch/arm/mach-ux500/board-mop500-msp.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/plat-omap/dmtimer.c
arch/arm/plat-omap/include/plat/cpu.h
arch/arm/plat-omap/include/plat/multi.h
arch/arm/plat-omap/include/plat/uncompress.h
arch/arm/plat-orion/common.c
arch/arm/plat-orion/include/plat/common.h
arch/arm/plat-s3c24xx/dma.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/include/plat/hdmi.h [new file with mode: 0644]
arch/arm/plat-samsung/pm.c
arch/mips/Kconfig
arch/mips/alchemy/board-mtx1.c
arch/mips/ath79/dev-usb.c
arch/mips/ath79/gpio.c
arch/mips/bcm63xx/dev-spi.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/include/asm/mach-ath79/ar71xx_regs.h
arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mach-cavium-octeon/irq.h
arch/mips/include/asm/module.h
arch/mips/include/asm/r4k-timer.h
arch/mips/kernel/module.c
arch/mips/kernel/smp.c
arch/mips/kernel/sync-r4k.c
arch/mips/mti-malta/malta-pci.c
arch/mips/pci/pci-ar724x.c
arch/parisc/include/asm/atomic.h
arch/parisc/kernel/process.c
arch/parisc/kernel/sys_parisc.c
arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
arch/powerpc/configs/85xx/p1023rds_defconfig
arch/powerpc/configs/corenet32_smp_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/g5_defconfig
arch/powerpc/configs/mpc83xx_defconfig
arch/powerpc/configs/mpc85xx_defconfig
arch/powerpc/configs/mpc85xx_smp_defconfig
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/mpic_msgr.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/hw_breakpoint.c
arch/powerpc/kernel/kgdb.c
arch/powerpc/kernel/syscalls.c
arch/powerpc/kvm/book3s_32_mmu_host.c
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/e500_tlb.c
arch/powerpc/lib/copyuser_power7.S
arch/powerpc/lib/memcpy_power7.S
arch/powerpc/mm/mem.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/mpic_msgr.c
arch/powerpc/xmon/xmon.c
arch/s390/include/asm/elf.h
arch/s390/include/asm/posix_types.h
arch/s390/include/asm/smp.h
arch/x86/include/asm/spinlock.h
arch/x86/kernel/alternative.c
arch/x86/kernel/irq.c
arch/x86/kernel/microcode_amd.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/suspend.c
arch/x86/xen/xen-ops.h
block/blk-lib.c
block/blk-merge.c
block/genhd.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-acpi.c
drivers/ata/libata-core.c
drivers/ata/pata_atiixp.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_req.c
drivers/cpufreq/omap-cpufreq.c
drivers/crypto/caam/jr.c
drivers/crypto/hifn_795x.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/gma500/psb_intel_display.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/reg_srcs/r600
drivers/hid/hid-core.c
drivers/hwmon/asus_atk0110.c
drivers/ide/ide-pm.c
drivers/mtd/ubi/vtbl.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/softing/softing_fw.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/descs.h
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac100.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/fddi/skfp/pmf.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/wireless/ath/ath5k/eeprom.c
drivers/net/wireless/ath/ath5k/eeprom.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netfront.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_ioctl.c
drivers/spi/spi-bcm63xx.c
drivers/watchdog/booke_wdt.c
drivers/watchdog/da9052_wdt.c
drivers/xen/platform-pci.c
fs/bio.c
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/compression.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/locking.c
fs/btrfs/qgroup.c
fs/btrfs/root-tree.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/cifs/cifssmb.c
fs/cifs/dir.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/smb2misc.c
fs/cifs/smb2pdu.h
fs/cifs/transport.c
fs/direct-io.c
fs/jbd/journal.c
fs/logfs/dev_bdev.c
fs/logfs/inode.c
fs/logfs/journal.c
fs/logfs/readwrite.c
fs/logfs/segment.c
fs/nfsd/nfs4callback.c
fs/nfsd/state.h
fs/quota/dquot.c
fs/reiserfs/bitmap.c
fs/reiserfs/inode.c
fs/ubifs/debug.h
fs/ubifs/lpt.c
fs/ubifs/recovery.c
fs/ubifs/replay.c
fs/ubifs/super.c
fs/udf/inode.c
fs/udf/super.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_rtalloc.c
include/drm/drm_crtc.h
include/drm/drm_mode.h
include/linux/blkdev.h
include/linux/cpuidle.h
include/linux/ktime.h
include/linux/mv643xx_eth.h
include/linux/pci_ids.h
include/linux/time.h
include/net/netfilter/nf_conntrack_ecache.h
include/xen/events.h
kernel/fork.c
kernel/time/timekeeping.c
kernel/trace/trace_syscalls.c
mm/filemap.c
mm/mmap.c
mm/slab.c
net/core/netpoll.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_nat_sip.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv6/esp6.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nfnetlink_log.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/socket.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/xfrm/xfrm_state.c
tools/perf/util/python-ext-sources
virt/kvm/kvm_main.c

index d111e3b23db0bbc0bdf23d81c27e74158ccd9a66..d18ecd827c408d0fb42a8d8a7fc669ce88c95fc2 100644 (file)
@@ -3,15 +3,21 @@
 biodoc.txt
        - Notes on the Generic Block Layer Rewrite in Linux 2.5
 capability.txt
-       - Generic Block Device Capability (/sys/block/<disk>/capability)
+       - Generic Block Device Capability (/sys/block/<device>/capability)
+cfq-iosched.txt
+       - CFQ IO scheduler tunables
+data-integrity.txt
+       - Block data integrity
 deadline-iosched.txt
        - Deadline IO scheduler tunables
 ioprio.txt
        - Block io priorities (in CFQ scheduler)
+queue-sysfs.txt
+       - Queue's sysfs entries
 request.txt
        - The members of struct request (in include/linux/blkdev.h)
 stat.txt
-       - Block layer statistics in /sys/block/<dev>/stat
+       - Block layer statistics in /sys/block/<device>/stat
 switching-sched.txt
        - Switching I/O schedulers at runtime
 writeback_cache_control.txt
index 6d670f570451a14c1ce4f8c2eb1c69d52736ee01..d89b4fe724d75393a003b33c22bf531252c29672 100644 (file)
@@ -1,3 +1,14 @@
+CFQ (Complete Fairness Queueing)
+===============================
+
+The main aim of the CFQ scheduler is to provide a fair allocation of the disk
+I/O bandwidth for all the processes which request an I/O operation.
+
+CFQ maintains a per-process queue for the processes which request I/O
+operations (synchronous requests). In case of asynchronous requests, all the
+requests from all the processes are batched together according to their
+process's I/O priority.
+
 CFQ ioscheduler tunables
 ========================
 
@@ -25,6 +36,72 @@ there are multiple spindles behind single LUN (Host based hardware RAID
 controller or for storage arrays), setting slice_idle=0 might end up in better
 throughput and acceptable latencies.
 
+back_seek_max
+-------------
+This specifies, given in Kbytes, the maximum "distance" for backward seeking.
+The distance is the amount of space from the current head location to the
+sectors that lie backward from it.
+
+This parameter allows the scheduler to anticipate requests in the "backward"
+direction and consider them as being the "next" if they are within this
+distance from the current head location.
+
+back_seek_penalty
+-----------------
+This parameter is used to compute the cost of backward seeking. If the
+backward distance of a request is just 1/back_seek_penalty from a "front"
+request, then the seek cost of the two requests is considered equivalent.
+
+So the scheduler will not bias toward one or the other request (otherwise it
+would bias toward the front request). Default value of back_seek_penalty is 2.
+
+fifo_expire_async
+-----------------
+This parameter is used to set the timeout of asynchronous requests. Default
+value of this is 248ms.
+
+fifo_expire_sync
+----------------
+This parameter is used to set the timeout of synchronous requests. Default
+value of this is 124ms. To favor synchronous requests over asynchronous
+ones, decrease this value relative to fifo_expire_async.
+
+slice_async
+-----------
+This parameter is the same as slice_sync but for the asynchronous queue. The
+default value is 40ms.
+
+slice_async_rq
+--------------
+This parameter is used to limit the dispatching of asynchronous requests to
+the device request queue within a queue's slice time. The maximum number of
+requests that are allowed to be dispatched also depends upon the io priority.
+Default value for this is 2.
+
+slice_sync
+----------
+When a queue is selected for execution, the queue's IO requests are only
+executed for a certain amount of time (time_slice) before switching to another
+queue. This parameter is used to calculate the time slice of the synchronous
+queue.
+
+time_slice is computed using the equation:
+time_slice = slice_sync + (slice_sync/5 * (4 - prio)). To increase the
+time_slice of the synchronous queue, increase the value of slice_sync. Default
+value is 100ms.
+
+quantum
+-------
+This specifies the number of requests dispatched to the device queue. Within
+a queue's time slice, a request will not be dispatched if the number of
+requests in the device exceeds this parameter. This parameter is used for
+synchronous requests.
+
+In case of storage with several disks, this setting can limit the parallel
+processing of requests. Therefore, increasing the value can improve the
+performance, although this can cause the latency of some I/O to increase due
+to the larger number of outstanding requests.
+
 CFQ IOPS Mode for group scheduling
 ===================================
 Basic CFQ design is to provide priority based time slices. Higher priority
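
As a rough illustration of the slice_sync formula documented in the hunk above
(time_slice = slice_sync + (slice_sync/5 * (4 - prio))), the following
stand-alone user-space sketch (not part of the patch; the helper name and the
assumed ioprio range of 0..7 are purely illustrative) prints the resulting
slice for the default slice_sync of 100ms:

#include <stdio.h>

/* Illustrative only: mirrors the slice_sync formula documented above. */
static int cfq_time_slice_ms(int slice_sync_ms, int prio)
{
	return slice_sync_ms + (slice_sync_ms / 5) * (4 - prio);
}

int main(void)
{
	int prio;

	/* Default slice_sync of 100ms: prio 0 -> 180ms, prio 4 -> 100ms, prio 7 -> 40ms. */
	for (prio = 0; prio < 8; prio++)
		printf("prio %d: time_slice = %d ms\n", prio, cfq_time_slice_ms(100, prio));
	return 0;
}

With those defaults, the highest-priority queue gets a 180ms slice per round
and the lowest-priority queue 40ms.
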
index 6518a55273e7094f62f84a5d83467fd96b26fd26..e54ac1d53403094c59e019b1f5e5397e8f4f637d 100644 (file)
@@ -9,20 +9,71 @@ These files are the ones found in the /sys/block/xxx/queue/ directory.
 Files denoted with a RO postfix are readonly and the RW postfix means
 read-write.
 
+add_random (RW)
+----------------
+This file allows turning off the disk entropy contribution. Default
+value of this file is '1' (on).
+
+discard_granularity (RO)
+-----------------------
+This shows the size of internal allocation of the device in bytes, if
+reported by the device. A value of '0' means the device does not support
+the discard functionality.
+
+discard_max_bytes (RO)
+----------------------
+Devices that support discard functionality may have internal limits on
+the number of bytes that can be trimmed or unmapped in a single operation.
+The discard_max_bytes parameter is set by the device driver to the maximum
+number of bytes that can be discarded in a single operation. Discard
+requests issued to the device must not exceed this limit. A discard_max_bytes
+value of 0 means that the device does not support discard functionality.
+
+discard_zeroes_data (RO)
+------------------------
+When read, this file will show whether the discarded blocks are zeroed by
+the device or not. If its value is '1' the blocks are zeroed, otherwise not.
+
 hw_sector_size (RO)
 -------------------
 This is the hardware sector size of the device, in bytes.
 
+iostats (RW)
+-------------
+This file is used to control (on/off) the iostats accounting of the
+disk.
+
+logical_block_size (RO)
+-----------------------
+This is the logical block size of the device, in bytes.
+
 max_hw_sectors_kb (RO)
 ----------------------
 This is the maximum number of kilobytes supported in a single data transfer.
 
+max_integrity_segments (RO)
+---------------------------
+When read, this file shows the maximum number of integrity segments, as
+set by the block layer, that the hardware controller can handle.
+
 max_sectors_kb (RW)
 -------------------
 This is the maximum number of kilobytes that the block layer will allow
 for a filesystem request. Must be smaller than or equal to the maximum
 size allowed by the hardware.
 
+max_segments (RO)
+-----------------
+Maximum number of segments of the device.
+
+max_segment_size (RO)
+---------------------
+Maximum segment size of the device.
+
+minimum_io_size (RO)
+--------------------
+This is the smallest preferred io size reported by the device.
+
 nomerges (RW)
 -------------
 This enables the user to disable the lookup logic involved with IO
@@ -45,11 +96,24 @@ per-block-cgroup request pool.  IOW, if there are N block cgroups,
 each request queue may have upto N request pools, each independently
 regulated by nr_requests.
 
+optimal_io_size (RO)
+--------------------
+This is the optimal io size reported by the device.
+
+physical_block_size (RO)
+------------------------
+This is the physical block size of device, in bytes.
+
 read_ahead_kb (RW)
 ------------------
 Maximum number of kilobytes to read-ahead for filesystems on this block
 device.
 
+rotational (RW)
+---------------
+This file is used to state whether the device is of rotational type or
+non-rotational type.
+
 rq_affinity (RW)
 ----------------
 If this option is '1', the block layer will migrate request completions to the
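
The attributes documented above are exposed as plain files under
/sys/block/<device>/queue/ and can be inspected from user space. A minimal
sketch (not part of the patch; the device name "sda" and the selected
attributes are assumptions for illustration):

#include <stdio.h>

/* Read one queue attribute of an assumed device "sda" and print it. */
static void show_queue_attr(const char *attr)
{
	char path[128];
	char value[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(value, sizeof(value), f))
		printf("%s = %s", attr, value);
	fclose(f);
}

int main(void)
{
	show_queue_attr("rotational");
	show_queue_attr("add_random");
	show_queue_attr("logical_block_size");
	show_queue_attr("discard_max_bytes");
	return 0;
}

RW entries such as add_random, rotational or nomerges can likewise be changed
by writing the new value to the same path, given sufficient privileges.
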
index 70cd49b1caa8c07e71b7d0855a9469fc989cb7f6..1dd622546d06b711bf262358b387b4bb9af3f68a 100644 (file)
@@ -10,8 +10,8 @@ Required properties:
 - compatible : Should be "fsl,<chip>-esdhc"
 
 Optional properties:
-- fsl,cd-internal : Indicate to use controller internal card detection
-- fsl,wp-internal : Indicate to use controller internal write protection
+- fsl,cd-controller : Indicate to use controller internal card detection
+- fsl,wp-controller : Indicate to use controller internal write protection
 
 Examples:
 
@@ -19,8 +19,8 @@ esdhc@70004000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70004000 0x4000>;
        interrupts = <1>;
-       fsl,cd-internal;
-       fsl,wp-internal;
+       fsl,cd-controller;
+       fsl,wp-controller;
 };
 
 esdhc@70008000 {
index 73ff5cc93e05db4c93321464cb8ae948571a18fe..3da822967ee0e13d1a2dbcd88d2d75f5a4aca370 100644 (file)
@@ -31,7 +31,7 @@ static void keep_alive(void)
  * or "-e" to enable the card.
  */
 
-void term(int sig)
+static void term(int sig)
 {
     close(fd);
     fprintf(stderr, "Stopping watchdog ticks...\n");
index 354026873b13329e5add40f1863903ffc2fba9dc..371ce8899f5c758633ed25ffdacef01b45fa6e99 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index 6d6e18fee9fe08bacf876e749837d4b0156a5fd9..c5f9ae5dbd1ad04da804d68e475d3b4b230832f6 100644 (file)
@@ -2144,6 +2144,7 @@ source "drivers/cpufreq/Kconfig"
 config CPU_FREQ_IMX
        tristate "CPUfreq driver for i.MX CPUs"
        depends on ARCH_MXC && CPU_FREQ
+       select CPU_FREQ_TABLE
        help
          This enables the CPUfreq driver for i.MX CPUs.
 
index 59509c48d7e5a5c2474f73ac028f73537e0a6659..bd0cff3f808c7c5be55ce79dd31b343d37e6297a 100644 (file)
                        #size-cells = <0>;
                        ti,hwmods = "i2c3";
                };
+
+               wdt2: wdt@44e35000 {
+                       compatible = "ti,omap3-wdt";
+                       ti,hwmods = "wd_timer2";
+               };
        };
 };
index cd86177a3ea21aa1dbe3d0dcea633b50661e5552..59d9789e550898cc041e6670ca0430776b204c2b 100644 (file)
@@ -25,8 +25,8 @@
                aips@70000000 { /* aips-1 */
                        spba@70000000 {
                                esdhc@70004000 { /* ESDHC1 */
-                                       fsl,cd-internal;
-                                       fsl,wp-internal;
+                                       fsl,cd-controller;
+                                       fsl,wp-controller;
                                        status = "okay";
                                };
 
index 52d9470451069f5445661c79ffa902a7cc49eee1..f8ca6fa88192a5d36edb0c484a7a13c7c11e6e7c 100644 (file)
                };
                power-blue {
                        label = "power:blue";
-                       gpios = <&gpio1 11 0>;
+                       gpios = <&gpio1 10 0>;
                        linux,default-trigger = "timer";
                };
+               power-red {
+                       label = "power:red";
+                       gpios = <&gpio1 11 0>;
+               };
                usb1 {
                        label = "usb1:blue";
                        gpios = <&gpio1 12 0>;
index 3b2f3510d7eb91ca5ce520692682ad26aca89d31..d351b27d7213f65c50680e50965d41cdfd4218f2 100644 (file)
@@ -66,6 +66,7 @@
 
        vcxio: regulator@8 {
                compatible = "ti,twl6030-vcxio";
+               regulator-always-on;
        };
 
        vusb: regulator@9 {
 
        v1v8: regulator@10 {
                compatible = "ti,twl6030-v1v8";
+               regulator-always-on;
        };
 
        v2v1: regulator@11 {
                compatible = "ti,twl6030-v2v1";
+               regulator-always-on;
        };
 
        clk32kg: regulator@12 {
index 2d4f661d1cf6e757739429a4ba7730ea7be14eeb..da6845493caabae29842d959f0b80bdcc1bd7790 100644 (file)
@@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_LM3530=y
 CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_GPIO=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AB8500=y
 CONFIG_RTC_DRV_PL031=y
index 4db5de54b6a7e1658639dc8888a79ec8cede8cc7..6321567d8eaa0c97a3cdaba3fe2664cf7a1ece6c 100644 (file)
@@ -102,7 +102,8 @@ void __init dove_ehci1_init(void)
 void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE,
-                       IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR);
+                       IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR,
+                       1600);
 }
 
 /*****************************************************************************
index 5ca80307d6d7789c9fe1781dd6d56658f64f72e5..4e574c24581ca2869fa37f96d487879ffe0c016a 100644 (file)
@@ -42,6 +42,7 @@
 #include <plat/backlight.h>
 #include <plat/fb.h>
 #include <plat/mfc.h>
+#include <plat/hdmi.h>
 
 #include <mach/ohci.h>
 #include <mach/map.h>
@@ -734,6 +735,11 @@ static void __init origen_bt_setup(void)
        s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE);
 }
 
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+       I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
 static void s5p_tv_setup(void)
 {
        /* Direct HPD to HDMI chip */
@@ -781,6 +787,7 @@ static void __init origen_machine_init(void)
 
        s5p_tv_setup();
        s5p_i2c_hdmiphy_set_platdata(NULL);
+       s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
 
 #ifdef CONFIG_DRM_EXYNOS
        s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
index 3cfa688d274a8bda5b5657924509d9d0726f41f6..73f2bce097e179822d9a08d4dc14bba6f104888c 100644 (file)
@@ -40,6 +40,7 @@
 #include <plat/mfc.h>
 #include <plat/ehci.h>
 #include <plat/clock.h>
+#include <plat/hdmi.h>
 
 #include <mach/map.h>
 #include <mach/ohci.h>
@@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = {
        .pwm_period_ns  = 1000,
 };
 
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+       I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
 static void s5p_tv_setup(void)
 {
        /* direct HPD to HDMI chip */
@@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void)
 
        s5p_tv_setup();
        s5p_i2c_hdmiphy_set_platdata(NULL);
+       s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
 
        samsung_keypad_set_platdata(&smdkv310_keypad_data);
 
index 07f7c226e4cfe6eec6181e5e506655223c2f1e1a..d004d37ad9d8595648dbbf981e56391e58432eda 100644 (file)
@@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
 obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
 obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
 
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+imx5-pm-$(CONFIG_PM) += pm-imx5.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o
 
 obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
                            clk-pfd.o clk-busy.o
@@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o
 obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
 obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
 obj-$(CONFIG_HAVE_IMX_SRC) += src.o
-obj-$(CONFIG_CPU_V7) += head-v7.o
-AFLAGS_head-v7.o :=-Wa,-march=armv7-a
-obj-$(CONFIG_SMP) += platsmp.o
+AFLAGS_headsmp.o :=-Wa,-march=armv7-a
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
 obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
 
 ifeq ($(CONFIG_PM),y)
-obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
 endif
 
 # i.MX5 based machines
index ea89520b6e223fd3ab4db3227574c7299605e265..4233d9e3531d838e3cad29c9d2dceb012606d378 100644 (file)
@@ -152,7 +152,7 @@ enum mx6q_clks {
        ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
        usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
        pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
-       ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2,
+       ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
        clk_max
 };
 
@@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)
        clk[gpu3d_shader]     = imx_clk_divider("gpu3d_shader",     "gpu3d_shader_sel",  base + 0x18, 29, 3);
        clk[ipu1_podf]        = imx_clk_divider("ipu1_podf",        "ipu1_sel",          base + 0x3c, 11, 3);
        clk[ipu2_podf]        = imx_clk_divider("ipu2_podf",        "ipu2_sel",          base + 0x3c, 16, 3);
-       clk[ldb_di0_podf]     = imx_clk_divider("ldb_di0_podf",     "ldb_di0_sel",       base + 0x20, 10, 1);
-       clk[ldb_di1_podf]     = imx_clk_divider("ldb_di1_podf",     "ldb_di1_sel",       base + 0x20, 11, 1);
+       clk[ldb_di0_div_3_5]  = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+       clk[ldb_di0_podf]     = imx_clk_divider("ldb_di0_podf",     "ldb_di0_div_3_5",       base + 0x20, 10, 1);
+       clk[ldb_di1_div_3_5]  = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+       clk[ldb_di1_podf]     = imx_clk_divider("ldb_di1_podf",     "ldb_di1_div_3_5",   base + 0x20, 11, 1);
        clk[ipu1_di0_pre]     = imx_clk_divider("ipu1_di0_pre",     "ipu1_di0_pre_sel",  base + 0x34, 3,  3);
        clk[ipu1_di1_pre]     = imx_clk_divider("ipu1_di1_pre",     "ipu1_di1_pre_sel",  base + 0x34, 12, 3);
        clk[ipu2_di0_pre]     = imx_clk_divider("ipu2_di0_pre",     "ipu2_di0_pre_sel",  base + 0x38, 3,  3);
index 20ed2d56c1af6a3109ff3ea10843cda25d2a289e..f8f7437c83b82dbb07307e78895e66bb6af320e6 100644 (file)
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
          : "cc");
 }
 
-static inline void cpu_leave_lowpower(void)
-{
-       unsigned int v;
-
-       asm volatile(
-               "mrc    p15, 0, %0, c1, c0, 0\n"
-       "       orr     %0, %0, %1\n"
-       "       mcr     p15, 0, %0, c1, c0, 0\n"
-       "       mrc     p15, 0, %0, c1, c0, 1\n"
-       "       orr     %0, %0, %2\n"
-       "       mcr     p15, 0, %0, c1, c0, 1\n"
-         : "=&r" (v)
-         : "Ir" (CR_C), "Ir" (0x40)
-         : "cc");
-}
-
 /*
  * platform-specific code to shutdown a CPU
  *
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
 {
        cpu_enter_lowpower();
        imx_enable_cpu(cpu, false);
-       cpu_do_idle();
-       cpu_leave_lowpower();
 
-       /* We should never return from idle */
-       panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+       /* spin here until hardware takes it down */
+       while (1)
+               ;
 }
 
 int platform_cpu_disable(unsigned int cpu)
index 5ec0608f2a764a9be584ea1bebbe1339f981a259..045b3f6a387dadef095f2900dc5b786464b525fa 100644 (file)
@@ -71,7 +71,7 @@ soft:
 /* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
 static int ksz9021rn_phy_fixup(struct phy_device *phydev)
 {
-       if (IS_ENABLED(CONFIG_PHYLIB)) {
+       if (IS_BUILTIN(CONFIG_PHYLIB)) {
                /* min rx data delay */
                phy_write(phydev, 0x0b, 0x8105);
                phy_write(phydev, 0x0c, 0x0000);
@@ -112,7 +112,7 @@ put_clk:
 
 static void __init imx6q_sabrelite_init(void)
 {
-       if (IS_ENABLED(CONFIG_PHYLIB))
+       if (IS_BUILTIN(CONFIG_PHYLIB))
                phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
                                ksz9021rn_phy_fixup);
        imx6q_sabrelite_cko1_setup();
index a5717558ee892fd61aac83997bb3ccff5411a0df..a13299d758e15540cfd6fef81b399efbe53d1691 100644 (file)
@@ -7,7 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb
 dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb
 dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb
 dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb
-dtb-$(CONFIG_MACH_TS219_DT)    += kirkwood-qnap-ts219.dtb
+dtb-$(CONFIG_MACH_TS219_DT)    += kirkwood-ts219-6281.dtb
+dtb-$(CONFIG_MACH_TS219_DT)    += kirkwood-ts219-6282.dtb
 dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb
 dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
 dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
index c4b64adcbfce4be58c7a9b149df9e33d19e02e51..3226077735b1d7104627f8f40307e84c97d4ca5b 100644 (file)
@@ -301,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data,
                        GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
-                       IRQ_KIRKWOOD_GE00_ERR);
+                       IRQ_KIRKWOOD_GE00_ERR, 1600);
        /* The interface forgets the MAC address assigned by u-boot if
        the clock is turned off, so claim the clk now. */
        clk_prepare_enable(ge0);
@@ -315,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge01_init(eth_data,
                        GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
-                       IRQ_KIRKWOOD_GE01_ERR);
+                       IRQ_KIRKWOOD_GE01_ERR, 1600);
        clk_prepare_enable(ge1);
 }
 
index 4304f9519372d972c1d798a5c7a9aa33d3dce954..7e8a5a2e1ec7c210aaf2212d10b32905cebda72b 100644 (file)
@@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev)
        struct resource *res;
        int ret = 0;
 
-       if (!pdata && !pdata->pool_name)
+       if (!pdata || !pdata->pool_name)
                return -ENODEV;
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
index 62b53d710efde1a0a71095b2e9e54186cb454886..a9bc84180d21fb378ff27eafb32bcb2ea81f2e89 100644 (file)
@@ -37,7 +37,7 @@
 #define WIN0_OFF(n)            (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
 #define WIN8_OFF(n)            (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
 
-static void __init __iomem *win_cfg_base(int win)
+static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
 {
        /*
         * Find the control register base address for this window.
index b4c53b846c9caa8402ce764aec9f9f254379031a..3057f7d4329a7f3a17b247e5ce31df4e7f21643a 100644 (file)
@@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data,
                        GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
-                       IRQ_MV78XX0_GE_ERR);
+                       IRQ_MV78XX0_GE_ERR,
+                       MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
@@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge01_init(eth_data,
                        GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
-                       NO_IRQ);
+                       NO_IRQ,
+                       MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
index dd2db025f7787e590d94bb229cb143559f9a8317..fcd4e85c4ddcce66921a83b29e45669c50704918 100644 (file)
@@ -62,13 +62,14 @@ config ARCH_OMAP4
        select PM_OPP if PM
        select USB_ARCH_HAS_EHCI if USB_SUPPORT
        select ARM_CPU_SUSPEND if PM
-       select ARCH_NEEDS_CPU_IDLE_COUPLED
+       select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
 
 config SOC_OMAP5
        bool "TI OMAP5"
        select CPU_V7
        select ARM_GIC
        select HAVE_SMP
+       select ARM_CPU_SUSPEND if PM
 
 comment "OMAP Core Type"
        depends on ARCH_OMAP2
index 74915295482ec849e0d0fefe5fc9a22703ee1f2c..28214483aaba24420e96fde47cf5ce91482a0d29 100644 (file)
@@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
 
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+       /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
+       OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
        { .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #endif
index ef230a0eb5eb13e5cedde6f0a6c540f6a348eae6..0d362e9f9cb9a9854c32b378c5dd058f2f785327 100644 (file)
@@ -58,6 +58,7 @@
 #include "hsmmc.h"
 #include "common-board-devices.h"
 
+#define OMAP3_EVM_TS_GPIO      175
 #define OMAP3_EVM_EHCI_VBUS    22
 #define OMAP3_EVM_EHCI_SELECT  61
 
index 14734746457c2bd8bdfcabd1ea4681dc4edab36e..c1875862679fc7092044644bf83e6948ecdbe4c8 100644 (file)
@@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
        .turbo_mode     = 0,
 };
 
-/*
- * ADS7846 driver maybe request a gpio according to the value
- * of pdata->get_pendown_state, but we have done this. So set
- * get_pendown_state to avoid twice gpio requesting.
- */
-static int omap3_get_pendown_state(void)
-{
-       return !gpio_get_value(OMAP3_EVM_TS_GPIO);
-}
-
 static struct ads7846_platform_data ads7846_config = {
        .x_max                  = 0x0fff,
        .y_max                  = 0x0fff,
@@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = {
        .debounce_rep           = 1,
        .gpio_pendown           = -EINVAL,
        .keep_vref_on           = 1,
-       .get_pendown_state      = &omap3_get_pendown_state,
 };
 
 static struct spi_board_info ads7846_spi_board_info __initdata = {
index 4c4ef6a6166ba28b768ee46580b7f35dbafb7885..a0b4a42836ab9f7a29f1757ee410e37a237af00c 100644 (file)
@@ -4,7 +4,6 @@
 #include "twl-common.h"
 
 #define NAND_BLOCK_SIZE        SZ_128K
-#define OMAP3_EVM_TS_GPIO      175
 
 struct mtd_partition;
 struct ads7846_platform_data;
index ee05e193fc61e317b21b368c583f31ce71bbb76b..288bee6cbb76f701b9b675476d87c3b8f16b90cb 100644 (file)
@@ -238,8 +238,9 @@ int __init omap4_idle_init(void)
        for_each_cpu(cpu_id, cpu_online_mask) {
                dev = &per_cpu(omap4_idle_dev, cpu_id);
                dev->cpu = cpu_id;
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
                dev->coupled_cpus = *cpu_online_mask;
-
+#endif
                cpuidle_register_driver(&omap4_idle_driver);
 
                if (cpuidle_register_device(dev)) {
index 471e62a74a166fb64a7486670a54854239cfb3b6..76f9b3c2f586c577668d7f923f0aabff7bbd53a3 100644 (file)
@@ -127,7 +127,6 @@ struct omap_mux_partition {
  * @gpio:      GPIO number
  * @muxnames:  available signal modes for a ball
  * @balls:     available balls on the package
- * @partition: mux partition
  */
 struct omap_mux {
        u16     reg_offset;
index 2293ba27101b96fae1f387d5b366c5d77181f99b..c95415da23c275b184d2817372a990a371ddaf0c 100644 (file)
@@ -94,7 +94,7 @@ int __init omap4_opp_init(void)
 {
        int r = -ENODEV;
 
-       if (!cpu_is_omap44xx())
+       if (!cpu_is_omap443x())
                return r;
 
        r = omap_init_opp_table(omap44xx_opp_def_list,
index e4fc88c65dbd6a868b6dac07229de3ee3b7791ea..05bd8f02723f2966bfc559ae30b9c33feee9feb3 100644 (file)
@@ -272,21 +272,16 @@ void omap_sram_idle(void)
        per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
        core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
 
-       if (mpu_next_state < PWRDM_POWER_ON) {
-               pwrdm_pre_transition(mpu_pwrdm);
-               pwrdm_pre_transition(neon_pwrdm);
-       }
+       pwrdm_pre_transition(NULL);
 
        /* PER */
        if (per_next_state < PWRDM_POWER_ON) {
-               pwrdm_pre_transition(per_pwrdm);
                per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
                omap2_gpio_prepare_for_idle(per_going_off);
        }
 
        /* CORE */
        if (core_next_state < PWRDM_POWER_ON) {
-               pwrdm_pre_transition(core_pwrdm);
                if (core_next_state == PWRDM_POWER_OFF) {
                        omap3_core_save_context();
                        omap3_cm_save_context();
@@ -339,20 +334,14 @@ void omap_sram_idle(void)
                        omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
                                               OMAP3430_GR_MOD,
                                               OMAP3_PRM_VOLTCTRL_OFFSET);
-               pwrdm_post_transition(core_pwrdm);
        }
        omap3_intc_resume_idle();
 
+       pwrdm_post_transition(NULL);
+
        /* PER */
-       if (per_next_state < PWRDM_POWER_ON) {
+       if (per_next_state < PWRDM_POWER_ON)
                omap2_gpio_resume_after_idle();
-               pwrdm_post_transition(per_pwrdm);
-       }
-
-       if (mpu_next_state < PWRDM_POWER_ON) {
-               pwrdm_post_transition(mpu_pwrdm);
-               pwrdm_post_transition(neon_pwrdm);
-       }
 }
 
 static void omap3_pm_idle(void)
index 9f6b83d1b193348a7af9e628cff41956c8684898..91e71d8f46f0aa731c9678e88423490aa1192cf9 100644 (file)
@@ -56,9 +56,13 @@ ppa_por_params:
  * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
  * It returns to the caller for CPU INACTIVE and ON power states or in case
  * CPU failed to transition to targeted OFF/DORMANT state.
+ *
+ * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
+ * stack frame and it expects the caller to take care of it. Hence the entire
+ * stack frame is saved to avoid possible stack corruption.
  */
 ENTRY(omap4_finish_suspend)
-       stmfd   sp!, {lr}
+       stmfd   sp!, {r4-r12, lr}
        cmp     r0, #0x0
        beq     do_WFI                          @ No lowpower state, jump to WFI
 
@@ -226,7 +230,7 @@ scu_gp_clear:
 skip_scu_gp_clear:
        isb
        dsb
-       ldmfd   sp!, {pc}
+       ldmfd   sp!, {r4-r12, pc}
 ENDPROC(omap4_finish_suspend)
 
 /*
index de47f170ba50abf2506c363838d7cd82c70109ee..db5ff664237517562766ffdbc34887c6416c272f 100644 (file)
@@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
                           const char *pmic_type, int pmic_irq,
                           struct twl4030_platform_data *pmic_data)
 {
+       omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
        strncpy(pmic_i2c_board_info.type, pmic_type,
                sizeof(pmic_i2c_board_info.type));
        pmic_i2c_board_info.irq = pmic_irq;
index 9148b229d0de925b4f95fea5154421fa462331ba..410291c676668befcfe5b43723af071ba153e5db 100644 (file)
@@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data,
                        ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
-                       IRQ_ORION5X_ETH_ERR);
+                       IRQ_ORION5X_ETH_ERR,
+                       MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
index 454831b66037f9c64d37521bf14e118dcc92bc9a..ee99fd56c0439f5bdc22152fa2c530282424fd70 100644 (file)
@@ -24,7 +24,8 @@
 */
 
 enum dma_ch {
-       DMACH_XD0,
+       DMACH_DT_PROP = -1,     /* not yet supported, do not use */
+       DMACH_XD0 = 0,
        DMACH_XD1,
        DMACH_SDI,
        DMACH_SPI0,
index c013bbf79cac0d0eff64edfd5221d7815bb04879..53d3d46dec1290b4265a3c3c26d7ffc0449f223e 100644 (file)
@@ -41,7 +41,6 @@ config MACH_HREFV60
 config MACH_SNOWBALL
        bool "U8500 Snowball platform"
        select MACH_MOP500
-       select LEDS_GPIO
        help
          Include support for the snowball development platform.
 
index 996048038743f777e114ff6a48bc2bb682271a46..df15646036aacd9bec713b7beebb926dafa707fa 100644 (file)
@@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent,
        return pdev;
 }
 
-/* Platform device for ASoC U8500 machine */
-static struct platform_device snd_soc_u8500 = {
-               .name = "snd-soc-u8500",
+/* Platform device for ASoC MOP500 machine */
+static struct platform_device snd_soc_mop500 = {
+               .name = "snd-soc-mop500",
                .id = 0,
                .dev = {
                        .platform_data = NULL,
@@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent)
 {
        struct platform_device *msp1;
 
-       pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__);
-       platform_device_register(&snd_soc_u8500);
+       pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__);
+       platform_device_register(&snd_soc_mop500);
 
        pr_info("Initialize MSP I2S-devices.\n");
        db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0,
index 8674a890fd1c7071ae2efce55a3a7c888a97c091..a534d8880de12e2a92b0c7a35ec1ebdc3034c44e 100644 (file)
@@ -797,6 +797,7 @@ static void __init u8500_init_machine(void)
                                ARRAY_SIZE(mop500_platform_devs));
 
                mop500_sdi_init(parent);
+               mop500_msp_init(parent);
                i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
                i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
                i2c_register_board_info(2, mop500_i2c2_devices,
@@ -804,6 +805,8 @@ static void __init u8500_init_machine(void)
 
                mop500_uib_init();
 
+       } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
+               mop500_msp_init(parent);
        } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
                /*
                 * The HREFv60 board removed a GPIO expander and routed
@@ -815,6 +818,7 @@ static void __init u8500_init_machine(void)
                                ARRAY_SIZE(mop500_platform_devs));
 
                hrefv60_sdi_init(parent);
+               mop500_msp_init(parent);
 
                i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
                i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
index 626ad8cad7a9486d71408f611b4f99b46c85b995..938b50a33439b092202de715e81267b18b5f73cb 100644 (file)
@@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)
                timer->reserved = 1;
                break;
        }
+       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (timer) {
                ret = omap_dm_timer_prepare(timer);
@@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)
                        timer = NULL;
                }
        }
-       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (!timer)
                pr_debug("%s: timer request failed!\n", __func__);
@@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
                        break;
                }
        }
+       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (timer) {
                ret = omap_dm_timer_prepare(timer);
@@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
                        timer = NULL;
                }
        }
-       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (!timer)
                pr_debug("%s: timer%d request failed!\n", __func__, id);
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
 
 void omap_dm_timer_disable(struct omap_dm_timer *timer)
 {
-       pm_runtime_put(&timer->pdev->dev);
+       pm_runtime_put_sync(&timer->pdev->dev);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
 
index 68b180edcfffd0e05c9256aaa153993500177f84..bb5d08a70dbc64ac961945669655e2c47d380750 100644 (file)
@@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define cpu_class_is_omap1()   (cpu_is_omap7xx() || cpu_is_omap15xx() || \
                                cpu_is_omap16xx())
 #define cpu_class_is_omap2()   (cpu_is_omap24xx() || cpu_is_omap34xx() || \
-                               cpu_is_omap44xx() || soc_is_omap54xx())
+                               cpu_is_omap44xx() || soc_is_omap54xx() || \
+                               soc_is_am33xx())
 
 /* Various silicon revisions for omap2 */
 #define OMAP242X_CLASS         0x24200024
index 045e320f1067408abc1ef08e6a3667fe911c63f7..324d31b14852e632d614635e40af358d24a36aab 100644 (file)
 # endif
 #endif
 
+#ifdef CONFIG_SOC_AM33XX
+# ifdef OMAP_NAME
+#  undef  MULTI_OMAP2
+#  define MULTI_OMAP2
+# else
+#  define OMAP_NAME am33xx
+# endif
+#endif
+
 #endif /* __PLAT_OMAP_MULTI_H */
index b8d19a136781e4ba89382cc151ba6fe0190ccbc3..7f7b112acccb897b90066a6b8357a943284c3a14 100644 (file)
@@ -110,7 +110,7 @@ static inline void flush(void)
        _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT,   \
                AM33XXUART##p)
 
-static inline void __arch_decomp_setup(unsigned long arch_id)
+static inline void arch_decomp_setup(void)
 {
        int port = 0;
 
@@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
        } while (0);
 }
 
-#define arch_decomp_setup()    __arch_decomp_setup(arch_id)
-
 /*
  * nothing to do
  */
index d245a87dc014d4c6cf3d1c9a3285add47a56ecb5..b8b747a9d360110e9ca24b505f5249a730209e3e 100644 (file)
@@ -291,10 +291,12 @@ static struct platform_device orion_ge00 = {
 void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err)
+                           unsigned long irq_err,
+                           unsigned int tx_csum_limit)
 {
        fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
+       orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
        ge_complete(&orion_ge00_shared_data,
                    orion_ge00_resources, irq, &orion_ge00_shared,
                    eth_data, &orion_ge00);
@@ -343,10 +345,12 @@ static struct platform_device orion_ge01 = {
 void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err)
+                           unsigned long irq_err,
+                           unsigned int tx_csum_limit)
 {
        fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
+       orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
        ge_complete(&orion_ge01_shared_data,
                    orion_ge01_resources, irq, &orion_ge01_shared,
                    eth_data, &orion_ge01);
index e00fdb2136090154ea930c9f224365677644444e..ae2377ef63e5d9c455e07a4c8db7590f8935d4dc 100644 (file)
@@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase,
 void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err);
+                           unsigned long irq_err,
+                           unsigned int tx_csum_limit);
 
 void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err);
+                           unsigned long irq_err,
+                           unsigned int tx_csum_limit);
 
 void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
index 28f898f75380a8ff39036805afdc5b5d40803df8..db98e7021f0daf7507fe55f91f1e73ef761a6dc3 100644 (file)
@@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
  * when necessary.
 */
 
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                        dma_addr_t data, int size)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
index 74e31ce355388bd7c2062b35fb30e09ca085b3f5..fc49f3dabd7653624b7f9ccc8e78ef12b102d527 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/platform_data/s3c-hsudc.h>
 #include <linux/platform_data/s3c-hsotg.h>
 
+#include <media/s5p_hdmi.h>
+
 #include <asm/irq.h>
 #include <asm/pmu.h>
 #include <asm/mach/arch.h>
@@ -748,7 +750,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
        if (!pd) {
                pd = &default_i2c_data;
 
-               if (soc_is_exynos4210())
+               if (soc_is_exynos4210() ||
+                   soc_is_exynos4212() || soc_is_exynos4412())
                        pd->bus_num = 8;
                else if (soc_is_s5pv210())
                        pd->bus_num = 3;
@@ -759,6 +762,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
        npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
                               &s5p_device_i2c_hdmiphy);
 }
+
+struct s5p_hdmi_platform_data s5p_hdmi_def_platdata;
+
+void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+                                 struct i2c_board_info *mhl_info, int mhl_bus)
+{
+       struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata;
+
+       if (soc_is_exynos4210() ||
+           soc_is_exynos4212() || soc_is_exynos4412())
+               pd->hdmiphy_bus = 8;
+       else if (soc_is_s5pv210())
+               pd->hdmiphy_bus = 3;
+       else
+               pd->hdmiphy_bus = 0;
+
+       pd->hdmiphy_info = hdmiphy_info;
+       pd->mhl_info = mhl_info;
+       pd->mhl_bus = mhl_bus;
+
+       s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data),
+                        &s5p_device_hdmi);
+}
+
 #endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */
 
 /* I2S */
diff --git a/arch/arm/plat-samsung/include/plat/hdmi.h b/arch/arm/plat-samsung/include/plat/hdmi.h
new file mode 100644 (file)
index 0000000..331d046
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PLAT_SAMSUNG_HDMI_H
+#define __PLAT_SAMSUNG_HDMI_H __FILE__
+
+extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+                                 struct i2c_board_info *mhl_info, int mhl_bus);
+
+#endif /* __PLAT_SAMSUNG_HDMI_H */
index 64ab65f0fdbc652cd1e0f165ec13bf5a61cc6377..15070284343ee43070c7130d3f8ce6bff6a0b4ff 100644 (file)
@@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot;
 
 #ifdef CONFIG_SAMSUNG_PM_DEBUG
 
-struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
+static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
 
 static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
 {
index 331d574df99c8d86ecdce41c9366164fac63f396..faf65286574e9f28d32d9cf31e1c097afcfc7acb 100644 (file)
@@ -89,6 +89,7 @@ config ATH79
        select CEVT_R4K
        select CSRC_R4K
        select DMA_NONCOHERENT
+       select HAVE_CLK
        select IRQ_CPU
        select MIPS_MACHINE
        select SYS_HAS_CPU_MIPS32_R2
index 99969484c475c7fc7366d12fa50efd495656a9b9..a124c251c0c92a2bbbcc8f86ab2674aa5a8ad6a4 100644 (file)
@@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert)
         * adapter on the mtx-1 "singleboard" variant. It triggers a custom
         * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
         */
+       udelay(1);
+
        if (assert && devsel != 0)
                /* Suppress signal to Cardbus */
                alchemy_gpio_set_value(1, 0);   /* set EXT_IO3 OFF */
index 36e9570e7bc4a250fbbc05c6312c5b2bf53adcb5..b2a2311ec85b492d1dbbbd22df5a57db0ba978c4 100644 (file)
@@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void)
 
        ath79_ohci_resources[0].start = AR7240_OHCI_BASE;
        ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1;
+       ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB;
+       ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB;
        platform_device_register(&ath79_ohci_device);
 }
 
index 29054f211832505d371930d3e13f752af56f69f3..48fe762d2526885908c192545dbc70fa69d4cb9c 100644 (file)
@@ -188,8 +188,10 @@ void __init ath79_gpio_init(void)
 
        if (soc_is_ar71xx())
                ath79_gpio_count = AR71XX_GPIO_COUNT;
-       else if (soc_is_ar724x())
-               ath79_gpio_count = AR724X_GPIO_COUNT;
+       else if (soc_is_ar7240())
+               ath79_gpio_count = AR7240_GPIO_COUNT;
+       else if (soc_is_ar7241() || soc_is_ar7242())
+               ath79_gpio_count = AR7241_GPIO_COUNT;
        else if (soc_is_ar913x())
                ath79_gpio_count = AR913X_GPIO_COUNT;
        else if (soc_is_ar933x())
index e39f73048d4f653c48ba3b52a31976f290699bae..f1c9c3e2f678146e83772f3c7cb05a0989743fce 100644 (file)
@@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void)
        if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
                spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
                spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
+               spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
+               spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
        }
 
        if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
                spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
                spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
+               spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
+               spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
        }
 
        bcm63xx_spi_regs_init();
index 7fb1f222b8a538b9b000e99375899f37a0c069a9..274cd4fad30c4810f29a420e2dc1e4b139adba73 100644 (file)
@@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
        octeon_irq_ciu_to_irq[line][bit] = irq;
 }
 
+static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+                                        int irq, int line, int bit)
+{
+       irq_domain_associate(domain, irq, line << 6 | bit);
+}
+
 static int octeon_coreid_for_cpu(int cpu)
 {
 #ifdef CONFIG_SMP
@@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void)
                mutex_init(&cd->core_irq_mutex);
 
                irq = OCTEON_IRQ_SW0 + i;
-               switch (irq) {
-               case OCTEON_IRQ_TIMER:
-               case OCTEON_IRQ_SW0:
-               case OCTEON_IRQ_SW1:
-               case OCTEON_IRQ_5:
-               case OCTEON_IRQ_PERF:
-                       irq_set_chip_data(irq, cd);
-                       irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
-                                                handle_percpu_irq);
-                       break;
-               default:
-                       break;
-               }
+               irq_set_chip_data(irq, cd);
+               irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
+                                        handle_percpu_irq);
        }
 }
 
@@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
        unsigned int type;
        unsigned int pin;
        unsigned int trigger;
-       struct octeon_irq_gpio_domain_data *gpiod;
 
        if (d->of_node != node)
                return -EINVAL;
@@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
                break;
        }
        *out_type = type;
-       gpiod = d->host_data;
-       *out_hwirq = gpiod->base_hwirq + pin;
+       *out_hwirq = pin;
 
        return 0;
 }
@@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
 static int octeon_irq_gpio_map(struct irq_domain *d,
                               unsigned int virq, irq_hw_number_t hw)
 {
-       unsigned int line = hw >> 6;
-       unsigned int bit = hw & 63;
+       struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
+       unsigned int line, bit;
 
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
 
+       hw += gpiod->base_hwirq;
+       line = hw >> 6;
+       bit = hw & 63;
        if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
        octeon_irq_set_ciu_mapping(virq, line, bit,
                                   octeon_irq_gpio_chip,
                                   octeon_irq_handle_gpio);
-
        return 0;
 }
 
@@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void)
        struct irq_chip *chip_wd;
        struct device_node *gpio_node;
        struct device_node *ciu_node;
+       struct irq_domain *ciu_domain = NULL;
 
        octeon_irq_init_ciu_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
@@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void)
        /* Mips internal */
        octeon_irq_init_core();
 
-       /* CIU_0 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
-
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
-
-       for (i = 0; i < 4; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
-       for (i = 0; i < 4; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
-
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
-       for (i = 0; i < 4; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq);
-
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
-
-       /* CIU_1 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
-
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
-
        gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
        if (gpio_node) {
                struct octeon_irq_gpio_domain_data *gpiod;
@@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void)
 
        ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
        if (ciu_node) {
-               irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
+               ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
                of_node_put(ciu_node);
        } else
-               pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n");
+               panic("Cannot find device node for cavium,octeon-3860-ciu.");
+
+       /* CIU_0 */
+       for (i = 0; i < 16; i++)
+               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+
+       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
+       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
+
+       for (i = 0; i < 4; i++)
+               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+       for (i = 0; i < 4; i++)
+               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+
+       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+       for (i = 0; i < 4; i++)
+               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+
+       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
+
+       /* CIU_1 */
+       for (i = 0; i < 16; i++)
+               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
+
+       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
 
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
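The rework above stops pre-loading every CIU interrupt into the static octeon_irq_ciu_to_irq table and instead creates the CIU irq_domain first, then pins the legacy Linux IRQ numbers to their hardware lines with irq_domain_associate() (wrapped as octeon_irq_force_ciu_mapping()); the hwirq encoding stays "line << 6 | bit". On the GPIO side, the devicetree xlat callback now returns the bare pin number and the domain's map callback adds gpiod->base_hwirq, so the offset lives in one place. A worked example, assuming the usual base of 16 (the GPIO lines sit at CIU0 bits 16-31):

	/* Hypothetical pin: GPIO pin 3, gpiod->base_hwirq == 16.
	 *   xlat: *out_hwirq = pin              -> 3
	 *   map:  hw = 3 + 16 = 19; line = 19 >> 6 = 0; bit = 19 & 63 = 19
	 * i.e. the pin still lands on CIU register 0, bit 19, exactly as the
	 * old xlat-side "gpiod->base_hwirq + pin" did, but fixed mappings set
	 * up with irq_domain_associate() never go through xlat at all. */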
index 1caa78ad06d5833306ee367cd44fbefe5dbd6fe3..dde504477fac1521e95f2fe60b75c7ac2fd8e5aa 100644 (file)
 #define AR71XX_GPIO_REG_FUNC           0x28
 
 #define AR71XX_GPIO_COUNT              16
-#define AR724X_GPIO_COUNT              18
+#define AR7240_GPIO_COUNT              18
+#define AR7241_GPIO_COUNT              20
 #define AR913X_GPIO_COUNT              22
 #define AR933X_GPIO_COUNT              30
 #define AR934X_GPIO_COUNT              23
index 4476fa03bf36d5eee19b26e1443b697eb4c33502..6ddae926bf79717b95a709d71c40a99a888a310a 100644 (file)
@@ -42,7 +42,6 @@
 #define cpu_has_mips64r1       0
 #define cpu_has_mips64r2       0
 
-#define cpu_has_dsp            0
 #define cpu_has_mipsmt         0
 
 #define cpu_has_64bits         0
index 7d98dbe5d4b5ef9de38b25717747436e08e6179e..c9bae1362606aac10d79c921f3c8b7374cfc1ba5 100644 (file)
@@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void);
 
 struct bcm63xx_spi_pdata {
        unsigned int    fifo_size;
+       unsigned int    msg_type_shift;
+       unsigned int    msg_ctl_width;
        int             bus_num;
        int             num_chipselect;
        u32             speed_hz;
index 4ccc2a748aff2db6913dab0e8b67e1eb6fbceeb8..61f2a2a5099d02ad281290c675604a4e897e9bf9 100644 (file)
 #define SPI_6338_FILL_BYTE             0x07
 #define SPI_6338_MSG_TAIL              0x09
 #define SPI_6338_RX_TAIL               0x0b
-#define SPI_6338_MSG_CTL               0x40
+#define SPI_6338_MSG_CTL               0x40    /* 8-bits register */
+#define SPI_6338_MSG_CTL_WIDTH         8
 #define SPI_6338_MSG_DATA              0x41
 #define SPI_6338_MSG_DATA_SIZE         0x3f
 #define SPI_6338_RX_DATA               0x80
 #define SPI_6348_FILL_BYTE             0x07
 #define SPI_6348_MSG_TAIL              0x09
 #define SPI_6348_RX_TAIL               0x0b
-#define SPI_6348_MSG_CTL               0x40
+#define SPI_6348_MSG_CTL               0x40    /* 8-bits register */
+#define SPI_6348_MSG_CTL_WIDTH         8
 #define SPI_6348_MSG_DATA              0x41
 #define SPI_6348_MSG_DATA_SIZE         0x3f
 #define SPI_6348_RX_DATA               0x80
 
 /* BCM 6358 SPI core */
 #define SPI_6358_MSG_CTL               0x00    /* 16-bits register */
+#define SPI_6358_MSG_CTL_WIDTH         16
 #define SPI_6358_MSG_DATA              0x02
 #define SPI_6358_MSG_DATA_SIZE         0x21e
 #define SPI_6358_RX_DATA               0x400
 
 /* BCM 6358 SPI core */
 #define SPI_6368_MSG_CTL               0x00    /* 16-bits register */
+#define SPI_6368_MSG_CTL_WIDTH         16
 #define SPI_6368_MSG_DATA              0x02
 #define SPI_6368_MSG_DATA_SIZE         0x21e
 #define SPI_6368_RX_DATA               0x400
 #define SPI_HD_W                       0x01
 #define SPI_HD_R                       0x02
 #define SPI_BYTE_CNT_SHIFT             0
-#define SPI_MSG_TYPE_SHIFT             14
+#define SPI_6338_MSG_TYPE_SHIFT                6
+#define SPI_6348_MSG_TYPE_SHIFT                6
+#define SPI_6358_MSG_TYPE_SHIFT                14
+#define SPI_6368_MSG_TYPE_SHIFT                14
 
 /* Command */
 #define SPI_CMD_NOOP                   0x00
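With the platform data now carrying msg_type_shift and msg_ctl_width alongside fifo_size, and the header defining per-SoC SPI_*_MSG_TYPE_SHIFT / SPI_*_MSG_CTL_WIDTH values, the SPI driver can compose the message-control word without hard-coding the 6358 layout. A minimal sketch of the consumer side; struct bcm63xx_spi and its bs-> fields here are illustrative stand-ins, not necessarily the driver's own names:

	static void sketch_write_msg_ctl(struct bcm63xx_spi *bs, unsigned int len)
	{
		/* message type and byte count packed per the SoC's layout */
		u16 msg_ctl = (len << SPI_BYTE_CNT_SHIFT) |
			      (SPI_HD_W << bs->msg_type_shift);

		if (bs->msg_ctl_width == 8)		/* 6338/6348 */
			writeb(msg_ctl, bs->msg_ctl_reg);
		else					/* 6358/6368: 16-bit */
			writew(msg_ctl, bs->msg_ctl_reg);
	}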
index 418992042f6fcbc23f1612a82ee886cd969c5fa5..c22a3078bf11b69f2da21547fc1c337da7b51206 100644 (file)
@@ -21,14 +21,10 @@ enum octeon_irq {
        OCTEON_IRQ_TIMER,
 /* sources in CIU_INTX_EN0 */
        OCTEON_IRQ_WORKQ0,
-       OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16,
-       OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
+       OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16,
        OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
        OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
        OCTEON_IRQ_MBOX1,
-       OCTEON_IRQ_UART0,
-       OCTEON_IRQ_UART1,
-       OCTEON_IRQ_UART2,
        OCTEON_IRQ_PCI_INT0,
        OCTEON_IRQ_PCI_INT1,
        OCTEON_IRQ_PCI_INT2,
@@ -38,8 +34,6 @@ enum octeon_irq {
        OCTEON_IRQ_PCI_MSI2,
        OCTEON_IRQ_PCI_MSI3,
 
-       OCTEON_IRQ_TWSI,
-       OCTEON_IRQ_TWSI2,
        OCTEON_IRQ_RML,
        OCTEON_IRQ_TIMER0,
        OCTEON_IRQ_TIMER1,
@@ -47,8 +41,6 @@ enum octeon_irq {
        OCTEON_IRQ_TIMER3,
        OCTEON_IRQ_USB0,
        OCTEON_IRQ_USB1,
-       OCTEON_IRQ_MII0,
-       OCTEON_IRQ_MII1,
        OCTEON_IRQ_BOOTDMA,
 #ifndef CONFIG_PCI_MSI
        OCTEON_IRQ_LAST = 127
index 7531ecd654d651df630d1d520e71f762c47547e3..dca8bce8c7abbe47473920ca6d9e28eb3c235dc9 100644 (file)
@@ -10,6 +10,7 @@ struct mod_arch_specific {
        struct list_head dbe_list;
        const struct exception_table_entry *dbe_start;
        const struct exception_table_entry *dbe_end;
+       struct mips_hi16 *r_mips_hi16_list;
 };
 
 typedef uint8_t Elf64_Byte;            /* Type for a 8-bit quantity.  */
index a37d12b3b61c0b3c2db45cc6a37f44fc4e6eac52..afe9e0e03fe96c5b7a710abe19075d9cf4a4f87b 100644 (file)
 
 #ifdef CONFIG_SYNC_R4K
 
-extern void synchronise_count_master(void);
-extern void synchronise_count_slave(void);
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
 
 #else
 
-static inline void synchronise_count_master(void)
+static inline void synchronise_count_master(int cpu)
 {
 }
 
-static inline void synchronise_count_slave(void)
+static inline void synchronise_count_slave(int cpu)
 {
 }
 
index a5066b1c3de37185896fe529839c5133a158ccb0..4f8c3cba8c0c45180cdabe08ac05aa43e652395f 100644 (file)
@@ -39,8 +39,6 @@ struct mips_hi16 {
        Elf_Addr value;
 };
 
-static struct mips_hi16 *mips_hi16_list;
-
 static LIST_HEAD(dbe_list);
 static DEFINE_SPINLOCK(dbe_lock);
 
@@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
 
        n->addr = (Elf_Addr *)location;
        n->value = v;
-       n->next = mips_hi16_list;
-       mips_hi16_list = n;
+       n->next = me->arch.r_mips_hi16_list;
+       me->arch.r_mips_hi16_list = n;
 
        return 0;
 }
@@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
        return 0;
 }
 
+static void free_relocation_chain(struct mips_hi16 *l)
+{
+       struct mips_hi16 *next;
+
+       while (l) {
+               next = l->next;
+               kfree(l);
+               l = next;
+       }
+}
+
 static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
 {
        unsigned long insnlo = *location;
+       struct mips_hi16 *l;
        Elf_Addr val, vallo;
 
        /* Sign extend the addend we extract from the lo insn.  */
        vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
 
-       if (mips_hi16_list != NULL) {
-               struct mips_hi16 *l;
-
-               l = mips_hi16_list;
+       if (me->arch.r_mips_hi16_list != NULL) {
+               l = me->arch.r_mips_hi16_list;
                while (l != NULL) {
                        struct mips_hi16 *next;
                        unsigned long insn;
@@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
                        l = next;
                }
 
-               mips_hi16_list = NULL;
+               me->arch.r_mips_hi16_list = NULL;
        }
 
        /*
@@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
        return 0;
 
 out_danger:
+       free_relocation_chain(l);
+       me->arch.r_mips_hi16_list = NULL;
+
        pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
 
        return -ENOEXEC;
@@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
        pr_debug("Applying relocate section %u to %u\n", relsec,
               sechdrs[relsec].sh_info);
 
+       me->arch.r_mips_hi16_list = NULL;
        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* This is where to make the change */
                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
                        return res;
        }
 
+       /*
+        * Normally the hi16 list should be deallocated at this point.  A
+        * malformed binary however could contain a series of R_MIPS_HI16
+        * relocations not followed by a R_MIPS_LO16 relocation.  In that
+        * case, free up the list and return an error.
+        */
+       if (me->arch.r_mips_hi16_list) {
+               free_relocation_chain(me->arch.r_mips_hi16_list);
+               me->arch.r_mips_hi16_list = NULL;
+
+               return -ENOEXEC;
+       }
+
        return 0;
 }
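Two problems are addressed in this file: the pending R_MIPS_HI16 list was a single global (mips_hi16_list), so two modules being relocated concurrently could corrupt each other's state, and a malformed object whose HI16 entries were never matched by an R_MIPS_LO16 leaked the list. Moving the list into struct mod_arch_specific makes it per-module, and free_relocation_chain() plus the check at the end of apply_relocate() releases it on both error paths. The reason HI16 must be deferred at all is the carry between the two instruction halves; a minimal illustration of the arithmetic:

	/* The LO16 half is used sign-extended by the CPU, so the HI16 half
	 * must absorb a carry whenever the low 16 bits are >= 0x8000: */
	static void split_hi_lo(uint32_t v, uint16_t *hi, uint16_t *lo)
	{
		*lo = v & 0xffff;
		*hi = (v + 0x8000) >> 16;	/* carry when *lo >= 0x8000 */
		/* now (*hi << 16) + (int16_t)*lo == v, which is why each HI16
		 * is parked on the list until its matching LO16, and thus the
		 * final value v, is known. */
	}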
 
index 31637d8c87381f04b02bcae714e8b2165952fc9c..9005bf9fb859552101d7e68638adc73ba4978ee7 100644 (file)
@@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)
 
        cpu_set(cpu, cpu_callin_map);
 
-       synchronise_count_slave();
+       synchronise_count_slave(cpu);
 
        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
@@ -173,7 +173,6 @@ void smp_send_stop(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
        mp_ops->cpus_done();
-       synchronise_count_master();
 }
 
 /* called from main before smp_init() */
@@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);
 
+       synchronise_count_master(cpu);
        return 0;
 }
 
index 842d55e411fd396479b611ba0a1c0c2af2d4cb92..7f1eca3858def1845dc98dd1264206be19159b01 100644 (file)
@@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
 #define COUNTON        100
 #define NR_LOOPS 5
 
-void __cpuinit synchronise_count_master(void)
+void __cpuinit synchronise_count_master(int cpu)
 {
        int i;
        unsigned long flags;
        unsigned int initcount;
-       int nslaves;
 
 #ifdef CONFIG_MIPS_MT_SMTC
        /*
@@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)
        return;
 #endif
 
-       printk(KERN_INFO "Synchronize counters across %u CPUs: ",
-              num_online_cpus());
+       printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
 
        local_irq_save(flags);
 
@@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)
         * Notify the slaves that it's time to start
         */
        atomic_set(&count_reference, read_c0_count());
-       atomic_set(&count_start_flag, 1);
+       atomic_set(&count_start_flag, cpu);
        smp_wmb();
 
        /* Count will be initialised to current timer for all CPU's */
@@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)
         * two CPUs.
         */
 
-       nslaves = num_online_cpus()-1;
        for (i = 0; i < NR_LOOPS; i++) {
-               /* slaves loop on '!= ncpus' */
-               while (atomic_read(&count_count_start) != nslaves)
+               /* slaves loop on '!= 2' */
+               while (atomic_read(&count_count_start) != 1)
                        mb();
                atomic_set(&count_count_stop, 0);
                smp_wmb();
@@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)
                /*
                 * Wait for all slaves to leave the synchronization point:
                 */
-               while (atomic_read(&count_count_stop) != nslaves)
+               while (atomic_read(&count_count_stop) != 1)
                        mb();
                atomic_set(&count_count_start, 0);
                smp_wmb();
@@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
+       atomic_set(&count_start_flag, 0);
 
        local_irq_restore(flags);
 
@@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)
        printk("done.\n");
 }
 
-void __cpuinit synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(int cpu)
 {
        int i;
        unsigned int initcount;
-       int ncpus;
 
 #ifdef CONFIG_MIPS_MT_SMTC
        /*
@@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)
         * so we first wait for the master to say everyone is ready
         */
 
-       while (!atomic_read(&count_start_flag))
+       while (atomic_read(&count_start_flag) != cpu)
                mb();
 
        /* Count will be initialised to next expire for all CPU's */
        initcount = atomic_read(&count_reference);
 
-       ncpus = num_online_cpus();
        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&count_count_start);
-               while (atomic_read(&count_count_start) != ncpus)
+               while (atomic_read(&count_count_start) != 2)
                        mb();
 
                /*
@@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)
                        write_c0_count(initcount);
 
                atomic_inc(&count_count_stop);
-               while (atomic_read(&count_count_stop) != ncpus)
+               while (atomic_read(&count_count_stop) != 2)
                        mb();
        }
        /* Arrange for an interrupt in a short while */
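Together with the smp.c hunk above, counter synchronization now runs pairwise: __cpu_up() calls synchronise_count_master(cpu) once the new CPU has checked in, the master publishes that CPU's number in count_start_flag, and the two CPUs rendezvous with a fixed party count of two instead of num_online_cpus(). This drops the old assumption that every online CPU reaches the sync point at the same time, which is never true for a CPU brought up later. A stripped-down sketch of the slave side of the handshake, written with C11 atomics rather than the kernel primitives:

	#include <stdatomic.h>

	static atomic_int start_flag;	/* master stores the slave's cpu id */
	static atomic_int count_start;	/* parties that reached the loop top */

	static void slave_sync(int cpu)
	{
		while (atomic_load(&start_flag) != cpu)
			;			/* wait until the master picks us */
		atomic_fetch_add(&count_start, 1);
		while (atomic_load(&count_start) != 2)
			;			/* the master's increment makes it 2 */
		/* ...read the reference count and program the local timer... */
	}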
index 284dea54faf5a9f629e9d2cab18121cc5db328c6..2147cb34e705a2dca85355aa529c03ca878744a6 100644 (file)
@@ -252,16 +252,3 @@ void __init mips_pcibios_init(void)
 
        register_pci_controller(controller);
 }
-
-/* Enable PCI 2.1 compatibility in PIIX4 */
-static void __devinit quirk_dlcsetup(struct pci_dev *dev)
-{
-       u8 odlc, ndlc;
-       (void) pci_read_config_byte(dev, 0x82, &odlc);
-       /* Enable passive releases and delayed transaction */
-       ndlc = odlc | 7;
-       (void) pci_write_config_byte(dev, 0x82, ndlc);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
-       quirk_dlcsetup);
index 414a7459858d4e06b837a08d375eccbc96804dae..86d77a666458bae80c60f94c9fc405dca5f8459c 100644 (file)
 #define AR724X_PCI_MEM_BASE    0x10000000
 #define AR724X_PCI_MEM_SIZE    0x08000000
 
+#define AR724X_PCI_REG_RESET           0x18
 #define AR724X_PCI_REG_INT_STATUS      0x4c
 #define AR724X_PCI_REG_INT_MASK                0x50
 
+#define AR724X_PCI_RESET_LINK_UP       BIT(0)
+
 #define AR724X_PCI_INT_DEV0            BIT(14)
 
 #define AR724X_PCI_IRQ_COUNT           1
@@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base;
 
 static u32 ar724x_pci_bar0_value;
 static bool ar724x_pci_bar0_is_cached;
+static bool ar724x_pci_link_up;
+
+static inline bool ar724x_pci_check_link(void)
+{
+       u32 reset;
+
+       reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
+       return reset & AR724X_PCI_RESET_LINK_UP;
+}
 
 static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                            int size, uint32_t *value)
@@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
        void __iomem *base;
        u32 data;
 
+       if (!ar724x_pci_link_up)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
        if (devfn)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
        u32 data;
        int s;
 
+       if (!ar724x_pci_link_up)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
        if (devfn)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq)
        if (ar724x_pci_ctrl_base == NULL)
                goto err_unmap_devcfg;
 
+       ar724x_pci_link_up = ar724x_pci_check_link();
+       if (!ar724x_pci_link_up)
+               pr_warn("ar724x: PCIe link is down\n");
+
        ar724x_pci_irq_init(irq);
        register_pci_controller(&ar724x_pci_controller);
 
index 6c6defc24619fbab673b6296387c1dead503158b..af9cf30ed47430720ffb4736378336ee04edc9fa 100644 (file)
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_sub_and_test(i,v)       (atomic_sub_return((i),(v)) == 0)
 
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
 
 #define smp_mb__before_atomic_dec()    smp_mb()
 #define smp_mb__after_atomic_dec()     smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
 
 static __inline__ s64
 __atomic64_add_return(s64 i, atomic64_t *v)
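Dropping the compound-literal cast matters because ((atomic_t) { (i) }) is not accepted everywhere a plain brace initializer is; in particular it can be rejected as a non-constant initializer element when nested inside another static aggregate initializer, while { (i) } always works. A hypothetical illustration (the struct is made up):

	struct foo {
		atomic_t refcnt;
		int	 flags;
	};

	/* fine with ATOMIC_INIT(i) defined as { (i) }; the old
	 * ((atomic_t) { (i) }) form could trigger
	 * "initializer element is not constant" here. */
	static struct foo f = {
		.refcnt = ATOMIC_INIT(1),
		.flags	= 0,
	};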
index d4b94b395c1641f6a3c88bf9b50795b36e86d09f..2c05a9292a81cb1138a28af59216eef1d829c421 100644 (file)
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
                cregs->ksp = (unsigned long)stack
                        + (pregs->gr[21] & (THREAD_SIZE - 1));
                cregs->gr[30] = usp;
-               if (p->personality == PER_HPUX) {
+               if (personality(p->personality) == PER_HPUX) {
 #ifdef CONFIG_HPUX
                        cregs->kpc = (unsigned long) &hpux_child_return;
 #else
index c9b932260f4713969cc8df8c51a9813110fb622f..7426e40699bdbf08575d0b69722e6594a1535076 100644 (file)
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
        long err;
 
        if (personality(current->personality) == PER_LINUX32
-           && personality == PER_LINUX)
-               personality = PER_LINUX32;
+           && personality(personality) == PER_LINUX)
+               personality = (personality & ~PER_MASK) | PER_LINUX32;
 
        err = sys_personality(personality);
-       if (err == PER_LINUX32)
-               err = PER_LINUX;
+       if (personality(err) == PER_LINUX32)
+               err = (err & ~PER_MASK) | PER_LINUX;
 
        return err;
 }
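Both the comparison and the rewrite now go through the personality() macro, which keeps only the low PER_MASK bits (the base personality). Previously a request such as PER_LINUX | ADDR_NO_RANDOMIZE from a 32-bit task failed the == PER_LINUX test outright, and when the test did match, the blanket assignment threw the flag bits away. Illustrative snippet using the real constants from linux/personality.h:

	unsigned long p = PER_LINUX | ADDR_NO_RANDOMIZE;

	/* old code: (p == PER_LINUX) is false once any flag bit is set, and
	 * p = PER_LINUX32 would have dropped the flags entirely.
	 * new code: test and rewrite only the personality() bits. */
	if (personality(p) == PER_LINUX)
		p = (p & ~PER_MASK) | PER_LINUX32;	/* ADDR_NO_RANDOMIZE kept */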
index 8d35d2c1f694772ae733fc4d20c0d1fad683948a..4f9c9f682ecfe14b074869b7e93e79fa74326026 100644 (file)
 /include/ "qoriq-duart-1.dtsi"
 /include/ "qoriq-gpio-0.dtsi"
 /include/ "qoriq-usb2-mph-0.dtsi"
+       usb@210000 {
+               compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+               port0;
+       };
 /include/ "qoriq-usb2-dr-0.dtsi"
+       usb@211000 {
+               compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+       };
 /include/ "qoriq-sec4.0-0.dtsi"
 };
index f4337bacd0e7c2692396954a5f51a4ba02e1416b..26e541c4662b27ce2c4429a2325083f8f065bf35 100644 (file)
@@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_EMBEDDED=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
 CONFIG_P1023_RDS=y
 CONFIG_QUICC_ENGINE=y
 CONFIG_QE_GPIO=y
 CONFIG_CPM2=y
-CONFIG_GPIO_MPC8XXX=y
 CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_MATH_EMULATION=y
@@ -63,11 +62,11 @@ CONFIG_INET_ESP=y
 CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
@@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y
 CONFIG_SATA_SIL24=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_E1000E=y
 CONFIG_MARVELL_PHY=y
 CONFIG_DAVICOM_PHY=y
 CONFIG_CICADA_PHY=y
 CONFIG_VITESSE_PHY=y
 CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_E1000E=y
-CONFIG_FSL_PQ_MDIO=y
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
@@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
 CONFIG_SERIAL_QE=m
-CONFIG_HW_RANDOM=y
 CONFIG_NVRAM=y
 CONFIG_I2C=y
 CONFIG_I2C_CPM=m
 CONFIG_I2C_MPC=y
+CONFIG_GPIO_MPC8XXX=y
 # CONFIG_HWMON is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=y
 CONFIG_SOUND=y
@@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y
 CONFIG_FSL_DMA=y
 # CONFIG_NET_DMA is not set
 CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
 CONFIG_CRC_T10DIF=y
 CONFIG_FRAME_WARN=8092
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
index cbb98c1234fdea1040292542a4ec4277e5bf76e2..8b3d57c1ebe81312673642db4765408a7de5c322 100644 (file)
@@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
 CONFIG_P2041_RDB=y
 CONFIG_P3041_DS=y
 CONFIG_P4080_DS=y
 CONFIG_P5020_DS=y
 CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_KEXEC=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_FSL_LBC=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
 # CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
 CONFIG_RAPIDIO=y
 CONFIG_FSL_RIO=y
 CONFIG_NET=y
@@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y
 CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
@@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_FSL_IFC=y
 CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND_FSL_IFC=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y
 CONFIG_PPC_EPAPR_HV_BYTECHAN=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
-CONFIG_HW_RANDOM=y
 CONFIG_NVRAM=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
@@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y
 CONFIG_VIDEO_OUTPUT_CONTROL=y
 CONFIG_USB_HID=m
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_FSL=y
@@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_OF=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_EDAC_MPC85XX=y
@@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_RCU_TRACE=y
 CONFIG_CRYPTO_NULL=y
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD4=y
index dd89de8b0b7f59945eb656564d49d19bcf2af121..0516e22ca3de3c16b89cdf0b4d9c7865e2d791c9 100644 (file)
@@ -56,6 +56,7 @@ CONFIG_INET_ESP=y
 CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
index 15130066e5e24ee15fb2d9442bb1e3c6c36c912a..07b7f2af2dca7ed9e06f7baea8e46f904fcb8aba 100644 (file)
@@ -1,8 +1,10 @@
+CONFIG_PPC64=y
+CONFIG_ALTIVEC=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_BLK_DEV_INITRD=y
@@ -13,15 +15,16 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-CONFIG_KEXEC=y
-# CONFIG_RELOCATABLE is not set
+# CONFIG_PPC_PSERIES is not set
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_PMAC64=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KEXEC=y
+CONFIG_IRQ_ALL_CPUS=y
+# CONFIG_MIGRATION is not set
 CONFIG_PCI_MSI=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_IP_NF_QUEUE=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDE_PMAC=y
+CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -79,24 +85,33 @@ CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
-CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_IEEE1394=y
+CONFIG_IEEE1394_OHCI1394=y
+CONFIG_IEEE1394_SBP2=m
+CONFIG_IEEE1394_ETH1394=m
+CONFIG_IEEE1394_RAWIO=y
+CONFIG_IEEE1394_VIDEO1394=m
+CONFIG_IEEE1394_DV1394=m
+CONFIG_ADB_PMU=y
+CONFIG_PMAC_SMU=y
 CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_THERM_PM72=y
+CONFIG_WINDFARM=y
+CONFIG_WINDFARM_PM81=y
+CONFIG_WINDFARM_PM91=y
+CONFIG_WINDFARM_PM112=y
+CONFIG_WINDFARM_PM121=y
 CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
 CONFIG_DUMMY=m
-CONFIG_MII=y
+CONFIG_BONDING=m
 CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SUNGEM=y
 CONFIG_ACENIC=m
 CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_TIGON3=y
 CONFIG_E1000=y
-CONFIG_SUNGEM=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
+CONFIG_TIGON3=y
 CONFIG_USB_CATC=m
 CONFIG_USB_KAWETH=m
 CONFIG_USB_PEGASUS=m
@@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m
 # CONFIG_USB_NET_NET1080 is not set
 # CONFIG_USB_NET_CDC_SUBSET is not set
 # CONFIG_USB_NET_ZAURUS is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO_I8042 is not set
 # CONFIG_SERIO_SERPORT is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_GEN_RTC=y
 CONFIG_RAW_DRIVER=y
 CONFIG_I2C_CHARDEV=y
 # CONFIG_HWMON is not set
-CONFIG_AGP=y
-CONFIG_DRM=y
-CONFIG_DRM_NOUVEAU=y
+CONFIG_AGP=m
+CONFIG_AGP_UNINORTH=m
 CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_OF=y
+CONFIG_FB_NVIDIA=y
+CONFIG_FB_NVIDIA_I2C=y
 CONFIG_FB_RADEON=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
@@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m
 CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
 CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_POWERMAC=m
+CONFIG_SND_AOA=m
+CONFIG_SND_AOA_FABRIC_LAYOUT=m
+CONFIG_SND_AOA_ONYX=m
+CONFIG_SND_AOA_TAS=m
+CONFIG_SND_AOA_TOONIE=m
 CONFIG_SND_USB_AUDIO=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
 CONFIG_HID_GYRATION=y
 CONFIG_LOGITECH_FF=y
 CONFIG_HID_PANTHERLORD=y
@@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y
 CONFIG_HID_SAMSUNG=y
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_HCD_PPC_OF is not set
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
 CONFIG_USB_ACM=m
 CONFIG_USB_PRINTER=y
 CONFIG_USB_STORAGE=y
@@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=m
 CONFIG_XFS_POSIX_ACL=y
+CONFIG_INOTIFY=y
+CONFIG_AUTOFS_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -221,12 +259,14 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_CIFS=m
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_1250=y
 CONFIG_NLS_CODEPAGE_1251=y
@@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_ISO8859_15=y
 CONFIG_NLS_UTF8=y
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_LATENCYTOP=y
-CONFIG_STRICT_DEVMEM=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_BOOTX_TEXT=y
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
@@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
-# CONFIG_VIRTUALIZATION is not set
-CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=m
index 5aac9a8bc53b2294dfbcb64df800d6aea078fdec..9352e4430c3b05419fdff7ee3b3ec91cfeaeee5d 100644 (file)
@@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_83xx=y
@@ -25,7 +25,6 @@ CONFIG_ASP834x=y
 CONFIG_QUICC_ENGINE=y
 CONFIG_QE_GPIO=y
 CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
 CONFIG_PCI=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -42,10 +41,9 @@ CONFIG_INET_ESP=y
 # CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
@@ -64,15 +62,14 @@ CONFIG_ATA=y
 CONFIG_SATA_FSL=y
 CONFIG_SATA_SIL=y
 CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
 CONFIG_MARVELL_PHY=y
 CONFIG_DAVICOM_PHY=y
 CONFIG_VITESSE_PHY=y
 CONFIG_ICPLUS_PHY=y
 CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
@@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
 CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_SHA256=y
index 03ee911c45775410c141ab108019b278ae726d17..8b5bda27d248047b43d59ffdd9af22922e599936 100644 (file)
@@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
 CONFIG_MPC8540_ADS=y
 CONFIG_MPC8560_ADS=y
 CONFIG_MPC85xx_CDS=y
@@ -40,8 +44,6 @@ CONFIG_SBC8548=y
 CONFIG_QUICC_ENGINE=y
 CONFIG_QE_GPIO=y
 CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BINFMT_MISC=m
 CONFIG_MATH_EMULATION=y
 CONFIG_FORCE_MAX_ZONEORDER=12
@@ -74,36 +76,25 @@ CONFIG_INET_ESP=y
 CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
 CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
 CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSL_ELBC=y
 CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
@@ -115,6 +106,7 @@ CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_FSL=y
 CONFIG_PATA_ALI=y
+CONFIG_PATA_VIA=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
 CONFIG_FS_ENET=y
@@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
@@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_FSL=y
@@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
 CONFIG_CRC_T10DIF=y
 CONFIG_DEBUG_FS=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
index fdfa84dc908f21c318f0b0409f0604524f99a2c2..b0974e7e98aefc6f4e7b39a0025c3435e5ebafe1 100644 (file)
@@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
 CONFIG_MPC8540_ADS=y
 CONFIG_MPC8560_ADS=y
 CONFIG_MPC85xx_CDS=y
@@ -42,8 +46,6 @@ CONFIG_SBC8548=y
 CONFIG_QUICC_ENGINE=y
 CONFIG_QE_GPIO=y
 CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BINFMT_MISC=m
 CONFIG_MATH_EMULATION=y
 CONFIG_IRQ_ALL_CPUS=y
@@ -77,36 +79,25 @@ CONFIG_INET_ESP=y
 CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
 CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
 CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSL_ELBC=y
 CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
@@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
@@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_FSL=y
@@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
 CONFIG_CRC_T10DIF=y
 CONFIG_DEBUG_FS=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
index 50d82c8a037f4cec2903504541b39a6eb2352331..b3c083de17ad5d3d06739eecdde3926df8b1c39c 100644 (file)
@@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature)
                & feature);
 }
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
 #define HBP_NUM 1
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 
 #endif /* !__ASSEMBLY__ */
 
index 50ea12fd7bf5eeab23e6fc1bccafdf4dbe2ad89b..a8bf5c673a3c430c7aa6f45cbfd43e290dd3a95c 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/processor.h>
 #include <asm/page.h>
+#include <asm/cacheflush.h>
 
 #define KVM_MAX_VCPUS          NR_CPUS
 #define KVM_MAX_VCORES         NR_CPUS
index 0124937a23b97e104260f9cd4bbe1c409bdf1ff5..e006f0bdea95f1a9464c688928dc7206690f5c14 100644 (file)
@@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);
 void kvmppc_free_lpid(long lpid);
 void kvmppc_init_lpid(unsigned long nr_lpids);
 
+static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
+{
+       /* Clear i-cache for new pages */
+       struct page *page;
+       page = pfn_to_page(pfn);
+       if (!test_bit(PG_arch_1, &page->flags)) {
+               flush_dcache_icache_page(page);
+               set_bit(PG_arch_1, &page->flags);
+       }
+}
+
+
 #endif /* __POWERPC_KVM_PPC_H__ */
index 326d33ca55cdc33e8b3cb112f0fefb8117bab3af..d4f471fb10310b75c056345e3094e8180930048a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/smp.h>
+#include <asm/io.h>
 
 struct mpic_msgr {
        u32 __iomem *base;
index 2d7bb8ced136c1e618d1e25229553a70512dc927..e4897523de41e45476bf002a64a3ab68d3e85e22 100644 (file)
@@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
                return 0;
        }
 
-       if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
-               dev_info(dev, "Warning: IOMMU window too big for device mask\n");
-               dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
-                               mask, (tbl->it_offset + tbl->it_size) <<
-                               IOMMU_PAGE_SHIFT);
+       if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+               dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
+               dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
+                               mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
                return 0;
        } else
                return 1;
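The old test rejected a device whenever the IOMMU window ended above the device's DMA mask, even though the mask only has to cover addresses actually handed to the device; the allocator constrains mappings against the mask at map time. The new test only refuses devices whose window starts above the mask. A worked example with made-up but plausible numbers:

	/* 32-bit mask, 4K IOMMU pages: mask >> IOMMU_PAGE_SHIFT == 0xfffff.
	 * Table: it_offset == 0, it_size == 0x200000 pages (an 8 GB window).
	 * Old check: it_offset + it_size == 0x200000 >  0xfffff -> rejected.
	 * New check: it_offset           == 0        <= 0xfffff -> supported;
	 * addresses above the mask are still avoided when DMA is mapped. */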
index f3a82dde61dbfe8b469a63d4874f2de53bf1b476..956a4c496de942d93853f42f2db1067f45cf0085 100644 (file)
@@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
 
        /* Do not emulate user-space instructions, instead single-step them */
        if (user_mode(regs)) {
-               bp->ctx->task->thread.last_hit_ubp = bp;
+               current->thread.last_hit_ubp = bp;
                regs->msr |= MSR_SE;
                goto out;
        }
index 782bd0a3c2f0f95496b1b144ebb3ec6e6218e0f8..c470a40b29f5d4937883cfcd8a40dd1c6bbdcfd1 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/processor.h>
 #include <asm/machdep.h>
 #include <asm/debug.h>
+#include <linux/slab.h>
 
 /*
  * This table contains the mapping between PowerPC hardware trap types, and
@@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)
        return SIGHUP;          /* default for things we don't know about */
 }
 
+/**
+ *
+ *     kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ *     @exception: Exception vector number
+ *     @regs: Current &struct pt_regs.
+ *
+ *     On some architectures we need to skip a breakpoint exception when
+ *     it occurs after a breakpoint has been removed.
+ *
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+       return kgdb_isremovedbreak(regs->nip);
+}
+
 static int kgdb_call_nmi_hook(struct pt_regs *regs)
 {
        kgdb_nmicallback(raw_smp_processor_id(), regs);
@@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 static int kgdb_singlestep(struct pt_regs *regs)
 {
        struct thread_info *thread_info, *exception_thread_info;
+       struct thread_info *backup_current_thread_info = \
+               (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
 
        if (user_mode(regs))
                return 0;
@@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
        thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
        exception_thread_info = current_thread_info();
 
-       if (thread_info != exception_thread_info)
+       if (thread_info != exception_thread_info) {
+               /* Save the original current_thread_info. */
+               memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
                memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+       }
 
        kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
        if (thread_info != exception_thread_info)
-               memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+               /* Restore current_thread_info lastly. */
+               memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
 
        return 1;
 }
@@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 #else
                        linux_regs->msr |= MSR_SE;
 #endif
-                       kgdb_single_step = 1;
                        atomic_set(&kgdb_cpu_doing_single_step,
                                   raw_smp_processor_id());
                }
index f2496f2faeccc37231110392363d0914d599e5b5..4e3cc47f26b90e157ebb5c2a1cd156918e655930 100644 (file)
@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
        long ret;
 
        if (personality(current->personality) == PER_LINUX32
-           && personality == PER_LINUX)
-               personality = PER_LINUX32;
+           && personality(personality) == PER_LINUX)
+               personality = (personality & ~PER_MASK) | PER_LINUX32;
        ret = sys_personality(personality);
-       if (ret == PER_LINUX32)
-               ret = PER_LINUX;
+       if (personality(ret) == PER_LINUX32)
+               ret = (ret & ~PER_MASK) | PER_LINUX;
        return ret;
 }
 #endif
index f922c29bb234d9bc6f2baf44175e934b63de5f86..837f13e7b6bfc1be3f7ed65a1000964f04825ebe 100644 (file)
@@ -211,6 +211,9 @@ next_pteg:
                pteg1 |= PP_RWRX;
        }
 
+       if (orig_pte->may_execute)
+               kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+
        local_irq_disable();
 
        if (pteg[rr]) {
index 10fc8ec9d2a8b7e1e8e7ad8113e8dce633a5d977..0688b6b3958594fce84a62cfbd2f3564b5650781 100644 (file)
@@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 
        if (!orig_pte->may_execute)
                rflags |= HPTE_R_N;
+       else
+               kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
 
        hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
 
index 5a84c8d3d04050b98ab818c9d83cedeb74b8f235..44b72feaff7d9876fad230253be42c1aee6e8af7 100644 (file)
@@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)
        sync                    /* order setting ceded vs. testing prodded */
        lbz     r5,VCPU_PRODDED(r3)
        cmpwi   r5,0
-       bne     1f
+       bne     kvm_cede_prodded
        li      r0,0            /* set trap to 0 to say hcall is handled */
        stw     r0,VCPU_TRAP(r3)
        li      r0,H_SUCCESS
        std     r0,VCPU_GPR(R3)(r3)
 BEGIN_FTR_SECTION
-       b       2f              /* just send it up to host on 970 */
+       b       kvm_cede_exit   /* just send it up to host on 970 */
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 
        /*
@@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
        or      r4,r4,r0
        PPC_POPCNTW(R7,R4)
        cmpw    r7,r8
-       bge     2f
+       bge     kvm_cede_exit
        stwcx.  r4,0,r6
        bne     31b
        li      r0,1
@@ -1555,7 +1555,8 @@ kvm_end_cede:
        b       hcall_real_fallback
 
        /* cede when already previously prodded case */
-1:     li      r0,0
+kvm_cede_prodded:
+       li      r0,0
        stb     r0,VCPU_PRODDED(r3)
        sync                    /* order testing prodded vs. clearing ceded */
        stb     r0,VCPU_CEDED(r3)
@@ -1563,7 +1564,8 @@ kvm_end_cede:
        blr
 
        /* we've ceded but we want to give control to the host */
-2:     li      r3,H_TOO_HARD
+kvm_cede_exit:
+       li      r3,H_TOO_HARD
        blr
 
 secondary_too_late:
index c510fc961302c2d1ae1284cc3d1aab1139002fbd..a2b66717813dfef6c43e9bf23864f8766266f0a5 100644 (file)
@@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        if (vcpu_e500->g2h_tlb1_map)
-               memset(vcpu_e500->g2h_tlb1_map,
-                      sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0);
+               memset(vcpu_e500->g2h_tlb1_map, 0,
+                      sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
-               memset(vcpu_e500->h2g_tlb1_rmap,
-                      sizeof(unsigned int) * host_tlb_params[1].entries, 0);
+               memset(vcpu_e500->h2g_tlb1_rmap, 0,
+                      sizeof(unsigned int) * host_tlb_params[1].entries);
 }
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);
+
+       /* Clear i-cache for new pages */
+       kvmppc_mmu_flush_icache(pfn);
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
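The two memset() calls in clear_tlb1_bitmap() had their second and third arguments swapped, so they asked for zero bytes to be written and the g2h/h2g maps were never actually cleared. For reference (map and entries are illustrative names):

	/* memset(void *s, int c, size_t n): with the last two arguments
	 * reversed the length is 0 and the call is a no-op. */
	memset(map, sizeof(u64) * entries, 0);	/* wrong: clears nothing   */
	memset(map, 0, sizeof(u64) * entries);	/* right: zeroes the array */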
index f9ede7c6606e54d36f4b54fddc5b3101085ee5ef..0d24ff15f5f6fb197af85a4d9e537eb8880f099c 100644 (file)
@@ -288,7 +288,7 @@ err1;       stb     r0,0(r3)
        std     r0,16(r1)
        stdu    r1,-STACKFRAMESIZE(r1)
        bl      .enter_vmx_usercopy
-       cmpwi   r3,0
+       cmpwi   cr1,r3,0
        ld      r0,STACKFRAMESIZE+16(r1)
        ld      r3,STACKFRAMESIZE+48(r1)
        ld      r4,STACKFRAMESIZE+56(r1)
@@ -326,38 +326,7 @@ err1;      stb     r0,0(r3)
        dcbt    r0,r8,0b01010   /* GO */
 .machine pop
 
-       /*
-        * We prefetch both the source and destination using enhanced touch
-        * instructions. We use a stream ID of 0 for the load side and
-        * 1 for the store side.
-        */
-       clrrdi  r6,r4,7
-       clrrdi  r9,r3,7
-       ori     r9,r9,1         /* stream=1 */
-
-       srdi    r7,r5,7         /* length in cachelines, capped at 0x3FF */
-       cmpldi  cr1,r7,0x3FF
-       ble     cr1,1f
-       li      r7,0x3FF
-1:     lis     r0,0x0E00       /* depth=7 */
-       sldi    r7,r7,7
-       or      r7,r7,r0
-       ori     r10,r7,1        /* stream=1 */
-
-       lis     r8,0x8000       /* GO=1 */
-       clrldi  r8,r8,32
-
-.machine push
-.machine "power4"
-       dcbt    r0,r6,0b01000
-       dcbt    r0,r7,0b01010
-       dcbtst  r0,r9,0b01000
-       dcbtst  r0,r10,0b01010
-       eieio
-       dcbt    r0,r8,0b01010   /* GO */
-.machine pop
-
-       beq     .Lunwind_stack_nonvmx_copy
+       beq     cr1,.Lunwind_stack_nonvmx_copy
 
        /*
         * If source and destination are not relatively aligned we use a
index 0efdc51bc7164cf9a1089189a8c6fc64e35002a4..7ba6c96de77856e426fe5cf83ff360280e239194 100644 (file)
@@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)
        std     r0,16(r1)
        stdu    r1,-STACKFRAMESIZE(r1)
        bl      .enter_vmx_copy
-       cmpwi   r3,0
+       cmpwi   cr1,r3,0
        ld      r0,STACKFRAMESIZE+16(r1)
        ld      r3,STACKFRAMESIZE+48(r1)
        ld      r4,STACKFRAMESIZE+56(r1)
@@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)
        dcbt    r0,r8,0b01010   /* GO */
 .machine pop
 
-       beq     .Lunwind_stack_nonvmx_copy
+       beq     cr1,.Lunwind_stack_nonvmx_copy
 
        /*
         * If source and destination are not relatively aligned we use a
index baaafde7d13596af850a9d7452e596d84a52935a..fbdad0e3929a8ddfbcb0f714a6480f0ca33ba6f6 100644 (file)
@@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
 }
+EXPORT_SYMBOL(flush_dcache_icache_page);
 
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
index 77b49ddda9d3675ed816d65af7f8c242578c01a0..7cd2dbd6e4c4fa615ae4442c5d00af106f3e17dd 100644 (file)
@@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
                if (!event->hw.idx || is_limited_pmc(event->hw.idx))
                        continue;
                val = read_pmc(event->hw.idx);
-               if ((int)val < 0) {
+               if (pmc_overflow(val)) {
                        /* event has overflowed */
                        found = 1;
                        record_and_restart(event, val, regs);
index a7b2a600d0a4d0aa7878a633eb3549f0a0d3a373..c37f46136321272465327a106383fb440e48fa75 100644 (file)
@@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
                        iounmap(hose->cfg_data);
                iounmap(hose->cfg_addr);
                pcibios_free_controller(hose);
-               return 0;
+               return -ENODEV;
        }
 
        setup_pci_cmd(hose);
@@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary;
 
 void __devinit fsl_pci_init(void)
 {
+       int ret;
        struct device_node *node;
        struct pci_controller *hose;
        dma_addr_t max = 0xffffffff;
@@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void)
                        if (!fsl_pci_primary)
                                fsl_pci_primary = node;
 
-                       fsl_add_bridge(node, fsl_pci_primary == node);
-                       hose = pci_find_hose_for_OF_device(node);
-                       max = min(max, hose->dma_window_base_cur +
-                                       hose->dma_window_size);
+                       ret = fsl_add_bridge(node, fsl_pci_primary == node);
+                       if (ret == 0) {
+                               hose = pci_find_hose_for_OF_device(node);
+                               max = min(max, hose->dma_window_base_cur +
+                                               hose->dma_window_size);
+                       }
                }
        }
 
index 483d8fa72e8ba3bc6bca736faf2f007399282e13..e961f8c4a8f070f341c2b80bbd8a18456a323ade 100644 (file)
@@ -14,6 +14,9 @@
 #include <linux/list.h>
 #include <linux/of_platform.h>
 #include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
 #include <asm/prom.h>
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
index eab3492a45c5c5244eca42d58354cf6d8a092836..9b49c65ee7a42f6f9d0b8dc436628c34bafb262b 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/reboot.h>
 #include <linux/delay.h>
 #include <linux/kallsyms.h>
+#include <linux/kmsg_dump.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/sysrq.h>
@@ -894,13 +895,13 @@ cmds(struct pt_regs *excp)
 #endif
                default:
                        printf("Unrecognized command: ");
-                       do {
+                       do {
                                if (' ' < cmd && cmd <= '~')
                                        putchar(cmd);
                                else
                                        printf("\\x%x", cmd);
                                cmd = inchar();
-                       } while (cmd != '\n'); 
+                       } while (cmd != '\n');
                        printf(" (type ? for help)\n");
                        break;
                }
@@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr)
        return 1;
 }
 
-static char *breakpoint_help_string = 
+static char *breakpoint_help_string =
     "Breakpoint command usage:\n"
     "b                show breakpoints\n"
     "b <addr> [cnt]   set breakpoint at given instr addr\n"
@@ -1193,7 +1194,7 @@ bpt_cmds(void)
 
        default:
                termch = cmd;
-               cmd = skipbl();
+               cmd = skipbl();
                if (cmd == '?') {
                        printf(breakpoint_help_string);
                        break;
@@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
                                       sp + REGS_OFFSET);
                                break;
                        }
-                        printf("--- Exception: %lx %s at ", regs.trap,
+                       printf("--- Exception: %lx %s at ", regs.trap,
                               getvecname(TRAP(&regs)));
                        pc = regs.nip;
                        lr = regs.link;
@@ -1623,14 +1624,14 @@ static void super_regs(void)
 
        cmd = skipbl();
        if (cmd == '\n') {
-               unsigned long sp, toc;
+               unsigned long sp, toc;
                asm("mr %0,1" : "=r" (sp) :);
                asm("mr %0,2" : "=r" (toc) :);
 
                printf("msr  = "REG"  sprg0= "REG"\n",
                       mfmsr(), mfspr(SPRN_SPRG0));
                printf("pvr  = "REG"  sprg1= "REG"\n",
-                      mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); 
+                      mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
                printf("dec  = "REG"  sprg2= "REG"\n",
                       mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
                printf("sp   = "REG"  sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
@@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size)
 static int brev;
 static int mnoread;
 
-static char *memex_help_string = 
+static char *memex_help_string =
     "Memory examine command usage:\n"
     "m [addr] [flags] examine/change memory\n"
     "  addr is optional.  will start where left off.\n"
@@ -1798,7 +1799,7 @@ static char *memex_help_string =
     "NOTE: flags are saved as defaults\n"
     "";
 
-static char *memex_subcmd_help_string = 
+static char *memex_subcmd_help_string =
     "Memory examine subcommands:\n"
     "  hexval   write this val to current location\n"
     "  'string' write chars from string to this location\n"
@@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump)
                nr = mread(adrs, temp, r);
                adrs += nr;
                for (m = 0; m < r; ++m) {
-                       if ((m & (sizeof(long) - 1)) == 0 && m > 0)
+                       if ((m & (sizeof(long) - 1)) == 0 && m > 0)
                                putchar(' ');
                        if (m < nr)
                                printf("%.2x", temp[m]);
@@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump)
                                printf("%s", fault_chars[fault_type]);
                }
                for (; m < 16; ++m) {
-                       if ((m & (sizeof(long) - 1)) == 0)
+                       if ((m & (sizeof(long) - 1)) == 0)
                                putchar(' ');
                        printf("  ");
                }
@@ -2148,45 +2149,28 @@ print_address(unsigned long addr)
 void
 dump_log_buf(void)
 {
-        const unsigned long size = 128;
-        unsigned long end, addr;
-        unsigned char buf[size + 1];
-
-        addr = 0;
-        buf[size] = '\0';
-
-        if (setjmp(bus_error_jmp) != 0) {
-                printf("Unable to lookup symbol __log_buf!\n");
-                return;
-        }
-
-        catch_memory_errors = 1;
-        sync();
-        addr = kallsyms_lookup_name("__log_buf");
-
-        if (! addr)
-                printf("Symbol __log_buf not found!\n");
-        else {
-                end = addr + (1 << CONFIG_LOG_BUF_SHIFT);
-                while (addr < end) {
-                        if (! mread(addr, buf, size)) {
-                                printf("Can't read memory at address 0x%lx\n", addr);
-                                break;
-                        }
-
-                        printf("%s", buf);
-
-                        if (strlen(buf) < size)
-                                break;
-
-                        addr += size;
-                }
-        }
-
-        sync();
-        /* wait a little while to see if we get a machine check */
-        __delay(200);
-        catch_memory_errors = 0;
+       struct kmsg_dumper dumper = { .active = 1 };
+       unsigned char buf[128];
+       size_t len;
+
+       if (setjmp(bus_error_jmp) != 0) {
+               printf("Error dumping printk buffer!\n");
+               return;
+       }
+
+       catch_memory_errors = 1;
+       sync();
+
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
+               buf[len] = '\0';
+               printf("%s", buf);
+       }
+
+       sync();
+       /* wait a little while to see if we get a machine check */
+       __delay(200);
+       catch_memory_errors = 0;
 }
 
 /*
index 32e8449640facbef375b0a5cb1d33610290c3a7e..9b94a160fe7f06399222ead8530e5a2da2b94f78 100644 (file)
@@ -180,7 +180,8 @@ extern char elf_platform[];
 #define ELF_PLATFORM (elf_platform)
 
 #ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
 #else /* CONFIG_64BIT */
 #define SET_PERSONALITY(ex)                                    \
 do {                                                           \
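The s390 change above stops the 31-bit SET_PERSONALITY() from discarding personality flag bits such as ADDR_NO_RANDOMIZE on exec; only the base personality is replaced. A minimal userspace sketch of the masking, with constants mirrored from <linux/personality.h> purely for illustration:

    #include <stdio.h>

    #define PER_MASK          0x00ff      /* base personality selector */
    #define PER_LINUX         0x0000
    #define ADDR_NO_RANDOMIZE 0x0040000   /* one of the flag bits above PER_MASK */

    int main(void)
    {
            unsigned int cur = PER_LINUX | ADDR_NO_RANDOMIZE;

            /* old macro: every flag outside the base personality is lost */
            unsigned int before = PER_LINUX;
            /* new macro: keep whatever lives outside PER_MASK */
            unsigned int after = PER_LINUX | (cur & ~PER_MASK);

            printf("before=%#x after=%#x\n", before, after);
            return 0;
    }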
index 7bcc14e395f0a2fd1d85ded571010e05c5c0392e..bf2a2ad2f8004ab36e98d49fccb084a67fba0049 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 typedef unsigned long   __kernel_size_t;
+typedef long            __kernel_ssize_t;
 #define __kernel_size_t __kernel_size_t
 
 typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short  __kernel_mode_t;
 typedef unsigned short  __kernel_ipc_pid_t;
 typedef unsigned short  __kernel_uid_t;
 typedef unsigned short  __kernel_gid_t;
-typedef int             __kernel_ssize_t;
 typedef int             __kernel_ptrdiff_t;
 
 #else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int    __kernel_mode_t;
 typedef int             __kernel_ipc_pid_t;
 typedef unsigned int    __kernel_uid_t;
 typedef unsigned int    __kernel_gid_t;
-typedef long            __kernel_ssize_t;
 typedef long            __kernel_ptrdiff_t;
 typedef unsigned long   __kernel_sigset_t;      /* at least 32 bits */
 
index a0a8340daafafb90ad25c822949b6ace5a84a595..ce26ac3cb162899a285e5812a39ea9b1b6ec0484 100644 (file)
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
 }
 
 static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_yield(void) { }
index b315a33867f25963808ebe9fc81ec9889a8deca7..33692eaabab58619c7481cbfc7c6f01be929e5e9 100644 (file)
@@ -12,8 +12,7 @@
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
- * These are fair FIFO ticket locks, which are currently limited to 256
- * CPUs.
+ * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
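For context, the reworded comment simply states the capacity of the ticket scheme. A toy userspace sketch of a FIFO ticket lock (C11 atomics, not the kernel implementation) showing why 16-bit head and tail counters give room for 2^16 CPUs:

    #include <stdatomic.h>
    #include <stdint.h>

    struct ticket_lock {
            _Atomic uint16_t next;    /* next ticket to hand out */
            _Atomic uint16_t owner;   /* ticket currently being served */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
            uint16_t me = atomic_fetch_add(&l->next, 1);

            while (atomic_load(&l->owner) != me)
                    ;                 /* spin until our ticket comes up */
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
            atomic_fetch_add(&l->owner, 1);
    }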
index afb7ff79a29fbb33c6578c9240c7e5b550417b88..ced4534baed574f7596b014f979748b1eedc2653 100644 (file)
@@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
 #endif
 
 #ifdef P6_NOP1
-static const unsigned char  __initconst_or_module p6nops[] =
+static const unsigned char p6nops[] =
 {
        P6_NOP1,
        P6_NOP2,
index 7ad683d78645c1b8eed3a816e6fca23de94541f3..d44f7829968e801cf6b32d41be0c5f297971900d 100644 (file)
@@ -270,7 +270,7 @@ void fixup_irqs(void)
 
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
-                       affinity = cpu_all_mask;
+                       affinity = cpu_online_mask;
                }
 
                chip = irq_data_get_irq_chip(data);
index 8a2ce8fd41c0e68bbedc6fce685fb52c95502d24..82746f942cd8db4d59a32631049a79146564052e 100644 (file)
@@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
                                  unsigned int *current_size)
 {
        struct microcode_header_amd *mc_hdr;
-       unsigned int actual_size;
+       unsigned int actual_size, patch_size;
        u16 equiv_cpu_id;
 
        /* size of the current patch we're staring at */
-       *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE;
+       patch_size = *(u32 *)(ucode_ptr + 4);
+       *current_size = patch_size + SECTION_HDR_SIZE;
 
        equiv_cpu_id = find_equiv_id();
        if (!equiv_cpu_id)
@@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
        /*
         * now that the header looks sane, verify its size
         */
-       actual_size = verify_ucode_size(cpu, *current_size, leftover_size);
+       actual_size = verify_ucode_size(cpu, patch_size, leftover_size);
        if (!actual_size)
                return 0;
 
index 97d9a9914ba8d772e522911252261110cfa58677..a3b57a27be880649ac7b0ae4144bc62faf105cbc 100644 (file)
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
        return address_mask(ctxt, reg);
 }
 
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+       assign_masked(reg, *reg + inc, mask);
+}
+
 static inline void
 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
 {
+       ulong mask;
+
        if (ctxt->ad_bytes == sizeof(unsigned long))
-               *reg += inc;
+               mask = ~0UL;
        else
-               *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+               mask = ad_mask(ctxt);
+       masked_increment(reg, mask, inc);
+}
+
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+       masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
 }
 
 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
 {
        struct segmented_address addr;
 
-       register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
-       addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+       rsp_increment(ctxt, -bytes);
+       addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;
 
        return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
        int rc;
        struct segmented_address addr;
 
-       addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+       addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;
        rc = segmented_read(ctxt, addr, dest, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+       rsp_increment(ctxt, len);
        return rc;
 }
 
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 
        while (reg >= VCPU_REGS_RAX) {
                if (reg == VCPU_REGS_RSP) {
-                       register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
-                                                       ctxt->op_bytes);
+                       rsp_increment(ctxt, ctxt->op_bytes);
                        --reg;
                }
 
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
        rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+       rsp_increment(ctxt, ctxt->src.val);
        return X86EMUL_CONTINUE;
 }
 
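The emulator change routes every RSP adjustment through a mask derived from the stack segment's address size, so a 16-bit or 32-bit stack pointer wraps within its own width instead of spilling into the upper register bits. A standalone userspace sketch of the same arithmetic, using made-up values:

    #include <stdio.h>

    typedef unsigned long ulong;

    static void assign_masked(ulong *dest, ulong src, ulong mask)
    {
            *dest = (*dest & ~mask) | (src & mask);
    }

    static void masked_increment(ulong *reg, ulong mask, int inc)
    {
            assign_masked(reg, *reg + inc, mask);
    }

    int main(void)
    {
            ulong rsp = 0xdead0002;                /* upper bits must survive */

            masked_increment(&rsp, 0xffff, -4);    /* 16-bit stack mask */
            printf("rsp=%#lx\n", rsp);             /* prints 0xdeadfffe */
            return 0;
    }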
index 01ca00423938515cfe43781403e90bfb84929fc3..7fbd0d273ea83dbec4a330fcb6d14a8ab46462b0 100644 (file)
@@ -4112,17 +4112,22 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
                int idx;
                LIST_HEAD(invalid_list);
 
+               /*
+                * Never scan more than sc->nr_to_scan VM instances.
+                * Will not hit this condition practically since we do not try
+                * to shrink more than one VM and it is very unlikely to see
+                * !n_used_mmu_pages so many times.
+                */
+               if (!nr_to_scan--)
+                       break;
                /*
                 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
                 * here. We may skip a VM instance erroneously, but we do not
                 * want to shrink a VM that only started to populate its MMU
                 * anyway.
                 */
-               if (kvm->arch.n_used_mmu_pages > 0) {
-                       if (!nr_to_scan--)
-                               break;
+               if (!kvm->arch.n_used_mmu_pages)
                        continue;
-               }
 
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
index 42bce48f692850cf3cadf96e83c86e5f8bf760ee..148ed666e311fda2979887d6c9e9a2d440f75ffc 100644 (file)
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN    9
+#define KVM_SAVE_MSRS_BEGIN    10
 static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_KVM_STEAL_TIME:
                data = vcpu->arch.st.msr_val;
                break;
+       case MSR_KVM_PV_EOI_EN:
+               data = vcpu->arch.pv_eoi.msr_val;
+               break;
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MCG_CAP:
index bf4bda6d3e9ad66f19af6e4669063a12739c78db..9642d4a3860239f3203a2ff12dba5543a26ccf4f 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
-#include <linux/syscore_ops.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -1470,130 +1469,38 @@ asmlinkage void __init xen_start_kernel(void)
 #endif
 }
 
-#ifdef CONFIG_XEN_PVHVM
-/*
- * The pfn containing the shared_info is located somewhere in RAM. This
- * will cause trouble if the current kernel is doing a kexec boot into a
- * new kernel. The new kernel (and its startup code) can not know where
- * the pfn is, so it can not reserve the page. The hypervisor will
- * continue to update the pfn, and as a result memory corruption occours
- * in the new kernel.
- *
- * One way to work around this issue is to allocate a page in the
- * xen-platform pci device's BAR memory range. But pci init is done very
- * late and the shared_info page is already in use very early to read
- * the pvclock. So moving the pfn from RAM to MMIO is racy because some
- * code paths on other vcpus could access the pfn during the small
- * window when the old pfn is moved to the new pfn. There is even a
- * small window were the old pfn is not backed by a mfn, and during that
- * time all reads return -1.
- *
- * Because it is not known upfront where the MMIO region is located it
- * can not be used right from the start in xen_hvm_init_shared_info.
- *
- * To minimise trouble the move of the pfn is done shortly before kexec.
- * This does not eliminate the race because all vcpus are still online
- * when the syscore_ops will be called. But hopefully there is no work
- * pending at this point in time. Also the syscore_op is run last which
- * reduces the risk further.
- */
-
-static struct shared_info *xen_hvm_shared_info;
-
-static void xen_hvm_connect_shared_info(unsigned long pfn)
+void __ref xen_hvm_init_shared_info(void)
 {
+       int cpu;
        struct xen_add_to_physmap xatp;
+       static struct shared_info *shared_info_page = 0;
 
+       if (!shared_info_page)
+               shared_info_page = (struct shared_info *)
+                       extend_brk(PAGE_SIZE, PAGE_SIZE);
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
-       xatp.gpfn = pfn;
+       xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();
 
-}
-static void xen_hvm_set_shared_info(struct shared_info *sip)
-{
-       int cpu;
-
-       HYPERVISOR_shared_info = sip;
+       HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
 
        /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
         * page, we use it in the event channel upcall and in some pvclock
         * related functions. We don't need the vcpu_info placement
         * optimizations because we don't use any pv_mmu or pv_irq op on
         * HVM.
-        * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is
-        * online but xen_hvm_set_shared_info is run at resume time too and
+        * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+        * online but xen_hvm_init_shared_info is run at resume time too and
         * in that case multiple vcpus might be online. */
        for_each_online_cpu(cpu) {
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
        }
 }
 
-/* Reconnect the shared_info pfn to a mfn */
-void xen_hvm_resume_shared_info(void)
-{
-       xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
-}
-
-#ifdef CONFIG_KEXEC
-static struct shared_info *xen_hvm_shared_info_kexec;
-static unsigned long xen_hvm_shared_info_pfn_kexec;
-
-/* Remember a pfn in MMIO space for kexec reboot */
-void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn)
-{
-       xen_hvm_shared_info_kexec = sip;
-       xen_hvm_shared_info_pfn_kexec = pfn;
-}
-
-static void xen_hvm_syscore_shutdown(void)
-{
-       struct xen_memory_reservation reservation = {
-               .domid = DOMID_SELF,
-               .nr_extents = 1,
-       };
-       unsigned long prev_pfn;
-       int rc;
-
-       if (!xen_hvm_shared_info_kexec)
-               return;
-
-       prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT;
-       set_xen_guest_handle(reservation.extent_start, &prev_pfn);
-
-       /* Move pfn to MMIO, disconnects previous pfn from mfn */
-       xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec);
-
-       /* Update pointers, following hypercall is also a memory barrier */
-       xen_hvm_set_shared_info(xen_hvm_shared_info_kexec);
-
-       /* Allocate new mfn for previous pfn */
-       do {
-               rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
-               if (rc == 0)
-                       msleep(123);
-       } while (rc == 0);
-
-       /* Make sure the previous pfn is really connected to a (new) mfn */
-       BUG_ON(rc != 1);
-}
-
-static struct syscore_ops xen_hvm_syscore_ops = {
-       .shutdown = xen_hvm_syscore_shutdown,
-};
-#endif
-
-/* Use a pfn in RAM, may move to MMIO before kexec. */
-static void __init xen_hvm_init_shared_info(void)
-{
-       /* Remember pointer for resume */
-       xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
-       xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
-       xen_hvm_set_shared_info(xen_hvm_shared_info);
-}
-
+#ifdef CONFIG_XEN_PVHVM
 static void __init init_hvm_pv_info(void)
 {
        int major, minor;
@@ -1644,9 +1551,6 @@ static void __init xen_hvm_guest_init(void)
        init_hvm_pv_info();
 
        xen_hvm_init_shared_info();
-#ifdef CONFIG_KEXEC
-       register_syscore_ops(&xen_hvm_syscore_ops);
-#endif
 
        if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;
index b2e91d40a4cb32a851006d6194d70f3465ec26f3..d4b255463253c8b3bb824a915f5764cc2992e202 100644 (file)
@@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
 
 /* When we populate back during bootup, the amount of pages can vary. The
  * max we have seen is 395979, but that does not mean it can't be more.
- * But some machines can have 3GB I/O holes even. So lets reserve enough
- * for 4GB of I/O and E820 holes. */
-RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
+ * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
+ * it can re-use Xen provided mfn_list array, so we only need to allocate at
+ * most three P2M top nodes. */
+RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
        BUG_ON(pfn >= MAX_P2M_PFN);
@@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn)
        }
        return true;
 }
+
+/*
+ * Skim over the P2M tree looking at pages that are either filled with
+ * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
+ * replace the P2M leaf with a p2m_missing or p2m_identity.
+ * Stick the old page in the new P2M tree location.
+ */
+bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
+{
+       unsigned topidx;
+       unsigned mididx;
+       unsigned ident_pfns;
+       unsigned inv_pfns;
+       unsigned long *p2m;
+       unsigned long *mid_mfn_p;
+       unsigned idx;
+       unsigned long pfn;
+
+       /* We only look when this entails a P2M middle layer */
+       if (p2m_index(set_pfn))
+               return false;
+
+       for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+               topidx = p2m_top_index(pfn);
+
+               if (!p2m_top[topidx])
+                       continue;
+
+               if (p2m_top[topidx] == p2m_mid_missing)
+                       continue;
+
+               mididx = p2m_mid_index(pfn);
+               p2m = p2m_top[topidx][mididx];
+               if (!p2m)
+                       continue;
+
+               if ((p2m == p2m_missing) || (p2m == p2m_identity))
+                       continue;
+
+               if ((unsigned long)p2m == INVALID_P2M_ENTRY)
+                       continue;
+
+               ident_pfns = 0;
+               inv_pfns = 0;
+               for (idx = 0; idx < P2M_PER_PAGE; idx++) {
+                       /* IDENTITY_PFNs are 1:1 */
+                       if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
+                               ident_pfns++;
+                       else if (p2m[idx] == INVALID_P2M_ENTRY)
+                               inv_pfns++;
+                       else
+                               break;
+               }
+               if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
+                       goto found;
+       }
+       return false;
+found:
+       /* Found one, replace old with p2m_identity or p2m_missing */
+       p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
+       /* And the other for save/restore.. */
+       mid_mfn_p = p2m_top_mfn_p[topidx];
+       /* NOTE: Even if it is a p2m_identity it should still point to
+        * a page filled with INVALID_P2M_ENTRY entries. */
+       mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
+
+       /* Reset where we want to stick the old page in. */
+       topidx = p2m_top_index(set_pfn);
+       mididx = p2m_mid_index(set_pfn);
+
+       /* This shouldn't happen */
+       if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
+               early_alloc_p2m(set_pfn);
+
+       if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
+               return false;
+
+       p2m_init(p2m);
+       p2m_top[topidx][mididx] = p2m;
+       mid_mfn_p = p2m_top_mfn_p[topidx];
+       mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+       return true;
+}
 bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
        if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
                if (!early_alloc_p2m(pfn))
                        return false;
 
+               if (early_can_reuse_p2m_middle(pfn, mfn))
+                       return __set_phys_to_machine(pfn, mfn);
+
                if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
                        return false;
 
index ead85576d54a6c097f10b9a9d55e2057575e2168..d11ca11d14fc094379e989a6b06fe2e5b2bc72e6 100644 (file)
@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
        memblock_reserve(start, size);
 
        xen_max_p2m_pfn = PFN_DOWN(start + size);
+       for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
+               unsigned long mfn = pfn_to_mfn(pfn);
+
+               if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
+                       continue;
+               WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
+                       pfn, mfn);
 
-       for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+       }
 }
 
 static unsigned long __init xen_do_chunk(unsigned long start,
index ae8a00c39de4b0d02cfad0722715d0fbd159a1cb..45329c8c226e4c4070f16a791b300265a0bf472b 100644 (file)
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
 {
 #ifdef CONFIG_XEN_PVHVM
        int cpu;
-       xen_hvm_resume_shared_info();
+       xen_hvm_init_shared_info();
        xen_callback_vector();
        xen_unplug_emulated_devices();
        if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
index 1e4329e04e0fbf105467f18b009183c9dcebd2a8..202d4c150154fb31ddb03da8f8144a45f21c02a7 100644 (file)
@@ -41,7 +41,7 @@ void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
 
 void xen_callback_vector(void);
-void xen_hvm_resume_shared_info(void);
+void xen_hvm_init_shared_info(void);
 void xen_unplug_emulated_devices(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);
index 2b461b496a788c11d00616efaf16df121e620160..19cc761cacb2a4b71fe9d4579226025324ffddf5 100644 (file)
@@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
        unsigned int max_discard_sectors;
+       unsigned int granularity, alignment, mask;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
@@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
 
+       /* Zero-sector (unknown) and one-sector granularities are the same.  */
+       granularity = max(q->limits.discard_granularity >> 9, 1U);
+       mask = granularity - 1;
+       alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+
        /*
         * Ensure that max_discard_sectors is of the proper
-        * granularity
+        * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+       max_discard_sectors = round_down(max_discard_sectors, granularity);
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
-       } else if (q->limits.discard_granularity) {
-               unsigned int disc_sects = q->limits.discard_granularity >> 9;
-
-               max_discard_sectors &= ~(disc_sects - 1);
        }
 
        if (flags & BLKDEV_DISCARD_SECURE) {
@@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        bb.wait = &wait;
 
        while (nr_sects) {
+               unsigned int req_sects;
+               sector_t end_sect;
+
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }
 
+               req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+               /*
+                * If splitting a request, and the next starting sector would be
+                * misaligned, stop the discard at the previous aligned sector.
+                */
+               end_sect = sector + req_sects;
+               if (req_sects < nr_sects && (end_sect & mask) != alignment) {
+                       end_sect =
+                               round_down(end_sect - alignment, granularity)
+                               + alignment;
+                       req_sects = end_sect - sector;
+               }
+
                bio->bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
 
-               if (nr_sects > max_discard_sectors) {
-                       bio->bi_size = max_discard_sectors << 9;
-                       nr_sects -= max_discard_sectors;
-                       sector += max_discard_sectors;
-               } else {
-                       bio->bi_size = nr_sects << 9;
-                       nr_sects = 0;
-               }
+               bio->bi_size = req_sects << 9;
+               nr_sects -= req_sects;
+               sector = end_sect;
 
                atomic_inc(&bb.done);
                submit_bio(type, bio);
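The discard rework above rounds max_discard_sectors down to the (power-of-two) granularity and, when a range has to be split, pulls each split point back onto a granularity boundary offset by the device's discard alignment. A userspace walk-through of that arithmetic with invented device parameters:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    #define round_down(x, y)  ((x) & ~((y) - 1))
    #define min_t(t, a, b)    ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

    int main(void)
    {
            unsigned int granularity = 8;   /* sectors, assumed power of two */
            unsigned int alignment = 2;     /* device discard alignment, sectors */
            unsigned int mask = granularity - 1;
            unsigned int max_discard_sectors = round_down(24U, granularity);
            sector_t sector = 5, nr_sects = 100;

            while (nr_sects) {
                    sector_t req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
                    sector_t end_sect = sector + req_sects;

                    /* keep every split point on an aligned boundary */
                    if (req_sects < nr_sects && (end_sect & mask) != alignment) {
                            end_sect = round_down(end_sect - alignment, granularity)
                                     + alignment;
                            req_sects = end_sect - sector;
                    }

                    printf("discard %llu..%llu (%llu sectors)\n",
                           sector, end_sect - 1, req_sects);
                    nr_sects -= req_sects;
                    sector = end_sect;
            }
            return 0;
    }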
index 160035f548823482968bcd58298fcf9c309e7d62..e76279e411622519eebb54d4802eb11b796788db 100644 (file)
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        return 0;
 }
 
+static void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+                    struct scatterlist *sglist, struct bio_vec **bvprv,
+                    struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+       int nbytes = bvec->bv_len;
+
+       if (*bvprv && *cluster) {
+               if ((*sg)->length + nbytes > queue_max_segment_size(q))
+                       goto new_segment;
+
+               if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+                       goto new_segment;
+               if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+                       goto new_segment;
+
+               (*sg)->length += nbytes;
+       } else {
+new_segment:
+               if (!*sg)
+                       *sg = sglist;
+               else {
+                       /*
+                        * If the driver previously mapped a shorter
+                        * list, we could see a termination bit
+                        * prematurely unless it fully inits the sg
+                        * table on each mapping. We KNOW that there
+                        * must be more entries here or the driver
+                        * would be buggy, so force clear the
+                        * termination bit to avoid doing a full
+                        * sg_init_table() in drivers for each command.
+                        */
+                       (*sg)->page_link &= ~0x02;
+                       *sg = sg_next(*sg);
+               }
+
+               sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+               (*nsegs)++;
+       }
+       *bvprv = bvec;
+}
+
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
-               int nbytes = bvec->bv_len;
-
-               if (bvprv && cluster) {
-                       if (sg->length + nbytes > queue_max_segment_size(q))
-                               goto new_segment;
-
-                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-                               goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-                               goto new_segment;
-
-                       sg->length += nbytes;
-               } else {
-new_segment:
-                       if (!sg)
-                               sg = sglist;
-                       else {
-                               /*
-                                * If the driver previously mapped a shorter
-                                * list, we could see a termination bit
-                                * prematurely unless it fully inits the sg
-                                * table on each mapping. We KNOW that there
-                                * must be more entries here or the driver
-                                * would be buggy, so force clear the
-                                * termination bit to avoid doing a full
-                                * sg_init_table() in drivers for each command.
-                                */
-                               sg->page_link &= ~0x02;
-                               sg = sg_next(sg);
-                       }
-
-                       sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-                       nsegs++;
-               }
-               bvprv = bvec;
+               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                    &nsegs, &cluster);
        } /* segments in rq */
 
 
@@ -199,6 +209,43 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+                  struct scatterlist *sglist)
+{
+       struct bio_vec *bvec, *bvprv;
+       struct scatterlist *sg;
+       int nsegs, cluster;
+       unsigned long i;
+
+       nsegs = 0;
+       cluster = blk_queue_cluster(q);
+
+       bvprv = NULL;
+       sg = NULL;
+       bio_for_each_segment(bvec, bio, i) {
+               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                    &nsegs, &cluster);
+       } /* segments in bio */
+
+       if (sg)
+               sg_mark_end(sg);
+
+       BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+       return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
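blk_bio_map_sg() is exported so that drivers working on individual bios, rather than whole requests, can reuse the block layer's segment-merging logic. A hypothetical caller, not part of this commit, sketching the intended pattern (my_map_bio and max_segs are placeholder names):

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int my_map_bio(struct request_queue *q, struct bio *bio,
                          struct scatterlist *sgl, unsigned int max_segs)
    {
            int nsegs;

            sg_init_table(sgl, max_segs);

            /* fills sgl, marks the last entry used, returns the entry count */
            nsegs = blk_bio_map_sg(q, bio, sgl);
            if (nsegs > max_segs)
                    return -EINVAL;

            return nsegs;
    }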
index cac7366957c376cedb2341753520e6f32516572c..d839723303c856ae221bdf54ec1de93544698d95 100644 (file)
@@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
 
 static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 {
-       static void *p;
+       void *p;
 
        p = disk_seqf_start(seqf, pos);
        if (!IS_ERR_OR_NULL(p) && !*pos)
index 2be8ef1d30935af81a9b7ac3b721750fdda6b3cd..27cecd313e7588386960244548b391bfddac2a55 100644 (file)
@@ -115,7 +115,7 @@ config SATA_SIL24
          If unsure, say N.
 
 config ATA_SFF
-       bool "ATA SFF support"
+       bool "ATA SFF support (for legacy IDE and PATA)"
        default y
        help
          This option adds support for ATA controllers with SFF
index 062e6a1a248fe97b69cd2047017a88ded3f2164d..50d5dea0ff599feb19626ff80b8143dff1ae4f6e 100644 (file)
@@ -256,6 +256,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
        { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
        { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
+       { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
+       { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
+       { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index c2594ddf25b00f54d22b51c66f23db5d54d47222..57eb1c212a4ce8ee267d36b3f920d3011f96b832 100644 (file)
@@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
 extern struct ata_port_operations ahci_ops;
 extern struct ata_port_operations ahci_pmp_retry_srst_ops;
 
+unsigned int ahci_dev_classify(struct ata_port *ap);
 void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
                        u32 opts);
 void ahci_save_initial_config(struct device *dev,
index 3c809bfbccf58a2af573be99719a4fda77bd3815..ef773e12af79d2c931e37852e7c31b716a00e51e 100644 (file)
@@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (Lynx Point) */
        { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (Lynx Point-LP) */
+       { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+       /* SATA Controller IDE (Lynx Point-LP) */
+       { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+       /* SATA Controller IDE (Lynx Point-LP) */
+       { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (Lynx Point-LP) */
+       { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (DH89xxCC) */
        { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        { }     /* terminate list */
index f9eaa82311a9a191dd56fe06881dd318d5c518eb..555c07afa05bc6f82d6cc0357684f1f8e8b9f8da 100644 (file)
@@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev)
        }
 }
 
-static unsigned int ahci_dev_classify(struct ata_port *ap)
+unsigned int ahci_dev_classify(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ata_taskfile tf;
@@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
 
        return ata_dev_classify(&tf);
 }
+EXPORT_SYMBOL_GPL(ahci_dev_classify);
 
 void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
                        u32 opts)
index 902b5a457170958f4d805ae30185cee170e6b6e9..fd9ecf74e631afc800e56927069d97bd61774862 100644 (file)
@@ -60,17 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap)
        if (ap->flags & ATA_FLAG_ACPI_SATA)
                return NULL;
 
-       /*
-        * If acpi bind operation has already happened, we can get the handle
-        * for the port by checking the corresponding scsi_host device's
-        * firmware node, otherwise we will need to find out the handle from
-        * its parent's acpi node.
-        */
-       if (ap->scsi_host)
-               return DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev);
-       else
-               return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev),
-                               ap->port_no);
+       return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);
 }
 EXPORT_SYMBOL(ata_ap_acpi_handle);
 
@@ -1101,6 +1091,9 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
        if (!*handle)
                return -ENODEV;
 
+       if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+               ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
+
        return 0;
 }
 
index fadd5866d40fd29b2eb70df29e3d026e2e3722db..8e1039c8e15975aced4e6a359dc4a0451a58bc4b 100644 (file)
@@ -4062,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "_NEC DV5800A",       NULL,           ATA_HORKAGE_NODMA },
        { "SAMSUNG CD-ROM SN-124", "N001",      ATA_HORKAGE_NODMA },
        { "Seagate STT20000A", NULL,            ATA_HORKAGE_NODMA },
-       { "2GB ATA Flash Disk", "ADMA428M",     ATA_HORKAGE_NODMA },
+       { " 2GB ATA Flash Disk", "ADMA428M",    ATA_HORKAGE_NODMA },
        /* Odd clown on sil3726/4726 PMPs */
        { "Config  Disk",       NULL,           ATA_HORKAGE_DISABLE },
 
@@ -4128,6 +4128,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
        /* Devices that do not need bridging limits applied */
        { "MTRON MSP-SATA*",            NULL,   ATA_HORKAGE_BRIDGE_OK, },
+       { "BUFFALO HD-QSU2/R5",         NULL,   ATA_HORKAGE_BRIDGE_OK, },
 
        /* Devices which aren't very happy with higher link speeds */
        { "WD My Book",                 NULL,   ATA_HORKAGE_1_5_GBPS, },
index 361c75cea57b9d1836ca6be599343f019759833b..24e51056ac26a857c1db028b9eb71450a0608d45 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
+#include <linux/dmi.h>
 
 #define DRV_NAME "pata_atiixp"
 #define DRV_VERSION "0.4.6"
@@ -33,11 +34,26 @@ enum {
        ATIIXP_IDE_UDMA_MODE    = 0x56
 };
 
+static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
+       {
+               /* Board has onboard PATA<->SATA converters */
+               .ident = "MSI E350DM-E33",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+                       DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
+               },
+       },
+       { }
+};
+
 static int atiixp_cable_detect(struct ata_port *ap)
 {
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u8 udma;
 
+       if (dmi_check_system(attixp_cable_override_dmi_table))
+               return ATA_CBL_PATA40_SHORT;
+
        /* Hack from drivers/ide/pci. Really we want to know how to do the
           raw detection not play follow the bios mode guess */
        pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
index ba91b408abad75ce7da0ff173595ce23ee51f635..d84566496746aef57baf19dae86528bc202038cb 100644 (file)
@@ -889,6 +889,7 @@ struct bm_aio_ctx {
        unsigned int done;
        unsigned flags;
 #define BM_AIO_COPY_PAGES      1
+#define BM_WRITE_ALL_PAGES     2
        int error;
        struct kref kref;
 };
@@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
                if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
                        break;
                if (rw & WRITE) {
-                       if (bm_test_page_unchanged(b->bm_pages[i])) {
+                       if (!(flags & BM_WRITE_ALL_PAGES) &&
+                           bm_test_page_unchanged(b->bm_pages[i])) {
                                dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
                                continue;
                        }
@@ -1140,6 +1142,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
        return bm_rw(mdev, WRITE, 0, 0);
 }
 
+/**
+ * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
+ * @mdev:      DRBD device.
+ *
+ * Will write all pages.
+ */
+int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
+{
+       return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
+}
+
 /**
  * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
  * @mdev:      DRBD device.
index b2ca143d0053d75487e3a083a4271b9e5d0d285e..b953cc7c9c00ce4fb13885f2631211c6046ed58d 100644 (file)
@@ -1469,6 +1469,7 @@ extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
 extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
 extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
 extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
 extern int  drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
                unsigned long al_enr);
index dbe6135a2abeddfb78b8d812f4b892c7b5696d03..f93a0320e952dd6b07fb8c73739ee73d3c1af0a0 100644 (file)
@@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
 static void md_sync_timer_fn(unsigned long data);
 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+static void _tl_clear(struct drbd_conf *mdev);
 
 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
              "Lars Ellenberg <lars@linbit.com>");
@@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 
        /* Actions operating on the disk state, also want to work on
           requests that got barrier acked. */
-       switch (what) {
-       case fail_frozen_disk_io:
-       case restart_frozen_disk_io:
-               list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
-                       req = list_entry(le, struct drbd_request, tl_requests);
-                       _req_mod(req, what);
-               }
 
-       case connection_lost_while_pending:
-       case resend:
-               break;
-       default:
-               dev_err(DEV, "what = %d in _tl_restart()\n", what);
+       list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+               req = list_entry(le, struct drbd_request, tl_requests);
+               _req_mod(req, what);
        }
 }
 
@@ -458,12 +450,17 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
  * receiver thread and the worker thread.
  */
 void tl_clear(struct drbd_conf *mdev)
+{
+       spin_lock_irq(&mdev->req_lock);
+       _tl_clear(mdev);
+       spin_unlock_irq(&mdev->req_lock);
+}
+
+static void _tl_clear(struct drbd_conf *mdev)
 {
        struct list_head *le, *tle;
        struct drbd_request *r;
 
-       spin_lock_irq(&mdev->req_lock);
-
        _tl_restart(mdev, connection_lost_while_pending);
 
        /* we expect this list to be empty. */
@@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev)
 
        memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
 
-       spin_unlock_irq(&mdev->req_lock);
 }
 
 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
@@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        if (ns.susp_fen) {
                /* case1: The outdate peer handler is successful: */
                if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
-                       tl_clear(mdev);
                        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                                drbd_uuid_new_current(mdev);
                                clear_bit(NEW_CUR_UUID, &mdev->flags);
                        }
                        spin_lock_irq(&mdev->req_lock);
+                       _tl_clear(mdev);
                        _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
                        spin_unlock_irq(&mdev->req_lock);
                }
index fb9dce8daa2468c76992f4ab609adb471bc42af3..edb490aad8b44fa507da3d72724b5df925a020e4 100644 (file)
@@ -674,8 +674,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
-               err = drbd_bitmap_io(mdev, &drbd_bm_write,
-                               "size changed", BM_LOCKED_MASK);
+               err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+                                    "size changed", BM_LOCKED_MASK);
                if (err) {
                        rv = dev_size_error;
                        goto out;
index 910335c30927f0429a4c4b0fddcfe74033c45e8d..01b2ac641c7babe119f98dac7596ff8a690402a8 100644 (file)
@@ -695,6 +695,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                break;
 
        case resend:
+               /* Simply complete (local only) READs. */
+               if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
+                       _req_may_be_done(req, m);
+                       break;
+               }
+
                /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
                   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
                   Throwing them out of the TL here by pretending we got a BARRIER_ACK
@@ -834,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
                req->private_bio = NULL;
        }
        if (rw == WRITE) {
-               remote = 1;
+               /* Need to replicate writes.  Unless it is an empty flush,
+                * which is better mapped to a DRBD P_BARRIER packet,
+                * also for drbd wire protocol compatibility reasons. */
+               if (unlikely(size == 0)) {
+                       /* The only size==0 bios we expect are empty flushes. */
+                       D_ASSERT(bio->bi_rw & REQ_FLUSH);
+                       remote = 0;
+               } else
+                       remote = 1;
        } else {
                /* READ || READA */
                if (local) {
@@ -870,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
         * extent.  This waits for any resync activity in the corresponding
         * resync extent to finish, and, if necessary, pulls in the target
         * extent into the activity log, which involves further disk io because
-        * of transactional on-disk meta data updates. */
-       if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
+        * of transactional on-disk meta data updates.
+        * Empty flushes don't need to go into the activity log, they can only
+        * flush data for pending writes which are already in there. */
+       if (rw == WRITE && local && size
+       && !test_bit(AL_SUSPENDED, &mdev->flags)) {
                req->rq_state |= RQ_IN_ACT_LOG;
                drbd_al_begin_io(mdev, sector);
        }
@@ -994,7 +1011,10 @@ allocate_barrier:
        if (rw == WRITE && _req_conflicts(req))
                goto fail_conflicting;
 
-       list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+       /* no point in adding empty flushes to the transfer log,
+        * they are mapped to drbd barriers already. */
+       if (likely(size!=0))
+               list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
 
        /* NOTE remote first: to get the concurrent write detection right,
         * we must register the request before start of local IO.  */
@@ -1014,6 +1034,14 @@ allocate_barrier:
            mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
                maybe_pull_ahead(mdev);
 
+       /* If this was a flush, queue a drbd barrier/start a new epoch.
+        * Unless the current epoch was empty anyways, or we are not currently
+        * replicating, in which case there is no point. */
+       if (unlikely(bio->bi_rw & REQ_FLUSH)
+               && mdev->newest_tle->n_writes
+               && drbd_should_do_remote(mdev->state))
+               queue_barrier(mdev);
+
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
 
index 17fa04d08be9cc0af181e5f20398128d8b4d5234..b47034e650a579b9d0263f8d0a03af2ecc8b3aa7 100644 (file)
@@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
 
        policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
 
-       if (atomic_inc_return(&freq_table_users) == 1)
+       if (!freq_table)
                result = opp_init_cpufreq_table(mpu_dev, &freq_table);
 
        if (result) {
@@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
                goto fail_ck;
        }
 
+       atomic_inc_return(&freq_table_users);
+
        result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
        if (result)
                goto fail_table;
index 53c8c51d58817cb62b4641d5e923bb4f72b39f2f..93d14070141ae64fe4e96dd838fc28c36061b3cf 100644 (file)
@@ -63,7 +63,7 @@ static void caam_jr_dequeue(unsigned long devarg)
 
                head = ACCESS_ONCE(jrp->head);
 
-               spin_lock_bh(&jrp->outlock);
+               spin_lock(&jrp->outlock);
 
                sw_idx = tail = jrp->tail;
                hw_idx = jrp->out_ring_read_index;
@@ -115,7 +115,7 @@ static void caam_jr_dequeue(unsigned long devarg)
                        jrp->tail = tail;
                }
 
-               spin_unlock_bh(&jrp->outlock);
+               spin_unlock(&jrp->outlock);
 
                /* Finally, execute user's callback */
                usercall(dev, userdesc, userstatus, userarg);
@@ -236,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
                return -EIO;
        }
 
-       spin_lock(&jrp->inplock);
+       spin_lock_bh(&jrp->inplock);
 
        head = jrp->head;
        tail = ACCESS_ONCE(jrp->tail);
 
        if (!rd_reg32(&jrp->rregs->inpring_avail) ||
            CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
-               spin_unlock(&jrp->inplock);
+               spin_unlock_bh(&jrp->inplock);
                dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
                return -EBUSY;
        }
@@ -265,7 +265,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 
        wr_reg32(&jrp->rregs->inpring_jobadd, 1);
 
-       spin_unlock(&jrp->inplock);
+       spin_unlock_bh(&jrp->inplock);
 
        return 0;
 }
index c9c4befb5a8d261701698c570c668d5009113a64..df14358d7fa1658c48e10d8b7a768f32eec6b9ac 100644 (file)
@@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev)
        /*
         * We must wait at least 256 Pk_clk cycles between two reads of the rng.
         */
-       dev->rng_wait_time      = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
-                                 256;
+       dev->rng_wait_time      = DIV_ROUND_UP_ULL(NSEC_PER_SEC,
+                                                  dev->pk_clk_freq) * 256;
 
        dev->rng.name           = dev->name;
        dev->rng.data_present   = hifn_rng_data_present,
index 08a7aa722d6b8f0d798b7a59ccd5b8146183f497..6fbfc244748fd9f5e23e05c90d57bcfbdead4b6b 100644 (file)
@@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       if (!req->flags)
+       if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
                return -EINVAL;
 
        mutex_lock(&dev->mode_config.mutex);
index a8743c399e83234c976ebdb4b471542a0645c42d..b7ee230572b7c053a20c6bf115d4fd98a836f820 100644 (file)
@@ -87,6 +87,9 @@ static struct edid_quirk {
        int product_id;
        u32 quirks;
 } edid_quirk_list[] = {
+       /* ASUS VW222S */
+       { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
        /* Acer AL1706 */
        { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
        /* Acer F51 */
index 30dc22a7156ce88446d1a11ff2d484c2581c82e6..8033526bb53b98fd84e197f8b57a5b340cc45bd6 100644 (file)
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
            (struct drm_connector **) (psb_intel_crtc + 1);
        psb_intel_crtc->mode_set.num_connectors = 0;
        psb_intel_cursor_init(dev, psb_intel_crtc);
+
+       /* Set to true so that the pipe is forced off on initial config. */
+       psb_intel_crtc->active = true;
 }
 
 int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
index d9a5372ec56f84a76f1e270e1edeb704462ed2e8..60815b861ec2143dc1fdad526232d03bea1158fd 100644 (file)
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
-       first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+       first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
 
        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
index a69a3d0d3acf6c7bc81119342aa830b41c047025..2dfa6cf4886b6a2a3b31b90d1df489634a30df3f 100644 (file)
@@ -1384,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, int reg)
 {
        u32 val = I915_READ(reg);
-       WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
+       WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
 
@@ -1404,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
 
        reg = PCH_ADPA;
        val = I915_READ(reg);
-       WARN(adpa_pipe_enabled(dev_priv, val, pipe),
+       WARN(adpa_pipe_enabled(dev_priv, pipe, val),
             "PCH VGA enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));
 
        reg = PCH_LVDS;
        val = I915_READ(reg);
-       WARN(lvds_pipe_enabled(dev_priv, val, pipe),
+       WARN(lvds_pipe_enabled(dev_priv, pipe, val),
             "PCH LVDS enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));
 
@@ -1872,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
                             enum pipe pipe, int reg)
 {
        u32 val = I915_READ(reg);
-       if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
+       if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
                DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
                              reg, pipe);
                I915_WRITE(reg, val & ~PORT_ENABLE);
@@ -1894,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
 
        reg = PCH_ADPA;
        val = I915_READ(reg);
-       if (adpa_pipe_enabled(dev_priv, val, pipe))
+       if (adpa_pipe_enabled(dev_priv, pipe, val))
                I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
 
        reg = PCH_LVDS;
        val = I915_READ(reg);
-       if (lvds_pipe_enabled(dev_priv, val, pipe)) {
+       if (lvds_pipe_enabled(dev_priv, pipe, val)) {
                DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
                I915_WRITE(reg, val & ~LVDS_PORT_EN);
                POSTING_READ(reg);
index e05c0d3e3440f1398f85806f00203998bbfc24ce..e9a6f6aaed855dfa5acad6026c0f95e79a33955e 100644 (file)
@@ -780,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Gigabyte GA-D525TUD",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+                       DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+               },
+       },
 
        { }     /* terminating entry */
 };
index cc8df4de2d921691fdee876b1cdc9b97acb39b8f..7644f31a3778685bb3e21c021e1ad76c5afb2e95 100644 (file)
@@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 
        switch (fb->pixel_format) {
        case DRM_FORMAT_XBGR8888:
-               sprctl |= SPRITE_FORMAT_RGBX888;
+               sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
                pixel_size = 4;
                break;
        case DRM_FORMAT_XRGB8888:
-               sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+               sprctl |= SPRITE_FORMAT_RGBX888;
                pixel_size = 4;
                break;
        case DRM_FORMAT_YUYV:
index 1866dbb499792e4e4758938c50802d11beb29bb7..c61014442aa931cfe5822069c7ecf6c0d565f4dc 100644 (file)
@@ -736,9 +736,11 @@ nouveau_card_init(struct drm_device *dev)
                        }
                        break;
                case NV_C0:
-                       nvc0_copy_create(dev, 1);
+                       if (!(nv_rd32(dev, 0x022500) & 0x00000200))
+                               nvc0_copy_create(dev, 1);
                case NV_D0:
-                       nvc0_copy_create(dev, 0);
+                       if (!(nv_rd32(dev, 0x022500) & 0x00000100))
+                               nvc0_copy_create(dev, 0);
                        break;
                default:
                        break;
index f4d4505fe831b9c60b676b2f761727af6987ac13..2817101fb167eb1f70ac36d88e2f4f1c58dd2427 100644 (file)
@@ -258,7 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
                radeon_crtc->enabled = true;
                /* adjust pm to dpms changes BEFORE enabling crtcs */
                radeon_pm_compute_clocks(rdev);
-               /* disable crtc pair power gating before programming */
                if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
                        atombios_powergate_crtc(crtc, ATOM_DISABLE);
                atombios_enable_crtc(crtc, ATOM_ENABLE);
@@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
                        atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
                atombios_enable_crtc(crtc, ATOM_DISABLE);
                radeon_crtc->enabled = false;
-               /* power gating is per-pair */
-               if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
-                       struct drm_crtc *other_crtc;
-                       struct radeon_crtc *other_radeon_crtc;
-                       list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
-                               other_radeon_crtc = to_radeon_crtc(other_crtc);
-                               if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) ||
-                                   ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) ||
-                                   ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) ||
-                                   ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) ||
-                                   ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) ||
-                                   ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) {
-                                       /* if both crtcs in the pair are off, enable power gating */
-                                       if (other_radeon_crtc->enabled == false)
-                                               atombios_powergate_crtc(crtc, ATOM_ENABLE);
-                                       break;
-                               }
-                       }
-               }
+               if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
+                       atombios_powergate_crtc(crtc, ATOM_ENABLE);
                /* adjust pm to dpms changes AFTER disabling crtcs */
                radeon_pm_compute_clocks(rdev);
                break;
@@ -1682,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_atom_ss ss;
+       int i;
 
        atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (rdev->mode_info.crtcs[i] &&
+                   rdev->mode_info.crtcs[i]->enabled &&
+                   i != radeon_crtc->crtc_id &&
+                   radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+                       /* one other crtc is using this pll don't turn
+                        * off the pll
+                        */
+                       goto done;
+               }
+       }
+
        switch (radeon_crtc->pll_id) {
        case ATOM_PPLL1:
        case ATOM_PPLL2:
@@ -1701,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
        default:
                break;
        }
+done:
        radeon_crtc->pll_id = -1;
 }
 
index 7712cf5ab33b9a107ddf6ef61f1122622a358951..3623b98ed3fe2ab529617e37ceb41dfcf4fd5882 100644 (file)
@@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+       u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
+       u8 tmp;
 
        if (!ASIC_IS_DCE4(rdev))
                return panel_mode;
 
-       if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
-           ENCODER_OBJECT_ID_NUTMEG)
-               panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
-       else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
-                ENCODER_OBJECT_ID_TRAVIS) {
-               u8 id[6];
-               int i;
-               for (i = 0; i < 6; i++)
-                       id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
-               if (id[0] == 0x73 &&
-                   id[1] == 0x69 &&
-                   id[2] == 0x76 &&
-                   id[3] == 0x61 &&
-                   id[4] == 0x72 &&
-                   id[5] == 0x54)
+       if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+               /* DP bridge chips */
+               tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+               if (tmp & 1)
+                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+               else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+                        (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
                        panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
                else
-                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+                       panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
        } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
-               u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+               /* eDP */
+               tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
                if (tmp & 1)
                        panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
        }
index f9bc27fe269a125b54803350b838a9062f0e6337..6e8803a1170c24b052a0193ce58294605fed01fd 100644 (file)
@@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        struct radeon_connector *radeon_connector = NULL;
        struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
@@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               /* some early dce3.2 boards have a bug in their transmitter control table */
-               if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
-                   ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
-                       if (ASIC_IS_DCE6(rdev)) {
-                               /* It seems we need to call ATOM_ENCODER_CMD_SETUP again
-                                * before reenabling encoder on DPMS ON, otherwise we never
-                                * get picture
-                                */
-                               atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+                       if (!connector)
+                               dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+                       else
+                               dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+                       /* setup and enable the encoder */
+                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+                       atombios_dig_encoder_setup(encoder,
+                                                  ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+                                                  dig->panel_mode);
+                       if (ext_encoder) {
+                               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+                                       atombios_external_encoder_setup(encoder, ext_encoder,
+                                                                       EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
                        }
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-               } else {
+               } else if (ASIC_IS_DCE4(rdev)) {
+                       /* setup and enable the encoder */
+                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+                       /* enable the transmitter */
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+               } else {
+                       /* setup and enable the encoder and transmitter */
+                       atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+                       /* some early dce3.2 boards have a bug in their transmitter control table */
+                       if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
+                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
                        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+                       /* disable the transmitter */
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-               else
+               } else if (ASIC_IS_DCE4(rdev)) {
+                       /* disable the transmitter */
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+               } else {
+                       /* disable the encoder and transmitter */
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+                       atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+               }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
                        if (ASIC_IS_DCE4(rdev))
                                atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct drm_encoder *test_encoder;
-       struct radeon_encoder_atom_dig *dig;
+       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t dig_enc_in_use = 0;
 
-       /* DCE4/5 */
-       if (ASIC_IS_DCE4(rdev)) {
-               dig = radeon_encoder->enc_priv;
-               if (ASIC_IS_DCE41(rdev)) {
+       if (ASIC_IS_DCE6(rdev)) {
+               /* DCE6 */
+               switch (radeon_encoder->encoder_id) {
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+                       if (dig->linkb)
+                               return 1;
+                       else
+                               return 0;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+                       if (dig->linkb)
+                               return 3;
+                       else
+                               return 2;
+                       break;
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+                       if (dig->linkb)
+                               return 5;
+                       else
+                               return 4;
+                       break;
+               }
+       } else if (ASIC_IS_DCE4(rdev)) {
+               /* DCE4/5 */
+               if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
                        /* ontario follows DCE4 */
                        if (rdev->family == CHIP_PALM) {
                                if (dig->linkb)
@@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
 
        radeon_encoder->pixel_clock = adjusted_mode->clock;
 
+       /* need to call this here rather than in prepare() since we need some crtc info */
+       radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
        if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
                if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
                        atombios_yuv_setup(encoder, true);
@@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
-                       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-                       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
-                       if (!connector)
-                               dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
-                       else
-                               dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
-
-                       /* setup and enable the encoder */
-                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-                       atombios_dig_encoder_setup(encoder,
-                                                  ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
-                                                  dig->panel_mode);
-               } else if (ASIC_IS_DCE4(rdev)) {
-                       /* disable the transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-                       /* setup and enable the encoder */
-                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-
-                       /* enable the transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-               } else {
-                       /* disable the encoder and transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-                       atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-
-                       /* setup and enable the encoder and transmitter */
-                       atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-               }
+               /* handled in dpms */
                break;
        case ENCODER_OBJECT_ID_INTERNAL_DDI:
        case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
                break;
        }
 
-       if (ext_encoder) {
-               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
-                       atombios_external_encoder_setup(encoder, ext_encoder,
-                                                       EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
-               else
-                       atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
-       }
-
        atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
        }
 
        radeon_atom_output_lock(encoder, true);
-       radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
        if (connector) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 
 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
 {
+       /* need to call this here as we need the crtc set up */
        radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
        radeon_atom_output_lock(encoder, false);
 }
@@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-               if (ASIC_IS_DCE4(rdev))
-                       /* disable the transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-               else {
-                       /* disable the encoder and transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-                       atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-               }
+               /* handled in dpms */
                break;
        case ENCODER_OBJECT_ID_INTERNAL_DDI:
        case ENCODER_OBJECT_ID_INTERNAL_DVO1:
index ab74e6b149e7468c16f47d0f4e6228abdcf41e14..f37676d7f217c56373ba55e7c883098b5736d1f7 100644 (file)
@@ -63,6 +63,7 @@ struct r600_cs_track {
        u32                     cb_color_size_idx[8]; /* unused */
        u32                     cb_target_mask;
        u32                     cb_shader_mask;  /* unused */
+       bool                    is_resolve;
        u32                     cb_color_size[8];
        u32                     vgt_strmout_en;
        u32                     vgt_strmout_buffer_en;
@@ -315,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track)
                track->cb_color_bo[i] = NULL;
                track->cb_color_bo_offset[i] = 0xFFFFFFFF;
                track->cb_color_bo_mc[i] = 0xFFFFFFFF;
-       }
+               track->cb_color_frag_bo[i] = NULL;
+               track->cb_color_frag_offset[i] = 0xFFFFFFFF;
+               track->cb_color_tile_bo[i] = NULL;
+               track->cb_color_tile_offset[i] = 0xFFFFFFFF;
+               track->cb_color_mask[i] = 0xFFFFFFFF;
+       }
+       track->is_resolve = false;
+       track->nsamples = 16;
+       track->log_nsamples = 4;
        track->cb_target_mask = 0xFFFFFFFF;
        track->cb_shader_mask = 0xFFFFFFFF;
        track->cb_dirty = true;
@@ -352,6 +361,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        volatile u32 *ib = p->ib.ptr;
        unsigned array_mode;
        u32 format;
+       /* When resolve is used, the second colorbuffer has always 1 sample. */
+       unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
 
        size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
        format = G_0280A0_FORMAT(track->cb_color_info[i]);
@@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        array_check.group_size = track->group_size;
        array_check.nbanks = track->nbanks;
        array_check.npipes = track->npipes;
-       array_check.nsamples = track->nsamples;
+       array_check.nsamples = nsamples;
        array_check.blocksize = r600_fmt_get_blocksize(format);
        if (r600_get_array_mode_alignment(&array_check,
                                          &pitch_align, &height_align, &depth_align, &base_align)) {
@@ -421,7 +432,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 
        /* check offset */
        tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
-             r600_fmt_get_blocksize(format) * track->nsamples;
+             r600_fmt_get_blocksize(format) * nsamples;
        switch (array_mode) {
        default:
        case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -792,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
         */
        if (track->cb_dirty) {
                tmp = track->cb_target_mask;
+
+               /* We must check both colorbuffers for RESOLVE. */
+               if (track->is_resolve) {
+                       tmp |= 0xff;
+               }
+
                for (i = 0; i < 8; i++) {
                        if ((tmp >> (i * 4)) & 0xF) {
                                /* at least one component is enabled */
@@ -1281,6 +1298,11 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                track->nsamples = 1 << tmp;
                track->cb_dirty = true;
                break;
+       case R_028808_CB_COLOR_CONTROL:
+               tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
+               track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
+               track->cb_dirty = true;
+               break;
        case R_0280A0_CB_COLOR0_INFO:
        case R_0280A4_CB_COLOR1_INFO:
        case R_0280A8_CB_COLOR2_INFO:
@@ -1416,7 +1438,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case R_028118_CB_COLOR6_MASK:
        case R_02811C_CB_COLOR7_MASK:
                tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
-               track->cb_color_mask[tmp] = ib[idx];
+               track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
                if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
                        track->cb_dirty = true;
                }
index bdb69a63062fd72947b924503d5fb201c7e33f00..fa6f37099ba90a6968744f15e4531e7ce3de9d17 100644 (file)
 #define        CC_RB_BACKEND_DISABLE                           0x98F4
 #define                BACKEND_DISABLE(x)                              ((x) << 16)
 
+#define R_028808_CB_COLOR_CONTROL                      0x28808
+#define   S_028808_SPECIAL_OP(x)                       (((x) & 0x7) << 4)
+#define   G_028808_SPECIAL_OP(x)                       (((x) >> 4) & 0x7)
+#define   C_028808_SPECIAL_OP                          0xFFFFFF8F
+#define     V_028808_SPECIAL_NORMAL                     0x00
+#define     V_028808_SPECIAL_DISABLE                    0x01
+#define     V_028808_SPECIAL_RESOLVE_BOX                0x07
+
 #define        CB_COLOR0_BASE                                  0x28040
 #define        CB_COLOR1_BASE                                  0x28044
 #define        CB_COLOR2_BASE                                  0x28048
index d2e243867ac6f3c9fe4d6c28e25fa421cac6c48b..7a3daebd732d67d85d40dc04d6aa851af42aae26 100644 (file)
@@ -1051,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev,
        if (rdev->flags & RADEON_IS_AGP)
                rdev->need_dma32 = true;
        if ((rdev->flags & RADEON_IS_PCI) &&
-           (rdev->family < CHIP_RS400))
+           (rdev->family <= CHIP_RS740))
                rdev->need_dma32 = true;
 
        dma_bits = rdev->need_dma32 ? 32 : 40;
@@ -1346,12 +1346,15 @@ retry:
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        radeon_ring_restore(rdev, &rdev->ring[i],
                                            ring_sizes[i], ring_data[i]);
+                       ring_sizes[i] = 0;
+                       ring_data[i] = NULL;
                }
 
                r = radeon_ib_ring_tests(rdev);
                if (r) {
                        dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
                        if (saved) {
+                               saved = false;
                                radeon_suspend(rdev);
                                goto retry;
                        }
index 27d22d709c9040561c61e51013d0418ab3625e31..8c593ea82c412b2098d1c3dac3e1f6167356e6fa 100644 (file)
  *   2.19.0 - r600-eg: MSAA textures
  *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
  *   2.21.0 - r600-r700: FMASK and CMASK
+ *   2.22.0 - r600 only: RESOLVE_BOX allowed
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       21
+#define KMS_DRIVER_MINOR       22
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index f93e45d869f401225e853d81d2e9ec44baf092e6..20bfbda7b3f1bdcf0eff27332bf2b9812af696b4 100644 (file)
@@ -744,7 +744,6 @@ r600 0x9400
 0x00028C38 CB_CLRCMP_DST
 0x00028C3C CB_CLRCMP_MSK
 0x00028C34 CB_CLRCMP_SRC
-0x00028808 CB_COLOR_CONTROL
 0x0002842C CB_FOG_BLUE
 0x00028428 CB_FOG_GREEN
 0x00028424 CB_FOG_RED
index 60ea284407cea4d1f62db43a72b3e6cdbea47e31..8bf8a64e511543989c41624338304d0f11f0c59b 100644 (file)
@@ -1624,7 +1624,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
index 351d1f4593e7a8276da3ff7cdd9b286d4ea2b097..4ee5789487233dbe6bf03920fa29fb820d366b8d 100644 (file)
@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
                }
+       }, {
+               /* Old interface reads the same sensor for fan0 and fan1 */
+               .ident = "Asus M5A78L",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "M5A78L")
+               }
        },
        { }
 };
index 92406097efebcb5e0052fba38a0f59b5eb8c45eb..8d1e32d7cd9767db4e1c99db627681f03f233139 100644 (file)
@@ -4,7 +4,7 @@
 
 int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 {
-       ide_drive_t *drive = dev_get_drvdata(dev);
+       ide_drive_t *drive = to_ide_device(dev);
        ide_drive_t *pair = ide_get_pair_dev(drive);
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
@@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 
 int generic_ide_resume(struct device *dev)
 {
-       ide_drive_t *drive = dev_get_drvdata(dev);
+       ide_drive_t *drive = to_ide_device(dev);
        ide_drive_t *pair = ide_get_pair_dev(drive);
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
index 437bc193e170d6da377d6b7447adc5cc43d348bb..568307cc7caf8d882034011aa5303ad47eaf3bed 100644 (file)
@@ -340,7 +340,7 @@ retry:
         * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
         */
        err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
-       kfree(new_aeb);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
 
@@ -353,7 +353,7 @@ write_error:
                list_add(&new_aeb->u.list, &ai->erase);
                goto retry;
        }
-       kfree(new_aeb);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 out_free:
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
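
The two hunks above matter because new_aeb is allocated from ai->aeb_slab_cache; handing a slab-cache object to kfree() instead of kmem_cache_free() corrupts the allocator's bookkeeping. A minimal sketch of the correct pairing, with illustrative names:

    #include <linux/slab.h>

    struct item {
            int id;
    };

    /* created elsewhere with kmem_cache_create("item_cache", ...) */
    static struct kmem_cache *item_cache;

    static void item_roundtrip(void)
    {
            struct item *it = kmem_cache_alloc(item_cache, GFP_KERNEL);

            if (!it)
                    return;
            /* ... use it ... */
            kmem_cache_free(item_cache, it);  /* not kfree(it) */
    }
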
index 4f50145f64839f519c232d0e4ec79e7fedf2bd34..662c5f7eb0c54af4cb3c788959109a7693798629 100644 (file)
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
 
        dev->irq = res_irq->start;
-       priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+       priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+       if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+               priv->irq_flags |= IRQF_SHARED;
        priv->reg_base = addr;
        /* The CAN clock frequency is half the oscillator clock frequency */
        priv->can.clock.freq = pdata->osc_freq / 2;
index 3105961756767cb3b52bc18bce8a248e7ef110b7..b595d3422b9f759d86c760370b30efcb04f76575 100644 (file)
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
        const uint8_t *mem, *end, *dat;
        uint16_t type, len;
        uint32_t addr;
-       uint8_t *buf = NULL;
+       uint8_t *buf = NULL, *new_buf;
        int buflen = 0;
        int8_t type_end = 0;
 
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
                if (len > buflen) {
                        /* align buflen */
                        buflen = (len + (1024-1)) & ~(1024-1);
-                       buf = krealloc(buf, buflen, GFP_KERNEL);
-                       if (!buf) {
+                       new_buf = krealloc(buf, buflen, GFP_KERNEL);
+                       if (!new_buf) {
                                ret = -ENOMEM;
                                goto failed;
                        }
+                       buf = new_buf;
                }
                /* verify record data */
                memcpy_fromio(buf, &dpram[addr + offset], len);
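
The krealloc() fix above is the standard guard against losing the only pointer to the old buffer: if the reallocation fails, buf must still point at the original allocation so the error path can free it (the same rule applies to userspace realloc()). A minimal sketch of the pattern, assuming a helper shaped roughly like the driver's loop:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Grow *bufp to at least 'want' bytes, keeping the old buffer on failure. */
    static int grow_buffer(u8 **bufp, int *buflenp, int want)
    {
            u8 *new_buf;
            int new_len = ALIGN(want, 1024);  /* round up to a 1 KiB multiple */

            new_buf = krealloc(*bufp, new_len, GFP_KERNEL);
            if (!new_buf)
                    return -ENOMEM;           /* *bufp is still valid and freeable */

            *bufp = new_buf;
            *buflenp = new_len;
            return 0;
    }
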
index 463b9ec57d8077c8a1599457974ab15dc1210536..6d1a24acb77e1cb9332906759811980aa537b4e2 100644 (file)
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
                        continue;               \
                else
 
-#define for_each_napi_rx_queue(bp, var) \
-       for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
        for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
index e879e19eb0d68926a883ee55ffcaaa194a8b513b..af20c6ee2cd9292dfa41c8693e6d8253dfe667e4 100644 (file)
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
         */
        bnx2x_setup_tc(bp->dev, bp->max_cos);
 
+       /* Add all NAPI objects */
+       bnx2x_add_all_napi(bp);
        bnx2x_napi_enable(bp);
 
        /* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
                /* Disable HW interrupts, NAPI */
                bnx2x_netif_stop(bp, 1);
+               /* Delete all NAPI objects */
+               bnx2x_del_all_napi(bp);
 
                /* Release IRQs */
                bnx2x_free_irq(bp);
index dfa757e742966998fb8e9ea8b99f2667b0835a43..21b553229ea4c176dfe6c53c60ddde9df3f3f977 100644 (file)
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
        bp->num_napi_queues = bp->num_queues;
 
        /* Add NAPI objects */
-       for_each_napi_rx_queue(bp, i)
+       for_each_rx_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
        int i;
 
-       for_each_napi_rx_queue(bp, i)
+       for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
index fc4e0e3885b039f0cb7dff48edade89e211de15b..c37a68d68090e1326b6f491c9f4e28f26a78f87b 100644 (file)
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
  */
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
-       bnx2x_del_all_napi(bp);
        bnx2x_disable_msi(bp);
        BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
        bnx2x_set_int_mode(bp);
-       bnx2x_add_all_napi(bp);
 }
 
 /**
index 02b5a343b19506a2edc60be5dd6b41d781aadb85..21054987257a12960b859594db72830c42e21059 100644 (file)
@@ -8427,6 +8427,8 @@ unload_error:
 
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        /* Release IRQs */
        bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
+       int i;
 
-       disable_irq(bp->pdev->irq);
-       bnx2x_interrupt(bp->pdev->irq, dev);
-       enable_irq(bp->pdev->irq);
+       for_each_eth_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+       }
 }
 #endif
 
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
         */
        bnx2x_set_int_mode(bp);
 
-       /* Add all NAPI objects */
-       bnx2x_add_all_napi(bp);
-
        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(dev);
 
-       /* Delete all NAPI objects */
-       bnx2x_del_all_napi(bp);
-
        /* Power on: we can't let PCI layer write to us while we are in D3 */
        bnx2x_set_power_state(bp, PCI_D0);
 
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        bnx2x_tx_disable(bp);
 
        bnx2x_netif_stop(bp, 0);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        del_timer_sync(&bp->timer);
 
index 845b2020f291cc20ad8223c8165512604314e812..138446957786a9ce0845a7e8221af4887b904ce3 100644 (file)
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
 {
        struct net_local *lp = netdev_priv(dev);
        unsigned long flags;
+       u16 cfg;
 
        spin_lock_irqsave(&lp->lock, flags);
        if (dev->flags & IFF_PROMISC)
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
        /* in promiscuous mode, we accept errored packets,
         * so we have to enable interrupts on them also
         */
-       writereg(dev, PP_RxCFG,
-                (lp->curr_rx_cfg |
-                 (lp->rx_mode == RX_ALL_ACCEPT)
-                 ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
-                 : 0));
+       cfg = lp->curr_rx_cfg;
+       if (lp->rx_mode == RX_ALL_ACCEPT)
+               cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
+       writereg(dev, PP_RxCFG, cfg);
        spin_unlock_irqrestore(&lp->lock, flags);
 }
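
Beyond the cleanup, the rewrite above removes an operator-precedence trap: in the old expression the bitwise OR bound tighter than ?:, so the tested condition was effectively (curr_rx_cfg | promiscuous-check), which is non-zero whenever the base config is, and the register was written with only the error-enable bits. A small plain-C demonstration with invented register values:

    #include <stdio.h>

    #define ERR_BITS 0x700                   /* illustrative bit values only */

    int main(void)
    {
            unsigned int cfg = 0x00d3;       /* hypothetical base RxCFG value */
            int promisc = 0;                 /* not in accept-all mode */

            /* '|' binds tighter than '?:', so this is (cfg | promisc) ? ERR_BITS : 0
             * and yields ERR_BITS whenever cfg is non-zero, dropping the base bits. */
            unsigned int broken = cfg | promisc ? ERR_BITS : 0;

            /* the fixed form builds the value explicitly */
            unsigned int fixed = cfg;
            if (promisc)
                    fixed |= ERR_BITS;

            printf("broken=0x%x fixed=0x%x\n", broken, fixed);  /* 0x700 vs 0xd3 */
            return 0;
    }
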
 
index 7fac97b4bb59c19ecaca1073d84c6a183ecd17b1..8c63d06ab12b6ccf899fae8fa13f75904beb22c1 100644 (file)
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
        int num = 0, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-       spin_lock_bh(&adapter->mcc_cq_lock);
+       spin_lock(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
        if (num)
                be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-       spin_unlock_bh(&adapter->mcc_cq_lock);
+       spin_unlock(&adapter->mcc_cq_lock);
        return status;
 }
 
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
                if (be_error(adapter))
                        return -EIO;
 
+               local_bh_disable();
                status = be_process_mcc(adapter);
+               local_bh_enable();
 
                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
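
With the change above, be_process_mcc() takes mcc_cq_lock as a plain spin_lock() and the responsibility for disabling bottom halves moves to its process-context callers; paths that already run in BH context pay nothing extra. A minimal sketch of that calling convention, with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/bottom_half.h>

    static DEFINE_SPINLOCK(cq_lock);         /* hypothetical completion-queue lock */

    /* Shared helper: assumes BHs are already disabled (NAPI poll, or a caller
     * that wrapped us in local_bh_disable()). */
    static void process_completions(void)
    {
            spin_lock(&cq_lock);
            /* ... drain the completion queue ... */
            spin_unlock(&cq_lock);
    }

    /* Process-context caller (worker, init path): provide the BH protection
     * itself instead of forcing it onto the hot BH-context path. */
    static void worker_poll(void)
    {
            local_bh_disable();
            process_completions();
            local_bh_enable();
    }
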
index 90a903d83d8747ca26d711b7b3a188f81a554611..78b8aa8069f03c440ea6037f9dc3e86e81c62fe6 100644 (file)
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
        /* when interrupts are not yet enabled, just reap any pending
        * mcc completions */
        if (!netif_running(adapter->netdev)) {
+               local_bh_disable();
                be_process_mcc(adapter);
+               local_bh_enable();
                goto reschedule;
        }
 
index 4605f7246687d0898ad82e9c769026c741d6d540..d3233f59a82e47b1d70a372c0973e1447d0ef90b 100644 (file)
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-               dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+               dev->features |= NETIF_F_HW_VLAN_RX;
        }
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
index cd153326c3cf8254543b0b544f0dfd8443cd6736..cb3356c9af803509efb2c58ff4f7d0667e689e1c 100644 (file)
@@ -310,6 +310,7 @@ struct e1000_adapter {
         */
        struct e1000_ring *tx_ring /* One per active queue */
                                                ____cacheline_aligned_in_smp;
+       u32 tx_fifo_limit;
 
        struct napi_struct napi;
 
index 46c3b1f9ff8997af685836bb82fb0e1bd5e599cf..d01a099475a143a44253b83ef212d8ad1f5935bf 100644 (file)
@@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
                break;
        }
 
+       /*
+        * Alignment of Tx data is on an arbitrary byte boundary with the
+        * maximum size per Tx descriptor limited only to the transmit
+        * allocation of the packet buffer minus 96 bytes with an upper
+        * limit of 24KB due to receive synchronization limitations.
+        */
+       adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+                                      24 << 10);
+
        /*
         * Disable Adaptive Interrupt Moderation if 2 full packets cannot
         * fit in receive buffer.
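
The comment and calculation added above replace e1000e's fixed 8 KB per-descriptor cap with one derived from the hardware: per the code, the upper half of PBA gives the Tx packet-buffer allocation in KB, 96 bytes are reserved, and the result is clamped to 24 KB. A plain-C walk-through with an invented PBA value (the register layout is taken from how the hunk uses it, not from the datasheet):

    #include <stdio.h>

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int pba   = 0x0014000c;  /* hypothetical: Tx allocation = 20 KB */
            unsigned int tx_kb = pba >> 16;   /* upper 16 bits: Tx allocation in KB */

            /* ((PBA >> 16) << 10) - 96, capped at 24 KB, as in the hunk above */
            unsigned int tx_fifo_limit = min_u32((tx_kb << 10) - 96, 24 << 10);

            printf("tx_fifo_limit = %u bytes\n", tx_fifo_limit);  /* 20384 */
            return 0;
    }
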
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        return 1;
 }
 
-#define E1000_MAX_PER_TXD      8192
-#define E1000_MAX_TXD_PWR      12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
                        unsigned int first, unsigned int max_per_txd,
-                       unsigned int nr_frags, unsigned int mss)
+                       unsigned int nr_frags)
 {
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+       BUG_ON(size > tx_ring->count);
+
        if (e1000_desc_unused(tx_ring) >= size)
                return 0;
        return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int first;
-       unsigned int max_per_txd = E1000_MAX_PER_TXD;
-       unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
        unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        }
 
        mss = skb_shinfo(skb)->gso_size;
-       /*
-        * The controller does a simple calculation to
-        * make sure there is enough room in the FIFO before
-        * initiating the DMA for each buffer.  The calc is:
-        * 4 = ceil(buffer len/mss).  To make sure we don't
-        * overrun the FIFO, adjust the max buffer len if mss
-        * drops.
-        */
        if (mss) {
                u8 hdr_len;
-               max_per_txd = min(mss << 2, max_per_txd);
-               max_txd_pwr = fls(max_per_txd) - 1;
 
                /*
                 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                count++;
        count++;
 
-       count += TXD_USE_COUNT(len, max_txd_pwr);
+       count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (f = 0; f < nr_frags; f++)
-               count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-                                      max_txd_pwr);
+               count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+                                     adapter->tx_fifo_limit);
 
        if (adapter->hw.mac.tx_pkt_filtering)
                e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
        /* if count is 0 then mapping error has occurred */
-       count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+       count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+                            nr_frags);
        if (count) {
                skb_tx_timestamp(skb);
 
                netdev_sent_queue(netdev, skb->len);
                e1000_tx_queue(tx_ring, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
-               e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+               e1000_maybe_stop_tx(tx_ring,
+                                   (MAX_SKB_FRAGS *
+                                    DIV_ROUND_UP(PAGE_SIZE,
+                                                 adapter->tx_fifo_limit) + 2));
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        adapter->hw.phy.autoneg_advertised = 0x2f;
 
        /* ring size defaults */
-       adapter->rx_ring->count = 256;
-       adapter->tx_ring->count = 256;
+       adapter->rx_ring->count = E1000_DEFAULT_RXD;
+       adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
        /*
         * Initial Wake on LAN setting - If APM wake is enabled in
index 8cba2df82b18b9f2dfa1ff82eee3b22ef9eeb0c9..5faedd855b779272342b37c6d992a1a93562a70b 100644 (file)
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
                                       &ip_entry->ip4dst, &ip_entry->pdst);
        if (rc != 0) {
                rc = efx_filter_get_ipv4_full(
-                       &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
-                       &ip_entry->ip4dst, &ip_entry->pdst);
+                       &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+                       &ip_entry->ip4src, &ip_entry->psrc);
                EFX_WARN_ON_PARANOID(rc);
                ip_mask->ip4src = ~0;
                ip_mask->psrc = ~0;
index e2d083228f3a6b6b6039ab3276182d3adbc72869..719be3912aa9ca5cb7b0a527811df460c257561b 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
index 9820ec842cc01f8a7fb0897d993d0b873902dee8..223adf95fd0374e465447adc52d854491721fe4f 100644 (file)
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
 struct dma_desc {
        /* Receive descriptor */
        union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
                                         * is not calculated */
        cic_full = 3,           /* IP header and pseudoheader */
 };
+
+#endif /* __DESCS_H__ */
index dd8d6e19dff6b2c1182dfb0f31ca8c4696c84931..7ee9499a6e385a77b9b2110b9578750e767745ab 100644 (file)
@@ -27,6 +27,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
 #if defined(CONFIG_STMMAC_RING)
 static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
 {
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
        p->des01.tx.buffer1_size = len;
 }
 #endif
+
+#endif /* __DESC_COM_H__ */
index 7c6d857a9cc7f0c357bb7b1d0cb9d417ecbc25a5..2ec6aeae349e5f7dd3b4e873d1366037a943cb27 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
 #include <linux/phy.h>
 #include "common.h"
 
@@ -119,3 +122,5 @@ enum ttc_control {
 #define DMA_MISSED_FRAME_M_CNTR        0x0000ffff      /* Missed Frame Counter */
 
 extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
index f90fcb5f957351b742df584104eade1df609bb8b..0e4cacedc1f0e0cf44fad3bb84c5bac000f8e6ee 100644 (file)
@@ -19,6 +19,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
 
 #include <linux/phy.h>
 #include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
 
 /* Synopsys Core versions */
-#define        DWMAC_CORE_3_40 34
+#define        DWMAC_CORE_3_40 0x34
 
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
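
The DWMAC_CORE_3_40 change from 34 to 0x34 is more than cosmetic: judging by how the constant is compared against the hardware version field, the Synopsys core version packs its digits as hex nibbles, so a 3.40 core reports 0x34 and a decimal 34 (0x22) would never match. A small decoding sketch under that assumption:

    #include <stdio.h>

    int main(void)
    {
            unsigned int synopsys_id = 0x34;  /* value read from the version register */

            /* high nibble = major, low nibble = first minor digit: 0x34 -> 3.40 */
            printf("core %u.%u0\n", (synopsys_id >> 4) & 0xf, synopsys_id & 0xf);
            return 0;
    }
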
index e678ce39d0146b707c012bb7bbd40a58361855c2..e49c9a0fd6ffe9a8ad52f19134e2e8c1aa7dcd2e 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
 /* DMA CRS Control and Status Register Mapping */
 #define DMA_BUS_MODE           0x00001000      /* Bus Mode */
 #define DMA_XMT_POLL_DEMAND    0x00001004      /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
 extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
 extern int dwmac_dma_interrupt(void __iomem *ioaddr,
                                struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */
index a38352024cb8fec7897fc4a990972658efb35dfb..67995ef252515f53c6796ecf77bbe66460223944 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __MMC_H__
+#define __MMC_H__
+
 /* MMC control register */
 /* When set, all counter are reset */
 #define MMC_CNTRL_COUNTER_RESET                0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
 extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
 extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
 extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */
index c07cfe989f6ef35716c59d3b5e29cdeefa4f1f2a..0c74a702d4610597da59200be253306c7b4db534 100644 (file)
@@ -33,7 +33,7 @@
 #define MMC_TX_INTR            0x00000108      /* MMC TX Interrupt */
 #define MMC_RX_INTR_MASK       0x0000010c      /* MMC Interrupt Mask */
 #define MMC_TX_INTR_MASK       0x00000110      /* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK                0xffffffff
+#define MMC_DEFAULT_MASK               0xffffffff
 
 /* MMC TX counter registers */
 
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 /* To mask all interrupts. */
 void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
-       writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
-       writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actually supported).
index f2d3665430ad455b82552d975d9f91066a5fc913..e872e1da3137cef68265e845188bb8544acb988e 100644 (file)
@@ -20,6 +20,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 #define DRV_MODULE_VERSION     "March_2012"
 
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
 {
 }
 #endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */
index 6863590d184bcc9c335da8bfb51562eafafc7557..aea9b14cdfbeff126a962e766ca97894284e51b6 100644 (file)
@@ -21,6 +21,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
 
 struct stmmac_timer {
        void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
 extern int tmu2_register_user(void *fnt, void *data);
 extern void tmu2_unregister_user(void);
 #endif
+
+#endif /* __STMMAC_TIMER_H__ */
index cd7ee204e94a10abaa88b3288dcd2cace5ceca3e..a9ca4a03d31b2fe2adc68818da636edb8d5aeb2b 100644 (file)
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
 
-       if (data->bus)
+       if (data->bus) {
+               mdiobus_unregister(data->bus);
                mdiobus_free(data->bus);
+       }
 
        if (data->clk)
                clk_put(data->clk);
index 24d8566cfd8b98cdd50540b09b19c9287757e914..441b4dc79450c1009151692565586c53060e5c83 100644 (file)
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
                        sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
                break ;
        case SMT_P_REASON :
-               * (u_long *) to = 0 ;
+               *(u32 *)to = 0 ;
                sp_len = 4 ;
                goto sp_done ;
        case SMT_P1033 :                        /* time stamp */
index 328397c66730cae37a9c1cdc02cdb7dde523e2b3..adfab3fc5478cb6815129e3b966dbe904141bad3 100644 (file)
@@ -413,7 +413,9 @@ static const struct usb_device_id products[] = {
 
        /* 5. Gobi 2000 and 3000 devices */
        {QMI_GOBI_DEVICE(0x413c, 0x8186)},      /* Dell Gobi 2000 Modem device (N0218, VU936) */
+       {QMI_GOBI_DEVICE(0x413c, 0x8194)},      /* Dell Gobi 3000 Composite */
        {QMI_GOBI_DEVICE(0x05c6, 0x920b)},      /* Generic Gobi 2000 Modem device */
+       {QMI_GOBI_DEVICE(0x05c6, 0x920d)},      /* Gobi 3000 Composite */
        {QMI_GOBI_DEVICE(0x05c6, 0x9225)},      /* Sony Gobi 2000 Modem device (N0279, VU730) */
        {QMI_GOBI_DEVICE(0x05c6, 0x9245)},      /* Samsung Gobi 2000 Modem device (VL176) */
        {QMI_GOBI_DEVICE(0x03f0, 0x251d)},      /* HP Gobi 2000 Modem device (VP412) */
@@ -441,6 +443,8 @@ static const struct usb_device_id products[] = {
        {QMI_GOBI_DEVICE(0x1199, 0x9015)},      /* Sierra Wireless Gobi 3000 Modem device */
        {QMI_GOBI_DEVICE(0x1199, 0x9019)},      /* Sierra Wireless Gobi 3000 Modem device */
        {QMI_GOBI_DEVICE(0x1199, 0x901b)},      /* Sierra Wireless MC7770 */
+       {QMI_GOBI_DEVICE(0x12d1, 0x14f1)},      /* Sony Gobi 3000 Composite */
+       {QMI_GOBI_DEVICE(0x1410, 0xa021)},      /* Foxconn Gobi 3000 Modem device (Novatel E396) */
 
        { }                                     /* END */
 };
index 8531c1caac283263febc4f674daedef5b7844ff1..fd4b26d46fd5d2f2c8ccb68cd55844a3a5c9d336 100644 (file)
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
                                netif_device_present(dev->net) &&
                                !timer_pending(&dev->delay) &&
                                !test_bit(EVENT_RX_HALT, &dev->flags))
-                                       rx_alloc_submit(dev, GFP_KERNEL);
+                                       rx_alloc_submit(dev, GFP_NOIO);
 
                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
                                netif_tx_wake_all_queues(dev->net);
index 4026c906cc7b45745e3a80dbc5edc9360413f78f..b7e0258887e70cf9be193c70e73334e9e8fe0473 100644 (file)
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
        case AR5K_EEPROM_MODE_11A:
                offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
                rate_pcal_info = ee->ee_rate_tpwr_a;
-               ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
+               ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
                break;
        case AR5K_EEPROM_MODE_11B:
                offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
index dc2bcfeadeb45b121de183bd558045a69dda47ba..94a9bbea6874c0daf10510e26fa2862f8ebbfa81 100644 (file)
 #define AR5K_EEPROM_EEP_DELTA          10
 #define AR5K_EEPROM_N_MODES            3
 #define AR5K_EEPROM_N_5GHZ_CHAN                10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN   8
 #define AR5K_EEPROM_N_2GHZ_CHAN                3
 #define AR5K_EEPROM_N_2GHZ_CHAN_2413   4
 #define        AR5K_EEPROM_N_2GHZ_CHAN_MAX     4
index 192ad5c1fcc8813805702d7b75c4ab89761cfd42..a5edebeb0b4f7f748155551df76396602885078e 100644 (file)
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
        /* dpc will not be rescheduled */
        wl->resched = false;
 
+       /* inform publicly that interface is down */
+       wl->pub->up = false;
+
        return 0;
 }
 
index 95aa8e1683ecb4bdeb663e0cf605d699407b96f0..83324b3216527ec72195b35da104a4ce6c5ad6e1 100644 (file)
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
                return;
        }
        len = ETH_ALEN;
-       ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+       ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+                                 &len);
        if (ret) {
                IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
                               __LINE__);
index 46782f1102ac7c8e159c3ef764a54205326923fd..a47b306b522cd3a49fcbb1622083baacb245d621 100644 (file)
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
        const struct fw_img *img;
        size_t bufsz;
 
+       if (!iwl_is_ready_rf(priv))
+               return -EAGAIN;
+
        /* default is to dump the entire data segment */
        if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
                priv->dbgfs_sram_offset = 0x800000;
index d9694c58208c38cd60575bb48f37bc75f42884bd..4ffc18dc3a5761cc6b63c5aa60691727459e5e27 100644 (file)
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
index 39a6ca1f009c39fdaf8c60775348f4790ede45dd..d1a61ba6247ab68a13e4cd8943959c92d0fa3452 100644 (file)
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
        }
 
        iwl_dump_csr(trans);
-       iwl_dump_fh(trans, NULL, false);
+       iwl_dump_fh(trans, NULL);
 
        iwl_op_mode_nic_error(trans->op_mode);
 }
index 939c2f78df5833cc1d203deabe311568dc7ff88c..1e86ea2266d46971844ba5c66a2fb1142a47609d 100644 (file)
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 {
        int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       int pos = 0;
-       size_t bufsz = 0;
-#endif
        static const u32 fh_tbl[] = {
                FH_RSCSR_CHNL0_STTS_WPTR_REG,
                FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
                FH_TSSR_TX_STATUS_REG,
                FH_TSSR_TX_ERROR_REG
        };
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (display) {
-               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (buf) {
+               int pos = 0;
+               size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
                *buf = kmalloc(bufsz, GFP_KERNEL);
                if (!*buf)
                        return -ENOMEM;
+
                pos += scnprintf(*buf + pos, bufsz - pos,
                                "FH register values:\n");
-               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
                        pos += scnprintf(*buf + pos, bufsz - pos,
                                "  %34s: 0X%08x\n",
                                get_fh_string(fh_tbl[i]),
                                iwl_read_direct32(trans, fh_tbl[i]));
-               }
+
                return pos;
        }
 #endif
+
        IWL_ERR(trans, "FH register values:\n");
-       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
+       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
                IWL_ERR(trans, "  %34s: 0X%08x\n",
                        get_fh_string(fh_tbl[i]),
                        iwl_read_direct32(trans, fh_tbl[i]));
-       }
+
        return 0;
 }
 
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                     size_t count, loff_t *ppos)
 {
        struct iwl_trans *trans = file->private_data;
-       char *buf;
+       char *buf = NULL;
        int pos = 0;
        ssize_t ret = -EFAULT;
 
-       ret = pos = iwl_dump_fh(trans, &buf, true);
+       ret = pos = iwl_dump_fh(trans, &buf);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
index 30899901aef56b00e05c5f062c86cabcb54127d8..650f79a1f2bd4a89cd96326d63c85982b24db0aa 100644 (file)
@@ -57,8 +57,7 @@
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
-       struct page *page;
-       unsigned offset;
+       int pull_to;
 };
 
 #define NETFRONT_SKB_CB(skb)   ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(rxq)) != NULL) {
-               struct page *page = NETFRONT_SKB_CB(skb)->page;
-               void *vaddr = page_address(page);
-               unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-
-               memcpy(skb->data, vaddr + offset,
-                      skb_headlen(skb));
+               int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-               if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
-                       __free_page(page);
+               __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        unsigned long flags;
-       unsigned int len;
        int err;
 
        spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
                        }
                }
 
-               NETFRONT_SKB_CB(skb)->page =
-                       skb_frag_page(&skb_shinfo(skb)->frags[0]);
-               NETFRONT_SKB_CB(skb)->offset = rx->offset;
-
-               len = rx->status;
-               if (len > RX_COPY_THRESHOLD)
-                       len = RX_COPY_THRESHOLD;
-               skb_put(skb, len);
+               NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+               if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+                       NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 
-               if (rx->status > len) {
-                       skb_shinfo(skb)->frags[0].page_offset =
-                               rx->offset + len;
-                       skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
-                       skb->data_len = rx->status - len;
-               } else {
-                       __skb_fill_page_desc(skb, 0, NULL, 0, 0);
-                       skb_shinfo(skb)->nr_frags = 0;
-               }
+               skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+               skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
+               skb->data_len = rx->status;
 
                i = xennet_fill_frags(np, skb, &tmpq);
 
@@ -999,7 +980,7 @@ err:
                 * receive throughout using the standard receive
                 * buffer size was cut by 25%(!!!).
                 */
-               skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+               skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
                skb->len += skb->data_len;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
index 40a826a7295f7282ec8f05e2dcfd66d8f9601927..2fb2b9ea97ecc11d7e60af84d756b9eaad8dfbed 100644 (file)
@@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
        case BIODASDSYMMIO:
                return dasd_symm_io(device, argp);
        default:
-               return -ENOIOCTLCMD;
+               return -ENOTTY;
        }
 }
 
index cceae70279f6407d96f7249f02939de53405ea03..654c6921a6d462f5d9fc0f0b82e9813480b96670 100644 (file)
@@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
                break;
        default:
                /* if the discipline has an ioctl method try it. */
-               if (base->discipline->ioctl) {
+               rc = -ENOTTY;
+               if (base->discipline->ioctl)
                        rc = base->discipline->ioctl(block, cmd, argp);
-                       if (rc == -ENOIOCTLCMD)
-                               rc = -EINVAL;
-               } else
-                       rc = -EINVAL;
        }
        dasd_put_device(base);
        return rc;
index ea0aaa3f13d07549263a8b8e4da08acd92c3bc7e..a9f4049c6769316c368a716b0b6707d7082abd25 100644 (file)
@@ -47,6 +47,8 @@ struct bcm63xx_spi {
        /* Platform data */
        u32                     speed_hz;
        unsigned                fifo_size;
+       unsigned int            msg_type_shift;
+       unsigned int            msg_ctl_width;
 
        /* Data buffers */
        const unsigned char     *tx_ptr;
@@ -221,13 +223,20 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
        msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
 
        if (t->rx_buf && t->tx_buf)
-               msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT);
+               msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
        else if (t->rx_buf)
-               msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT);
+               msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
        else if (t->tx_buf)
-               msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT);
-
-       bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
+               msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
+
+       switch (bs->msg_ctl_width) {
+       case 8:
+               bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL);
+               break;
+       case 16:
+               bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
+               break;
+       }
 
        /* Issue the transfer */
        cmd = SPI_CMD_START_IMMEDIATE;
@@ -406,9 +415,21 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
        master->transfer_one_message = bcm63xx_spi_transfer_one;
        master->mode_bits = MODEBITS;
        bs->speed_hz = pdata->speed_hz;
+       bs->msg_type_shift = pdata->msg_type_shift;
+       bs->msg_ctl_width = pdata->msg_ctl_width;
        bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
        bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
 
+       switch (bs->msg_ctl_width) {
+       case 8:
+       case 16:
+               break;
+       default:
+               dev_err(dev, "unsupported MSG_CTL width: %d\n",
+                        bs->msg_ctl_width);
+               goto out_clk_disable;
+       }
+
        /* Initialize hardware */
        clk_enable(bs->clk);
        bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
index 3fe82d0e8caae583a2bdd8ac41a30bec5540fa43..5b06d31ab6a98678320b8c88456f0d3abde2cb35 100644 (file)
@@ -166,18 +166,17 @@ static long booke_wdt_ioctl(struct file *file,
 
        switch (cmd) {
        case WDIOC_GETSUPPORT:
-               if (copy_to_user((void *)arg, &ident, sizeof(ident)))
-                       return -EFAULT;
+               return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0;
        case WDIOC_GETSTATUS:
                return put_user(0, p);
        case WDIOC_GETBOOTSTATUS:
                /* XXX: something is clearing TSR */
                tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
                /* returns CARDRESET if last reset was caused by the WDT */
-               return (tmp ? WDIOF_CARDRESET : 0);
+               return put_user((tmp ? WDIOF_CARDRESET : 0), p);
        case WDIOC_SETOPTIONS:
                if (get_user(tmp, p))
-                       return -EINVAL;
+                       return -EFAULT;
                if (tmp == WDIOS_ENABLECARD) {
                        booke_wdt_ping();
                        break;
index 3f75129eb0a9f06cd0a7d194a3c390f5ee9bfd3f..f7abbaeebcaf51466efbc493c2fb433b01408bf3 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/jiffies.h>
-#include <linux/delay.h>
 
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/da9052.h>
index d4c50d63acbc14b03b8bcf3aac7d8c80fb1ea2d7..97ca359ae2bdfb8d072ee62348114a252660adf9 100644 (file)
@@ -101,19 +101,6 @@ static int platform_pci_resume(struct pci_dev *pdev)
        return 0;
 }
 
-static void __devinit prepare_shared_info(void)
-{
-#ifdef CONFIG_KEXEC
-       unsigned long addr;
-       struct shared_info *hvm_shared_info;
-
-       addr = alloc_xen_mmio(PAGE_SIZE);
-       hvm_shared_info = ioremap(addr, PAGE_SIZE);
-       memset(hvm_shared_info, 0, PAGE_SIZE);
-       xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT);
-#endif
-}
-
 static int __devinit platform_pci_init(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
 {
@@ -151,8 +138,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
        platform_mmio = mmio_addr;
        platform_mmiolen = mmio_len;
 
-       prepare_shared_info();
-
        if (!xen_have_vector_callback) {
                ret = xen_allocate_irq(pdev);
                if (ret) {
index 5eaa70c9d96e6bb3900930d63beb224f38d15c8f..71072ab99128aadf1090e2ceab32bae67827dc9c 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -73,7 +73,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 {
        unsigned int sz = sizeof(struct bio) + extra_size;
        struct kmem_cache *slab = NULL;
-       struct bio_slab *bslab;
+       struct bio_slab *bslab, *new_bio_slabs;
        unsigned int i, entry = -1;
 
        mutex_lock(&bio_slab_lock);
@@ -97,11 +97,12 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 
        if (bio_slab_nr == bio_slab_max && entry == -1) {
                bio_slab_max <<= 1;
-               bio_slabs = krealloc(bio_slabs,
-                                    bio_slab_max * sizeof(struct bio_slab),
-                                    GFP_KERNEL);
-               if (!bio_slabs)
+               new_bio_slabs = krealloc(bio_slabs,
+                                        bio_slab_max * sizeof(struct bio_slab),
+                                        GFP_KERNEL);
+               if (!new_bio_slabs)
                        goto out_unlock;
+               bio_slabs = new_bio_slabs;
        }
        if (entry == -1)
                entry = bio_slab_nr++;
index 1e519195d45bd1ab466f8b7ee12505f35a45d271..38e721b35d45388cb0febed8021a02277a4a2f1b 100644 (file)
@@ -1578,10 +1578,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         unsigned long nr_segs, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
+       struct blk_plug plug;
        ssize_t ret;
 
        BUG_ON(iocb->ki_pos != pos);
 
+       blk_start_plug(&plug);
        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;
@@ -1590,6 +1592,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                if (err < 0 && ret > 0)
                        ret = err;
        }
+       blk_finish_plug(&plug);
        return ret;
 }
 EXPORT_SYMBOL_GPL(blkdev_aio_write);
index a256f3b2a845f22ef1c0c8dd1310364a96b651f5..ff6475f409d64aaade5499f9d0cee64c2989a80c 100644 (file)
@@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
        ret = extent_from_logical(fs_info, logical, path,
                                        &found_key);
        btrfs_release_path(path);
-       if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
-               ret = -EINVAL;
        if (ret < 0)
                return ret;
+       if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+               return -EINVAL;
 
        extent_item_pos = logical - found_key.objectid;
        ret = iterate_extent_inodes(fs_info, found_key.objectid,
index 86eff48dab786d525d3395d31dcf3f45a11dafc1..43d1c5a3a030888544d8dd8f7b36239386552d08 100644 (file)
@@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace)
        btrfs_compress_op[idx]->free_workspace(workspace);
        atomic_dec(alloc_workspace);
 wake:
+       smp_mb();
        if (waitqueue_active(workspace_wait))
                wake_up(workspace_wait);
 }
index 9d7621f271ff1e5b3f2b57931d96dad265678d7d..6d183f60d63a0521e8c4461e4d5fd705163d17d1 100644 (file)
@@ -420,12 +420,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);
 
-       /*
-        * we removed the lowest blocker from the blocker list, so there may be
-        * more processible delayed refs.
-        */
-       wake_up(&fs_info->tree_mod_seq_wait);
-
        /*
         * anything that's lower than the lowest existing (read: blocked)
         * sequence number can be removed from the tree.
@@ -631,6 +625,9 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
        u32 nritems;
        int ret;
 
+       if (btrfs_header_level(eb) == 0)
+               return;
+
        nritems = btrfs_header_nritems(eb);
        for (i = nritems - 1; i >= 0; i--) {
                ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
index 4bab807227ad938c87039ea2aa1ef8f94fe46146..0d195b5076604b4350f88de9a58e9f4836fd785a 100644 (file)
@@ -1252,7 +1252,6 @@ struct btrfs_fs_info {
        atomic_t tree_mod_seq;
        struct list_head tree_mod_seq_list;
        struct seq_list tree_mod_seq_elem;
-       wait_queue_head_t tree_mod_seq_wait;
 
        /* this protects tree_mod_log */
        rwlock_t tree_mod_log_lock;
@@ -3192,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
                          struct bio *bio, u32 *dst);
 int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
-                             struct bio *bio, u64 logical_offset, u32 *dst);
+                             struct bio *bio, u64 logical_offset);
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             u64 objectid, u64 pos,
index 335605c8ceab730394d45d9975adaa7025203dd7..07d5eeb1e6f1df1f8ae2ddf94218b1f45298aeda 100644 (file)
@@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 
        rb_erase(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;
-       atomic_dec(&delayed_root->items);
-       if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
+       if (atomic_dec_return(&delayed_root->items) <
+           BTRFS_DELAYED_BACKGROUND &&
            waitqueue_active(&delayed_root->wait))
                wake_up(&delayed_root->wait);
 }
@@ -1028,9 +1028,10 @@ do_again:
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
-               if (curr)
+               if (curr) {
+                       mutex_unlock(&node->mutex);
                        goto do_again;
-               else
+               } else
                        goto delete_fail;
        }
 
@@ -1055,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
                delayed_node->count--;
 
                delayed_root = delayed_node->root->fs_info->delayed_root;
-               atomic_dec(&delayed_root->items);
-               if (atomic_read(&delayed_root->items) <
+               if (atomic_dec_return(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND &&
                    waitqueue_active(&delayed_root->wait))
                        wake_up(&delayed_root->wait);
index da7419ed01bb7e520cfbcac0fbef44ef607a92e5..ae94117733973e2d7aa8ea590c171fcf2e6bd26a 100644 (file)
 static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1)
 {
-       if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
-               if (ref1->root < ref2->root)
-                       return -1;
-               if (ref1->root > ref2->root)
-                       return 1;
-       } else {
-               if (ref1->parent < ref2->parent)
-                       return -1;
-               if (ref1->parent > ref2->parent)
-                       return 1;
-       }
+       if (ref1->root < ref2->root)
+               return -1;
+       if (ref1->root > ref2->root)
+               return 1;
+       if (ref1->parent < ref2->parent)
+               return -1;
+       if (ref1->parent > ref2->parent)
+               return 1;
        return 0;
 }
 
@@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
  * type of the delayed backrefs and content of delayed backrefs.
  */
 static int comp_entry(struct btrfs_delayed_ref_node *ref2,
-                     struct btrfs_delayed_ref_node *ref1)
+                     struct btrfs_delayed_ref_node *ref1,
+                     bool compare_seq)
 {
        if (ref1->bytenr < ref2->bytenr)
                return -1;
@@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
        if (ref1->type > ref2->type)
                return 1;
        /* merging of sequenced refs is not allowed */
-       if (ref1->seq < ref2->seq)
-               return -1;
-       if (ref1->seq > ref2->seq)
-               return 1;
+       if (compare_seq) {
+               if (ref1->seq < ref2->seq)
+                       return -1;
+               if (ref1->seq > ref2->seq)
+                       return 1;
+       }
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 rb_node);
 
-               cmp = comp_entry(entry, ins);
+               cmp = comp_entry(entry, ins, 1);
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
@@ -233,6 +233,114 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+static void inline drop_delayed_ref(struct btrfs_trans_handle *trans,
+                                   struct btrfs_delayed_ref_root *delayed_refs,
+                                   struct btrfs_delayed_ref_node *ref)
+{
+       rb_erase(&ref->rb_node, &delayed_refs->root);
+       ref->in_tree = 0;
+       btrfs_put_delayed_ref(ref);
+       delayed_refs->num_entries--;
+       if (trans->delayed_ref_updates)
+               trans->delayed_ref_updates--;
+}
+
+static int merge_ref(struct btrfs_trans_handle *trans,
+                    struct btrfs_delayed_ref_root *delayed_refs,
+                    struct btrfs_delayed_ref_node *ref, u64 seq)
+{
+       struct rb_node *node;
+       int merged = 0;
+       int mod = 0;
+       int done = 0;
+
+       node = rb_prev(&ref->rb_node);
+       while (node) {
+               struct btrfs_delayed_ref_node *next;
+
+               next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+               node = rb_prev(node);
+               if (next->bytenr != ref->bytenr)
+                       break;
+               if (seq && next->seq >= seq)
+                       break;
+               if (comp_entry(ref, next, 0))
+                       continue;
+
+               if (ref->action == next->action) {
+                       mod = next->ref_mod;
+               } else {
+                       if (ref->ref_mod < next->ref_mod) {
+                               struct btrfs_delayed_ref_node *tmp;
+
+                               tmp = ref;
+                               ref = next;
+                               next = tmp;
+                               done = 1;
+                       }
+                       mod = -next->ref_mod;
+               }
+
+               merged++;
+               drop_delayed_ref(trans, delayed_refs, next);
+               ref->ref_mod += mod;
+               if (ref->ref_mod == 0) {
+                       drop_delayed_ref(trans, delayed_refs, ref);
+                       break;
+               } else {
+                       /*
+                        * You can't have multiples of the same ref on a tree
+                        * block.
+                        */
+                       WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
+                               ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
+               }
+
+               if (done)
+                       break;
+               node = rb_prev(&ref->rb_node);
+       }
+
+       return merged;
+}
+
+void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+                             struct btrfs_fs_info *fs_info,
+                             struct btrfs_delayed_ref_root *delayed_refs,
+                             struct btrfs_delayed_ref_head *head)
+{
+       struct rb_node *node;
+       u64 seq = 0;
+
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       if (!list_empty(&fs_info->tree_mod_seq_list)) {
+               struct seq_list *elem;
+
+               elem = list_first_entry(&fs_info->tree_mod_seq_list,
+                                       struct seq_list, list);
+               seq = elem->seq;
+       }
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+
+       node = rb_prev(&head->node.rb_node);
+       while (node) {
+               struct btrfs_delayed_ref_node *ref;
+
+               ref = rb_entry(node, struct btrfs_delayed_ref_node,
+                              rb_node);
+               if (ref->bytenr != head->node.bytenr)
+                       break;
+
+               /* We can't merge refs that are outside of our seq count */
+               if (seq && ref->seq >= seq)
+                       break;
+               if (merge_ref(trans, delayed_refs, ref, seq))
+                       node = rb_prev(&head->node.rb_node);
+               else
+                       node = rb_prev(node);
+       }
+}
+
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
@@ -336,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans,
                 * ever changing the extent allocation tree.
                 */
                existing->ref_mod--;
-               if (existing->ref_mod == 0) {
-                       rb_erase(&existing->rb_node,
-                                &delayed_refs->root);
-                       existing->in_tree = 0;
-                       btrfs_put_delayed_ref(existing);
-                       delayed_refs->num_entries--;
-                       if (trans->delayed_ref_updates)
-                               trans->delayed_ref_updates--;
-               } else {
+               if (existing->ref_mod == 0)
+                       drop_delayed_ref(trans, delayed_refs, existing);
+               else
                        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
-               }
        } else {
                WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
@@ -662,9 +763,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, level, action,
                                   for_cow);
-       if (!need_ref_seq(for_cow, ref_root) &&
-           waitqueue_active(&fs_info->tree_mod_seq_wait))
-               wake_up(&fs_info->tree_mod_seq_wait);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -713,9 +811,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, owner, offset,
                                   action, for_cow);
-       if (!need_ref_seq(for_cow, ref_root) &&
-           waitqueue_active(&fs_info->tree_mod_seq_wait))
-               wake_up(&fs_info->tree_mod_seq_wait);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -744,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                   extent_op->is_data);
 
-       if (waitqueue_active(&fs_info->tree_mod_seq_wait))
-               wake_up(&fs_info->tree_mod_seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
index 0d7c90c366b629152796c0c717c0e4cd5f50e9b1..ab5300595847e20602c307620a3a5059ca258b06 100644 (file)
@@ -167,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op);
+void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+                             struct btrfs_fs_info *fs_info,
+                             struct btrfs_delayed_ref_root *delayed_refs,
+                             struct btrfs_delayed_ref_head *head);
 
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
index 62e0cafd6e250d5d1717656d3d5821e2d1bfec42..22e98e04c2eabbc0b4baeb2618033657a9344fb9 100644 (file)
@@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                ret = read_extent_buffer_pages(io_tree, eb, start,
                                               WAIT_COMPLETE,
                                               btree_get_extent, mirror_num);
-               if (!ret && !verify_parent_transid(io_tree, eb,
+               if (!ret) {
+                       if (!verify_parent_transid(io_tree, eb,
                                                   parent_transid, 0))
-                       break;
+                               break;
+                       else
+                               ret = -EIO;
+               }
 
                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
@@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work)
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;
 
-       atomic_dec(&fs_info->nr_async_submits);
-
-       if (atomic_read(&fs_info->nr_async_submits) < limit &&
+       if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);
 
@@ -2032,8 +2034,6 @@ int open_ctree(struct super_block *sb,
        fs_info->free_chunk_space = 0;
        fs_info->tree_mod_log = RB_ROOT;
 
-       init_waitqueue_head(&fs_info->tree_mod_seq_wait);
-
        /* readahead state */
        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
        spin_lock_init(&fs_info->reada_lock);
@@ -2528,8 +2528,7 @@ retry_root_backup:
                goto fail_trans_kthread;
 
        /* do not make disk changes in broken FS */
-       if (btrfs_super_log_root(disk_super) != 0 &&
-           !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
+       if (btrfs_super_log_root(disk_super) != 0) {
                u64 bytenr = btrfs_super_log_root(disk_super);
 
                if (fs_devices->rw_devices == 0) {
@@ -3189,30 +3188,14 @@ int close_ctree(struct btrfs_root *root)
        /* clear out the rbtree of defraggable inodes */
        btrfs_run_defrag_inodes(fs_info);
 
-       /*
-        * Here come 2 situations when btrfs is broken to flip readonly:
-        *
-        * 1. when btrfs flips readonly somewhere else before
-        * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
-        * and btrfs will skip to write sb directly to keep
-        * ERROR state on disk.
-        *
-        * 2. when btrfs flips readonly just in btrfs_commit_super,
-        * and in such case, btrfs cannot write sb via btrfs_commit_super,
-        * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
-        * btrfs will cleanup all FS resources first and write sb then.
-        */
        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
                ret = btrfs_commit_super(root);
                if (ret)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }
 
-       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
-               ret = btrfs_error_commit_super(root);
-               if (ret)
-                       printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
-       }
+       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+               btrfs_error_commit_super(root);
 
        btrfs_put_block_group_cache(fs_info);
 
@@ -3434,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
        if (read_only)
                return 0;
 
-       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
-               printk(KERN_WARNING "warning: mount fs with errors, "
-                      "running btrfsck is recommended\n");
-       }
-
        return 0;
 }
 
-int btrfs_error_commit_super(struct btrfs_root *root)
+void btrfs_error_commit_super(struct btrfs_root *root)
 {
-       int ret;
-
        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -3455,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root)
 
        /* cleanup FS via transaction */
        btrfs_cleanup_transaction(root);
-
-       ret = write_ctree_super(NULL, root, 0);
-
-       return ret;
 }
 
 static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
@@ -3782,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
                /* FIXME: cleanup wait for commit */
                t->in_commit = 1;
                t->blocked = 1;
+               smp_mb();
                if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
                        wake_up(&root->fs_info->transaction_blocked_wait);
 
                t->blocked = 0;
+               smp_mb();
                if (waitqueue_active(&root->fs_info->transaction_wait))
                        wake_up(&root->fs_info->transaction_wait);
 
                t->commit_done = 1;
+               smp_mb();
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);
 
index 95e147eea23952c689ee9e87cfd39e7d61fde96b..c5b00a735fefac258b7914223139d902efacbf99 100644 (file)
@@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, int max_mirrors);
 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
 int btrfs_commit_super(struct btrfs_root *root);
-int btrfs_error_commit_super(struct btrfs_root *root);
+void btrfs_error_commit_super(struct btrfs_root *root);
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize);
 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
index 4e1b153b7c47c99e00adffd8ee6d24e2db29093d..ba58024d40d3eaf486c50d96cd116c3b828b9c75 100644 (file)
@@ -2251,6 +2251,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                        }
                }
 
+               /*
+                * We need to try and merge add/drops of the same ref since we
+                * can run into issues with relocate dropping the implicit ref
+                * and then it being added back again before the drop can
+                * finish.  If we merged anything we need to re-loop so we can
+                * get a good ref.
+                */
+               btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
+                                        locked_ref);
+
                /*
                 * locked_ref is the head node, so we have to go one
                 * node back for any delayed ref updates
@@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
-               /*
-                * we modified num_entries, but as we're currently running
-                * delayed refs, skip
-                *     wake_up(&delayed_refs->seq_wait);
-                * here.
-                */
+               if (locked_ref) {
+                       /*
+                        * when we play the delayed ref, also correct the
+                        * ref_mod on head
+                        */
+                       switch (ref->action) {
+                       case BTRFS_ADD_DELAYED_REF:
+                       case BTRFS_ADD_DELAYED_EXTENT:
+                               locked_ref->node.ref_mod -= ref->ref_mod;
+                               break;
+                       case BTRFS_DROP_DELAYED_REF:
+                               locked_ref->node.ref_mod += ref->ref_mod;
+                               break;
+                       default:
+                               WARN_ON(1);
+                       }
+               }
                spin_unlock(&delayed_refs->lock);
 
                ret = run_one_delayed_ref(trans, root, ref, extent_op,
@@ -2350,22 +2371,6 @@ next:
        return count;
 }
 
-static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
-                              struct btrfs_delayed_ref_root *delayed_refs,
-                              unsigned long num_refs,
-                              struct list_head *first_seq)
-{
-       spin_unlock(&delayed_refs->lock);
-       pr_debug("waiting for more refs (num %ld, first %p)\n",
-                num_refs, first_seq);
-       wait_event(fs_info->tree_mod_seq_wait,
-                  num_refs != delayed_refs->num_entries ||
-                  fs_info->tree_mod_seq_list.next != first_seq);
-       pr_debug("done waiting for more refs (num %ld, first %p)\n",
-                delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
-       spin_lock(&delayed_refs->lock);
-}
-
 #ifdef SCRAMBLE_DELAYED_REFS
 /*
  * Normally delayed refs get processed in ascending bytenr order. This
@@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
-       struct list_head *first_seq = NULL;
        int ret;
        u64 delayed_start;
        int run_all = count == (unsigned long)-1;
        int run_most = 0;
-       unsigned long num_refs = 0;
-       int consider_waiting;
+       int loops;
 
        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
@@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        delayed_refs = &trans->transaction->delayed_refs;
        INIT_LIST_HEAD(&cluster);
 again:
-       consider_waiting = 0;
+       loops = 0;
        spin_lock(&delayed_refs->lock);
 
 #ifdef SCRAMBLE_DELAYED_REFS
@@ -2512,31 +2515,6 @@ again:
                if (ret)
                        break;
 
-               if (delayed_start >= delayed_refs->run_delayed_start) {
-                       if (consider_waiting == 0) {
-                               /*
-                                * btrfs_find_ref_cluster looped. let's do one
-                                * more cycle. if we don't run any delayed ref
-                                * during that cycle (because we can't because
-                                * all of them are blocked) and if the number of
-                                * refs doesn't change, we avoid busy waiting.
-                                */
-                               consider_waiting = 1;
-                               num_refs = delayed_refs->num_entries;
-                               first_seq = root->fs_info->tree_mod_seq_list.next;
-                       } else {
-                               wait_for_more_refs(root->fs_info, delayed_refs,
-                                                  num_refs, first_seq);
-                               /*
-                                * after waiting, things have changed. we
-                                * dropped the lock and someone else might have
-                                * run some refs, built new clusters and so on.
-                                * therefore, we restart staleness detection.
-                                */
-                               consider_waiting = 0;
-                       }
-               }
-
                ret = run_clustered_refs(trans, root, &cluster);
                if (ret < 0) {
                        spin_unlock(&delayed_refs->lock);
@@ -2549,9 +2527,26 @@ again:
                if (count == 0)
                        break;
 
-               if (ret || delayed_refs->run_delayed_start == 0) {
+               if (delayed_start >= delayed_refs->run_delayed_start) {
+                       if (loops == 0) {
+                               /*
+                                * btrfs_find_ref_cluster looped. let's do one
+                                * more cycle. if we don't run any delayed ref
+                                * during that cycle (because we can't because
+                                * all of them are blocked), bail out.
+                                */
+                               loops = 1;
+                       } else {
+                               /*
+                                * no runnable refs left, stop trying
+                                */
+                               BUG_ON(run_all);
+                               break;
+                       }
+               }
+               if (ret) {
                        /* refs were run, let's reset staleness detection */
-                       consider_waiting = 0;
+                       loops = 0;
                }
        }
 
@@ -3007,17 +3002,16 @@ again:
        }
        spin_unlock(&block_group->lock);
 
-       num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
+       /*
+        * Try to preallocate enough space based on how big the block group is.
+        * Keep in mind this has to include any pinned space which could end up
+        * taking up quite a bit since it's not folded into the other space
+        * cache.
+        */
+       num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
        if (!num_pages)
                num_pages = 1;
 
-       /*
-        * Just to make absolutely sure we have enough space, we're going to
-        * preallocate 12 pages worth of space for each block group.  In
-        * practice we ought to use at most 8, but we need extra space so we can
-        * add our header and have a terminator between the extents and the
-        * bitmaps.
-        */
        num_pages *= 16;
        num_pages *= PAGE_CACHE_SIZE;
 
@@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        if (root->fs_info->quota_enabled) {
                ret = btrfs_qgroup_reserve(root, num_bytes +
                                           nr_extents * root->leafsize);
-               if (ret)
+               if (ret) {
+                       mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
                        return ret;
+               }
        }
 
        ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
        rb_erase(&head->node.rb_node, &delayed_refs->root);
 
        delayed_refs->num_entries--;
-       smp_mb();
-       if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
-               wake_up(&root->fs_info->tree_mod_seq_wait);
 
        /*
         * we don't take a ref on the node because we're removing it from the
index 45c81bb4ac820323c7fd4282a0cf71f7cc2d190d..4c878476bb91ce0985dabc25464622442aaca54a 100644 (file)
@@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              state, mirror);
-                       if (ret) {
-                               /* no IO indicated but software detected errors
-                                * in the block, either checksum errors or
-                                * issues with the contents */
-                               struct btrfs_root *root =
-                                       BTRFS_I(page->mapping->host)->root;
-                               struct btrfs_device *device;
-
+                       if (ret)
                                uptodate = 0;
-                               device = btrfs_find_device_for_logical(
-                                               root, start, mirror);
-                               if (device)
-                                       btrfs_dev_stat_inc_and_print(device,
-                                               BTRFS_DEV_STAT_CORRUPTION_ERRS);
-                       } else {
+                       else
                                clean_io_failure(start, page);
-                       }
                }
 
                if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
index b45b9de0c21d0f773f19f45ee10ae9803fa36f3a..857d93cd01dc579eb46838624349442fef07cf00 100644 (file)
@@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
 }
 
 int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
-                             struct bio *bio, u64 offset, u32 *dst)
+                             struct bio *bio, u64 offset)
 {
-       return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1);
+       return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
 }
 
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
index 6e8f416773d4b221713c30079eb1de1f732e5ded..ec154f95464696cfa7df3cd6ab87ac4a0185ed03 100644 (file)
@@ -1008,9 +1008,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;
 
-       atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
-
-       if (atomic_read(&root->fs_info->async_delalloc_pages) <
+       if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);
@@ -1885,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                                trans = btrfs_join_transaction_nolock(root);
                        else
                                trans = btrfs_join_transaction(root);
-                       if (IS_ERR(trans))
-                               return PTR_ERR(trans);
+                       if (IS_ERR(trans)) {
+                               ret = PTR_ERR(trans);
+                               trans = NULL;
+                               goto out;
+                       }
                        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                        ret = btrfs_update_inode_fallback(trans, root, inode);
                        if (ret) /* -ENOMEM or corruption */
@@ -3174,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-       ret = btrfs_update_inode(trans, root, dir);
+       ret = btrfs_update_inode_fallback(trans, root, dir);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
 out:
@@ -5774,18 +5775,112 @@ out:
        return ret;
 }
 
+static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+                             struct extent_state **cached_state, int writing)
+{
+       struct btrfs_ordered_extent *ordered;
+       int ret = 0;
+
+       while (1) {
+               lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                0, cached_state);
+               /*
+                * We're concerned with the entire range that we're going to be
+                * doing DIO to, so we need to make sure there's no ordered
+                * extents in this range.
+                */
+               ordered = btrfs_lookup_ordered_range(inode, lockstart,
+                                                    lockend - lockstart + 1);
+
+               /*
+                * We need to make sure there are no buffered pages in this
+                * range either, we could have raced between the invalidate in
+                * generic_file_direct_write and locking the extent.  The
+                * invalidate needs to happen so that reads after a write do not
+                * get stale data.
+                */
+               if (!ordered && (!writing ||
+                   !test_range_bit(&BTRFS_I(inode)->io_tree,
+                                   lockstart, lockend, EXTENT_UPTODATE, 0,
+                                   *cached_state)))
+                       break;
+
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                    cached_state, GFP_NOFS);
+
+               if (ordered) {
+                       btrfs_start_ordered_extent(inode, ordered, 1);
+                       btrfs_put_ordered_extent(ordered);
+               } else {
+                       /* Screw you mmap */
+                       ret = filemap_write_and_wait_range(inode->i_mapping,
+                                                          lockstart,
+                                                          lockend);
+                       if (ret)
+                               break;
+
+                       /*
+                        * If we found a page that couldn't be invalidated just
+                        * fall back to buffered.
+                        */
+                       ret = invalidate_inode_pages2_range(inode->i_mapping,
+                                       lockstart >> PAGE_CACHE_SHIFT,
+                                       lockend >> PAGE_CACHE_SHIFT);
+                       if (ret)
+                               break;
+               }
+
+               cond_resched();
+       }
+
+       return ret;
+}
+
 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
 {
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct extent_state *cached_state = NULL;
        u64 start = iblock << inode->i_blkbits;
+       u64 lockstart, lockend;
        u64 len = bh_result->b_size;
        struct btrfs_trans_handle *trans;
+       int unlock_bits = EXTENT_LOCKED;
+       int ret;
+
+       if (create) {
+               ret = btrfs_delalloc_reserve_space(inode, len);
+               if (ret)
+                       return ret;
+               unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
+       } else {
+               len = min_t(u64, len, root->sectorsize);
+       }
+
+       lockstart = start;
+       lockend = start + len - 1;
+
+       /*
+        * If this errors out it's because we couldn't invalidate pagecache for
+        * this range and we need to fallback to buffered.
+        */
+       if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
+               return -ENOTBLK;
+
+       if (create) {
+               ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+                                    lockend, EXTENT_DELALLOC, NULL,
+                                    &cached_state, GFP_NOFS);
+               if (ret)
+                       goto unlock_err;
+       }
 
        em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-       if (IS_ERR(em))
-               return PTR_ERR(em);
+       if (IS_ERR(em)) {
+               ret = PTR_ERR(em);
+               goto unlock_err;
+       }
 
        /*
         * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
@@ -5804,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
        if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
            em->block_start == EXTENT_MAP_INLINE) {
                free_extent_map(em);
-               return -ENOTBLK;
+               ret = -ENOTBLK;
+               goto unlock_err;
        }
 
        /* Just a good old fashioned hole, return */
        if (!create && (em->block_start == EXTENT_MAP_HOLE ||
                        test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
                free_extent_map(em);
-               /* DIO will do one hole at a time, so just unlock a sector */
-               unlock_extent(&BTRFS_I(inode)->io_tree, start,
-                             start + root->sectorsize - 1);
-               return 0;
+               ret = 0;
+               goto unlock_err;
        }
 
        /*
@@ -5827,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
         *
         */
        if (!create) {
-               len = em->len - (start - em->start);
-               goto map;
+               len = min(len, em->len - (start - em->start));
+               lockstart = start + len;
+               goto unlock;
        }
 
        if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
@@ -5860,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                        btrfs_end_transaction(trans, root);
                        if (ret) {
                                free_extent_map(em);
-                               return ret;
+                               goto unlock_err;
                        }
                        goto unlock;
                }
@@ -5873,14 +5968,12 @@ must_cow:
         */
        len = bh_result->b_size;
        em = btrfs_new_extent_direct(inode, em, start, len);
-       if (IS_ERR(em))
-               return PTR_ERR(em);
+       if (IS_ERR(em)) {
+               ret = PTR_ERR(em);
+               goto unlock_err;
+       }
        len = min(len, em->len - (start - em->start));
 unlock:
-       clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
-                         EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
-                         0, NULL, GFP_NOFS);
-map:
        bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
                inode->i_blkbits;
        bh_result->b_size = len;
@@ -5898,9 +5991,44 @@ map:
                        i_size_write(inode, start + len);
        }
 
+       /*
+        * In the case of write we need to clear and unlock the entire range,
+        * in the case of read we need to unlock only the end area that we
+        * aren't using if there is any left over space.
+        */
+       if (lockstart < lockend) {
+               if (create && len < lockend - lockstart) {
+                       clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+                                        lockstart + len - 1, unlock_bits, 1, 0,
+                                        &cached_state, GFP_NOFS);
+                       /*
+                        * Beside unlock, we also need to cleanup reserved space
+                        * for the left range by attaching EXTENT_DO_ACCOUNTING.
+                        */
+                       clear_extent_bit(&BTRFS_I(inode)->io_tree,
+                                        lockstart + len, lockend,
+                                        unlock_bits | EXTENT_DO_ACCOUNTING,
+                                        1, 0, NULL, GFP_NOFS);
+               } else {
+                       clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+                                        lockend, unlock_bits, 1, 0,
+                                        &cached_state, GFP_NOFS);
+               }
+       } else {
+               free_extent_state(cached_state);
+       }
+
        free_extent_map(em);
 
        return 0;
+
+unlock_err:
+       if (create)
+               unlock_bits |= EXTENT_DO_ACCOUNTING;
+
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                        unlock_bits, 1, 0, &cached_state, GFP_NOFS);
+       return ret;
 }
 
 struct btrfs_dio_private {
@@ -5908,7 +6036,6 @@ struct btrfs_dio_private {
        u64 logical_offset;
        u64 disk_bytenr;
        u64 bytes;
-       u32 *csums;
        void *private;
 
        /* number of bios pending for this dio */
@@ -5928,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 start;
-       u32 *private = dip->csums;
 
        start = dip->logical_offset;
        do {
@@ -5936,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                        struct page *page = bvec->bv_page;
                        char *kaddr;
                        u32 csum = ~(u32)0;
+                       u64 private = ~(u32)0;
                        unsigned long flags;
 
+                       if (get_state_private(&BTRFS_I(inode)->io_tree,
+                                             start, &private))
+                               goto failed;
                        local_irq_save(flags);
                        kaddr = kmap_atomic(page);
                        csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
@@ -5947,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                        local_irq_restore(flags);
 
                        flush_dcache_page(bvec->bv_page);
-                       if (csum != *private) {
+                       if (csum != private) {
+failed:
                                printk(KERN_ERR "btrfs csum failed ino %llu off"
                                      " %llu csum %u private %u\n",
                                      (unsigned long long)btrfs_ino(inode),
                                      (unsigned long long)start,
-                                     csum, *private);
+                                     csum, (unsigned)private);
                                err = -EIO;
                        }
                }
 
                start += bvec->bv_len;
-               private++;
                bvec++;
        } while (bvec <= bvec_end);
 
@@ -5966,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                      dip->logical_offset + dip->bytes - 1);
        bio->bi_private = dip->private;
 
-       kfree(dip->csums);
        kfree(dip);
 
        /* If we had a csum failure make sure to clear the uptodate flag */
@@ -6072,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 
 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                                         int rw, u64 file_offset, int skip_sum,
-                                        u32 *csums, int async_submit)
+                                        int async_submit)
 {
        int write = rw & REQ_WRITE;
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -6105,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                if (ret)
                        goto err;
        } else if (!skip_sum) {
-               ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
-                                         file_offset, csums);
+               ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
                if (ret)
                        goto err;
        }
@@ -6132,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        u64 submit_len = 0;
        u64 map_length;
        int nr_pages = 0;
-       u32 *csums = dip->csums;
        int ret = 0;
        int async_submit = 0;
-       int write = rw & REQ_WRITE;
 
        map_length = orig_bio->bi_size;
        ret = btrfs_map_block(map_tree, READ, start_sector << 9,
@@ -6171,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        atomic_inc(&dip->pending_bios);
                        ret = __btrfs_submit_dio_bio(bio, inode, rw,
                                                     file_offset, skip_sum,
-                                                    csums, async_submit);
+                                                    async_submit);
                        if (ret) {
                                bio_put(bio);
                                atomic_dec(&dip->pending_bios);
                                goto out_err;
                        }
 
-                       /* Write's use the ordered csums */
-                       if (!write && !skip_sum)
-                               csums = csums + nr_pages;
                        start_sector += submit_len >> 9;
                        file_offset += submit_len;
 
@@ -6210,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 
 submit:
        ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
-                                    csums, async_submit);
+                                    async_submit);
        if (!ret)
                return 0;
 
@@ -6246,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
                ret = -ENOMEM;
                goto free_ordered;
        }
-       dip->csums = NULL;
-
-       /* Write's use the ordered csum stuff, so we don't need dip->csums */
-       if (!write && !skip_sum) {
-               dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
-               if (!dip->csums) {
-                       kfree(dip);
-                       ret = -ENOMEM;
-                       goto free_ordered;
-               }
-       }
 
        dip->private = bio->bi_private;
        dip->inode = inode;
@@ -6341,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
 out:
        return retval;
 }
+
 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                        const struct iovec *iov, loff_t offset,
                        unsigned long nr_segs)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
-       struct btrfs_ordered_extent *ordered;
-       struct extent_state *cached_state = NULL;
-       u64 lockstart, lockend;
-       ssize_t ret;
-       int writing = rw & WRITE;
-       int write_bits = 0;
-       size_t count = iov_length(iov, nr_segs);
 
        if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
-                           offset, nr_segs)) {
+                           offset, nr_segs))
                return 0;
-       }
-
-       lockstart = offset;
-       lockend = offset + count - 1;
-
-       if (writing) {
-               ret = btrfs_delalloc_reserve_space(inode, count);
-               if (ret)
-                       goto out;
-       }
-
-       while (1) {
-               lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                                0, &cached_state);
-               /*
-                * We're concerned with the entire range that we're going to be
-                * doing DIO to, so we need to make sure theres no ordered
-                * extents in this range.
-                */
-               ordered = btrfs_lookup_ordered_range(inode, lockstart,
-                                                    lockend - lockstart + 1);
-
-               /*
-                * We need to make sure there are no buffered pages in this
-                * range either, we could have raced between the invalidate in
-                * generic_file_direct_write and locking the extent.  The
-                * invalidate needs to happen so that reads after a write do not
-                * get stale data.
-                */
-               if (!ordered && (!writing ||
-                   !test_range_bit(&BTRFS_I(inode)->io_tree,
-                                   lockstart, lockend, EXTENT_UPTODATE, 0,
-                                   cached_state)))
-                       break;
-
-               unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                                    &cached_state, GFP_NOFS);
-
-               if (ordered) {
-                       btrfs_start_ordered_extent(inode, ordered, 1);
-                       btrfs_put_ordered_extent(ordered);
-               } else {
-                       /* Screw you mmap */
-                       ret = filemap_write_and_wait_range(file->f_mapping,
-                                                          lockstart,
-                                                          lockend);
-                       if (ret)
-                               goto out;
-
-                       /*
-                        * If we found a page that couldn't be invalidated just
-                        * fall back to buffered.
-                        */
-                       ret = invalidate_inode_pages2_range(file->f_mapping,
-                                       lockstart >> PAGE_CACHE_SHIFT,
-                                       lockend >> PAGE_CACHE_SHIFT);
-                       if (ret) {
-                               if (ret == -EBUSY)
-                                       ret = 0;
-                               goto out;
-                       }
-               }
-
-               cond_resched();
-       }
 
-       /*
-        * we don't use btrfs_set_extent_delalloc because we don't want
-        * the dirty or uptodate bits
-        */
-       if (writing) {
-               write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
-               ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                                    EXTENT_DELALLOC, NULL, &cached_state,
-                                    GFP_NOFS);
-               if (ret) {
-                       clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-                                        lockend, EXTENT_LOCKED | write_bits,
-                                        1, 0, &cached_state, GFP_NOFS);
-                       goto out;
-               }
-       }
-
-       free_extent_state(cached_state);
-       cached_state = NULL;
-
-       ret = __blockdev_direct_IO(rw, iocb, inode,
+       return __blockdev_direct_IO(rw, iocb, inode,
                   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
                   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
                   btrfs_submit_direct, 0);
-
-       if (ret < 0 && ret != -EIOCBQUEUED) {
-               clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
-                             offset + iov_length(iov, nr_segs) - 1,
-                             EXTENT_LOCKED | write_bits, 1, 0,
-                             &cached_state, GFP_NOFS);
-       } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
-               /*
-                * We're falling back to buffered, unlock the section we didn't
-                * do IO on.
-                */
-               clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
-                             offset + iov_length(iov, nr_segs) - 1,
-                             EXTENT_LOCKED | write_bits, 1, 0,
-                             &cached_state, GFP_NOFS);
-       }
-out:
-       free_extent_state(cached_state);
-       return ret;
 }
 
 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
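
The inode.c changes above move the extent locking that btrfs_direct_IO used to perform down into btrfs_get_blocks_direct(): lock_extent_direct() takes the range lock, looks for ordered extents or still-dirty pages, and when it finds any it drops the lock, waits (or flushes and invalidates the page cache), and tries again; the per-bio csum array is likewise replaced by get_state_private() lookups. A minimal userspace sketch of that lock/check/back-off/retry shape follows; the helper names are invented for the illustration and are not kernel APIs.

/*
 * Abstract sketch of the lock/check/back-off/retry pattern used by the new
 * lock_extent_direct() above.  range_lock(), range_unlock(),
 * conflict_pending() and wait_for_conflict() are invented stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static int pending = 2;                 /* simulated conflicting work items */

static void range_lock(void)   { /* take the range lock */ }
static void range_unlock(void) { /* drop the range lock */ }

static bool conflict_pending(void)      /* ordered extent or dirty page? */
{
        return pending > 0;
}

static void wait_for_conflict(void)     /* flush / wait for one conflict */
{
        pending--;
}

static int lock_range_for_dio(void)
{
        for (;;) {
                range_lock();
                if (!conflict_pending())
                        return 0;       /* locked with nothing in the way */
                range_unlock();         /* never wait while holding the lock */
                wait_for_conflict();
        }
}

int main(void)
{
        int ret = lock_range_for_dio();

        printf("range locked, ret=%d\n", ret);
        range_unlock();                 /* the caller drops the lock when done */
        return 0;
}

The property the sketch tries to show is that the range lock is never held while waiting for the conflicting work to finish.
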
index 7bb755677a220f71fd0540932c0c19565aec6a23..9df50fa8a0781ba387553297fbe442ed964e671e 100644 (file)
@@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root,
        uuid_le_gen(&new_uuid);
        memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
        root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
-       root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec);
+       root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
        root_item.ctime = root_item.otime;
        btrfs_set_root_ctransid(&root_item, trans->transid);
        btrfs_set_root_otransid(&root_item, trans->transid);
index a44eff0748051b1627448e54f281ca04cd77c719..2a1762c660416c662d32f95060046ba2557ab903 100644 (file)
@@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
        if (eb->lock_nested) {
                read_lock(&eb->lock);
-               if (&eb->lock_nested && current->pid == eb->lock_owner) {
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
                        read_unlock(&eb->lock);
                        return;
                }
index bc424ae5a81a49ef15cadb0159812f80acf0221f..38b42e7bc91d0e258a7fd086d793aae3d43ac4c1 100644 (file)
@@ -1364,13 +1364,17 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
        spin_lock(&fs_info->qgroup_lock);
 
        dstgroup = add_qgroup_rb(fs_info, objectid);
-       if (!dstgroup)
+       if (IS_ERR(dstgroup)) {
+               ret = PTR_ERR(dstgroup);
                goto unlock;
+       }
 
        if (srcid) {
                srcgroup = find_qgroup_rb(fs_info, srcid);
-               if (!srcgroup)
+               if (!srcgroup) {
+                       ret = -EINVAL;
                        goto unlock;
+               }
                dstgroup->rfer = srcgroup->rfer - level_size;
                dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
                srcgroup->excl = level_size;
@@ -1379,8 +1383,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
                qgroup_dirty(fs_info, srcgroup);
        }
 
-       if (!inherit)
+       if (!inherit) {
+               ret = -EINVAL;
                goto unlock;
+       }
 
        i_qgroups = (u64 *)(inherit + 1);
        for (i = 0; i < inherit->num_qgroups; ++i) {
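
The qgroup.c fix above makes every early exit through the unlock label carry a real error code (the PTR_ERR() of the failed allocation, or -EINVAL) instead of falling through with whatever ret happened to hold. Below is a small self-contained version of the same goto-unlock idiom; the mutex and the lookup helper are made up for the example.

/* The "set an error, then goto unlock" idiom used in btrfs_qgroup_inherit(). */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int known_group = 1;

static int lookup_group(int id)         /* invented lookup helper */
{
        return id == known_group;
}

static int inherit_group(int srcid)
{
        int ret = 0;

        pthread_mutex_lock(&lock);

        if (!lookup_group(srcid)) {
                ret = -EINVAL;          /* set the error before jumping */
                goto unlock;
        }

        /* ...work done under the lock on the success path... */

unlock:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("known group:   %d\n", inherit_group(1));
        printf("missing group: %d\n", inherit_group(2));
        return 0;
}

Setting ret immediately before the goto keeps the single unlock site valid for both the success and the failure paths.
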
index 6bb465cca20f5c68804ac3c4f78d4aa650fee96c..10d8e4d88071747651afd3a875eae0fb407c22e4 100644 (file)
@@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
        struct timespec ct = CURRENT_TIME;
 
        spin_lock(&root->root_times_lock);
-       item->ctransid = trans->transid;
+       item->ctransid = cpu_to_le64(trans->transid);
        item->ctime.sec = cpu_to_le64(ct.tv_sec);
-       item->ctime.nsec = cpu_to_le64(ct.tv_nsec);
+       item->ctime.nsec = cpu_to_le32(ct.tv_nsec);
        spin_unlock(&root->root_times_lock);
 }
index f2eb24c477a3ca1c60ee95b51040354dd5a869ba..83d6f9f9c2209861efdec86dec9ad54d629deeb9 100644 (file)
@@ -838,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_root *root = fs_info->tree_root;
-       int ret;
 
        trace_btrfs_sync_fs(wait);
 
@@ -849,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 
        btrfs_wait_ordered_extents(root, 0, 0);
 
-       trans = btrfs_start_transaction(root, 0);
+       spin_lock(&fs_info->trans_lock);
+       if (!fs_info->running_transaction) {
+               spin_unlock(&fs_info->trans_lock);
+               return 0;
+       }
+       spin_unlock(&fs_info->trans_lock);
+
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
-       ret = btrfs_commit_transaction(trans, root);
-       return ret;
+       return btrfs_commit_transaction(trans, root);
 }
 
 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1530,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
        while (cur_devices) {
                head = &cur_devices->devices;
                list_for_each_entry(dev, head, dev_list) {
+                       if (dev->missing)
+                               continue;
                        if (!first_dev || dev->devid < first_dev->devid)
                                first_dev = dev;
                }
index 17be3dedacbab1c47084270153ed059719984470..27c26004e050a33211674363cfd80c02c98d1063 100644 (file)
@@ -1031,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 
        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
+       parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        if (ret)
                goto abort_trans_dput;
@@ -1066,7 +1067,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
                        BTRFS_UUID_SIZE);
        new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
-       new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec);
+       new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
        btrfs_set_root_otransid(new_root_item, trans->transid);
        memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
        memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
index e86ae04abe6a78e72dd86b3a813bce3107a8802e..88b969aeeb71a53128ae941e569ad19f7b1038c3 100644 (file)
@@ -227,9 +227,8 @@ loop_lock:
                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;
-               atomic_dec(&fs_info->nr_async_bios);
 
-               if (atomic_read(&fs_info->nr_async_bios) < limit &&
+               if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);
 
@@ -569,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                memcpy(new_device, device, sizeof(*new_device));
 
                /* Safe because we are under uuid_mutex */
-               name = rcu_string_strdup(device->name->str, GFP_NOFS);
-               BUG_ON(device->name && !name); /* -ENOMEM */
-               rcu_assign_pointer(new_device->name, name);
+               if (device->name) {
+                       name = rcu_string_strdup(device->name->str, GFP_NOFS);
+                       BUG_ON(device->name && !name); /* -ENOMEM */
+                       rcu_assign_pointer(new_device->name, name);
+               }
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
@@ -4605,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        return ret;
 }
 
-struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
-                                                  u64 logical, int mirror_num)
-{
-       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
-       int ret;
-       u64 map_length = 0;
-       struct btrfs_bio *bbio = NULL;
-       struct btrfs_device *device;
-
-       BUG_ON(mirror_num == 0);
-       ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
-                             mirror_num);
-       if (ret) {
-               BUG_ON(bbio != NULL);
-               return NULL;
-       }
-       BUG_ON(mirror_num != bbio->mirror_num);
-       device = bbio->stripes[mirror_num - 1].dev;
-       kfree(bbio);
-       return device;
-}
-
 int btrfs_read_chunk_tree(struct btrfs_root *root)
 {
        struct btrfs_path *path;
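
The worker-loop hunk in volumes.c folds a separate atomic_dec()/atomic_read() pair into one atomic_dec_return(), so the wake-up test sees exactly the value this decrement produced rather than re-reading a counter that other submitters may have changed in the meantime. A rough userspace analogue using C11 atomics follows; the counter name and threshold are illustrative only.

/*
 * Why the decrement and the limit test want to be a single atomic step.
 * In a single thread both versions behave the same; under concurrency the
 * racy one tests a value another thread may already have changed.
 */
#include <stdatomic.h>
#include <stdio.h>

#define LIMIT 4

static atomic_int nr_async_bios;

static void finish_bio_racy(void)
{
        atomic_fetch_sub(&nr_async_bios, 1);
        /* ...another submitter may bump or drop the counter right here... */
        if (atomic_load(&nr_async_bios) < LIMIT)
                printf("racy: wake waiters\n");
}

static void finish_bio_safe(void)
{
        /* fetch_sub returns the old value, so old - 1 is what we produced */
        if (atomic_fetch_sub(&nr_async_bios, 1) - 1 < LIMIT)
                printf("safe: wake waiters\n");
}

int main(void)
{
        atomic_store(&nr_async_bios, LIMIT);
        finish_bio_racy();

        atomic_store(&nr_async_bios, LIMIT);
        finish_bio_safe();
        return 0;
}
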
index 5479325987b3c8af40e0760790d5365fda0efc8d..53c06af92e8da94270dab4f24906b937441d8a8f 100644 (file)
@@ -289,8 +289,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *max_avail);
-struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
-                                                  u64 logical, int mirror_num);
 void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
 int btrfs_get_dev_stats(struct btrfs_root *root,
index 9f6d2e41281d69752f77d9c68772a46c855a8453..58e2e7b7737264fac2da09e2a71d451497e25e83 100644 (file)
@@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
 /*
  * Initialise the state of a blockdev page's buffers.
  */ 
-static void
+static sector_t
 init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
 {
@@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
+
+       /*
+        * Caller needs to validate requested block against end of device.
+        */
+       return end_block;
 }
 
 /*
  * Create the page-cache page that contains the requested block.
  *
- * This is user purely for blockdev mappings.
+ * This is used purely for blockdev mappings.
  */
-static struct page *
+static int
 grow_dev_page(struct block_device *bdev, sector_t block,
-               pgoff_t index, int size)
+               pgoff_t index, int size, int sizebits)
 {
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;
+       sector_t end_block;
+       int ret = 0;            /* Will call free_more_memory() */
 
        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
-               return NULL;
+               return ret;
 
        BUG_ON(!PageLocked(page));
 
        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
-                       init_page_buffers(page, bdev, block, size);
-                       return page;
+                       end_block = init_page_buffers(page, bdev,
+                                               index << sizebits, size);
+                       goto done;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
@@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
-       init_page_buffers(page, bdev, block, size);
+       end_block = init_page_buffers(page, bdev, index << sizebits, size);
        spin_unlock(&inode->i_mapping->private_lock);
-       return page;
-
+done:
+       ret = (block < end_block) ? 1 : -ENXIO;
 failed:
        unlock_page(page);
        page_cache_release(page);
-       return NULL;
+       return ret;
 }
 
 /*
@@ -999,7 +1007,6 @@ failed:
 static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
 {
-       struct page *page;
        pgoff_t index;
        int sizebits;
 
@@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
                        bdevname(bdev, b));
                return -EIO;
        }
-       block = index << sizebits;
+
        /* Create a page with the proper size buffers.. */
-       page = grow_dev_page(bdev, block, index, size);
-       if (!page)
-               return 0;
-       unlock_page(page);
-       page_cache_release(page);
-       return 1;
+       return grow_dev_page(bdev, block, index, size, sizebits);
 }
 
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
-       int ret;
-       struct buffer_head *bh;
-
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
@@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
                return NULL;
        }
 
-retry:
-       bh = __find_get_block(bdev, block, size);
-       if (bh)
-               return bh;
+       for (;;) {
+               struct buffer_head *bh;
+               int ret;
 
-       ret = grow_buffers(bdev, block, size);
-       if (ret == 0) {
-               free_more_memory();
-               goto retry;
-       } else if (ret > 0) {
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
+
+               ret = grow_buffers(bdev, block, size);
+               if (ret < 0)
+                       return NULL;
+               if (ret == 0)
+                       free_more_memory();
        }
-       return NULL;
 }
 
 /*
@@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block);
  * which corresponds to the passed block_device, block and size. The
  * returned buffer has its reference count incremented.
  *
- * __getblk() cannot fail - it just keeps trying.  If you pass it an
- * illegal block number, __getblk() will happily return a buffer_head
- * which represents the non-existent block.  Very weird.
- *
  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
  * attempt is failing.  FIXME, perhaps?
  */
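
With the buffer.c rework above, grow_dev_page()/grow_buffers() now report three outcomes, a negative errno for a hard failure (for instance a block past the end of the device), zero when the page could not be set up yet, and a positive value on success, and __getblk_slow() loops on that result instead of retrying blindly. A compact standalone sketch of that retry shape follows; the helpers below are stand-ins, not the kernel functions.

/* Retry loop keyed off a tri-state helper: <0 fatal, 0 retry, >0 made progress. */
#include <stdio.h>

static int attempts = 2;                /* simulate two transient failures */

static int grow(void)                   /* stand-in for grow_buffers() */
{
        if (attempts-- > 0)
                return 0;               /* transient: caller frees memory and retries */
        return 1;                       /* buffers created */
}

static int find(void)                   /* stand-in for __find_get_block() */
{
        return attempts < 0;            /* "found" once grow() has succeeded */
}

static void free_more_memory(void)
{
        /* reclaim would happen here before the next attempt */
}

static int getblk_slow(void)
{
        for (;;) {
                int ret;

                if (find())
                        return 0;       /* got the buffer */

                ret = grow();
                if (ret < 0)
                        return ret;     /* hard failure, e.g. block past end of device */
                if (ret == 0)
                        free_more_memory();
                /* ret > 0: buffers exist now, loop back and find them */
        }
}

int main(void)
{
        printf("getblk_slow() = %d\n", getblk_slow());
        return 0;
}
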
index 074923ce593d7d53da6ae010128dffc8c080815b..f0cf934ba877d8549d8631787a4aea8da871b371 100644 (file)
@@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid)
                /* result already set, check signature */
                if (server->sec_mode &
                    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
-                       if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
-                                         server, mid->sequence_number + 1))
-                               cERROR(1, "Unexpected SMB signature");
+                       int rc = 0;
+
+                       rc = cifs_verify_signature(rdata->iov, rdata->nr_iov,
+                                                  server,
+                                                  mid->sequence_number + 1);
+                       if (rc)
+                               cERROR(1, "SMB signature verification returned "
+                                      "error = %d", rc);
                }
                /* FIXME: should this be counted toward the initiating task? */
                task_io_account_read(rdata->bytes);
index cbe709ad6663485cdcec49dbce9d1156de510c5d..781025be48bc47581c7484e8d55948914d588848 100644 (file)
@@ -356,19 +356,12 @@ cifs_create_get_file_info:
 cifs_create_set_dentry:
        if (rc != 0) {
                cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
+               CIFSSMBClose(xid, tcon, *fileHandle);
                goto out;
        }
        d_drop(direntry);
        d_add(direntry, newinode);
 
-       /* ENOENT for create?  How weird... */
-       rc = -ENOENT;
-       if (!newinode) {
-               CIFSSMBClose(xid, tcon, *fileHandle);
-               goto out;
-       }
-       rc = 0;
-
 out:
        kfree(buf);
        kfree(full_path);
index 7354877fa3bd825519ea981b1c6208fd886dff11..cb79c7edecb0f3b2856ce8817c3515e8ad827e59 100644 (file)
@@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
 {
        struct cifsInodeInfo *cifs_i = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
-       unsigned long oldtime = cifs_i->time;
 
        cifs_revalidate_cache(inode, fattr);
 
+       spin_lock(&inode->i_lock);
        inode->i_atime = fattr->cf_atime;
        inode->i_mtime = fattr->cf_mtime;
        inode->i_ctime = fattr->cf_ctime;
@@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
        else
                cifs_i->time = jiffies;
 
-       cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
-                oldtime, cifs_i->time);
-
        cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
 
        cifs_i->server_eof = fattr->cf_eof;
@@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
         * Can't safely change the file size here if the client is writing to
         * it due to potential races.
         */
-       spin_lock(&inode->i_lock);
        if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
                i_size_write(inode, fattr->cf_eof);
 
@@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb)
 
        if (rc && tcon->ipc) {
                cFYI(1, "ipc connection - fake read inode");
+               spin_lock(&inode->i_lock);
                inode->i_mode |= S_IFDIR;
                set_nlink(inode, 2);
                inode->i_op = &cifs_ipc_inode_ops;
                inode->i_fop = &simple_dir_operations;
                inode->i_uid = cifs_sb->mnt_uid;
                inode->i_gid = cifs_sb->mnt_gid;
+               spin_unlock(&inode->i_lock);
        } else if (rc) {
                iget_failed(inode);
                inode = ERR_PTR(rc);
@@ -1110,6 +1108,15 @@ undo_setattr:
        goto out_close;
 }
 
+/* copied from fs/nfs/dir.c with small changes */
+static void
+cifs_drop_nlink(struct inode *inode)
+{
+       spin_lock(&inode->i_lock);
+       if (inode->i_nlink > 0)
+               drop_nlink(inode);
+       spin_unlock(&inode->i_lock);
+}
 
 /*
  * If dentry->d_inode is null (usually meaning the cached dentry
@@ -1166,13 +1173,13 @@ retry_std_delete:
 psx_del_no_retry:
        if (!rc) {
                if (inode)
-                       drop_nlink(inode);
+                       cifs_drop_nlink(inode);
        } else if (rc == -ENOENT) {
                d_drop(dentry);
        } else if (rc == -ETXTBSY) {
                rc = cifs_rename_pending_delete(full_path, dentry, xid);
                if (rc == 0)
-                       drop_nlink(inode);
+                       cifs_drop_nlink(inode);
        } else if ((rc == -EACCES) && (dosattr == 0) && inode) {
                attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
                if (attrs == NULL) {
@@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode,
         * setting nlink not necessary except in cases where we failed to get it
         * from the server or was set bogus
         */
+       spin_lock(&dentry->d_inode->i_lock);
        if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2))
                set_nlink(dentry->d_inode, 2);
-
+       spin_unlock(&dentry->d_inode->i_lock);
        mode &= ~current_umask();
        /* must turn on setgid bit if parent dir has it */
        if (inode->i_mode & S_ISGID)
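
Several of the cifs hunks above wrap every link-count update (set_nlink(), inc_nlink(), and the new cifs_drop_nlink()) in spin_lock(&inode->i_lock), and cifs_drop_nlink() additionally refuses to decrement a count that is already zero. Here is a small standalone analogue of that guarded decrement, with a pthread mutex standing in for the spinlock and a made-up inode structure.

/* A guarded, lock-protected link-count drop in the spirit of cifs_drop_nlink(). */
#include <pthread.h>
#include <stdio.h>

struct fake_inode {                     /* invented stand-in for struct inode */
        pthread_mutex_t i_lock;
        unsigned int    i_nlink;
};

static void drop_nlink_safe(struct fake_inode *inode)
{
        pthread_mutex_lock(&inode->i_lock);
        if (inode->i_nlink > 0)         /* never wrap a count that is already 0 */
                inode->i_nlink--;
        pthread_mutex_unlock(&inode->i_lock);
}

int main(void)
{
        struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, 1 };

        drop_nlink_safe(&inode);
        drop_nlink_safe(&inode);        /* second call is a no-op, no underflow */
        printf("i_nlink = %u\n", inode.i_nlink);
        return 0;
}
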
index 09e4b3ae45640e3d0c1bd8757cc6da935fca65f5..e6ce3b1128756be4496494b2623fab27fa0d165f 100644 (file)
@@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
        if (old_file->d_inode) {
                cifsInode = CIFS_I(old_file->d_inode);
                if (rc == 0) {
+                       spin_lock(&old_file->d_inode->i_lock);
                        inc_nlink(old_file->d_inode);
+                       spin_unlock(&old_file->d_inode->i_lock);
 /* BB should we make this contingent on superblock flag NOATIME? */
 /*                     old_file->d_inode->i_ctime = CURRENT_TIME;*/
                        /* parent dir timestamps will update from srv
index a4ff5d547554d174466bc48eb7c8fccb48c8f668..e4d3b99641673670b681ca41e443d3a3083359c1 100644 (file)
@@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
                        cERROR(1, "Bad protocol string signature header %x",
                                  *(unsigned int *) hdr->ProtocolId);
                if (mid != hdr->MessageId)
-                       cERROR(1, "Mids do not match");
+                       cERROR(1, "Mids do not match: %llu and %llu", mid,
+                                 hdr->MessageId);
        }
        cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId);
        return 1;
@@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length)
         * ie Validate the wct via smb2_struct_sizes table above
         */
 
-       if (length < 2 + sizeof(struct smb2_hdr)) {
+       if (length < sizeof(struct smb2_pdu)) {
                if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) {
                        pdu->StructureSize2 = 0;
                        /*
@@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length)
                return 1;
        }
        if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) {
-               cERROR(1, "SMB length greater than maximum, mid=%lld", mid);
+               cERROR(1, "SMB length greater than maximum, mid=%llu", mid);
                return 1;
        }
 
        if (check_smb2_hdr(hdr, mid))
                return 1;
 
-       if (hdr->StructureSize != SMB2_HEADER_SIZE) {
-               cERROR(1, "Illegal structure size %d",
+       if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+               cERROR(1, "Illegal structure size %u",
                          le16_to_cpu(hdr->StructureSize));
                return 1;
        }
@@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length)
        if (4 + len != clc_len) {
                cFYI(1, "Calculated size %u length %u mismatch mid %llu",
                        clc_len, 4 + len, mid);
-               if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */
-                       return 0; /* BB workaround Samba 3 bug SessSetup rsp */
+               /* server can return one byte more */
+               if (clc_len == 4 + len + 1)
+                       return 0;
                return 1;
        }
        return 0;
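
The smb2misc.c change above replaces the ad-hoc minimum-length test of 2 + sizeof(struct smb2_hdr) with sizeof(struct smb2_pdu): a received buffer must be able to hold the full fixed-size PDU before any of its fields are trusted. A generic, self-contained version of that kind of check is sketched below; the message layout is invented for the example and has nothing to do with the real SMB2 structures.

/* Reject a buffer that cannot even hold the fixed-size message layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr {
        uint32_t length;                /* claimed payload length */
        uint16_t type;
        uint16_t flags;
};

struct msg_pdu {                        /* header plus the smallest legal body */
        struct msg_hdr hdr;
        uint16_t body_size;
};

static int check_message(const void *buf, size_t len)
{
        struct msg_pdu pdu;

        if (len < sizeof(struct msg_pdu))
                return -1;              /* too short for even the fixed part */

        memcpy(&pdu, buf, sizeof(pdu)); /* avoid unaligned access into buf */
        if (pdu.hdr.length > len - sizeof(struct msg_hdr))
                return -1;              /* claimed payload runs past the buffer */
        return 0;
}

int main(void)
{
        unsigned char buf[sizeof(struct msg_pdu)] = { 0 };

        printf("short message: %d\n", check_message(buf, 4));
        printf("valid message: %d\n", check_message(buf, sizeof(buf)));
        return 0;
}
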
index f37a1b41b402b76e9f5ce7ae155e7bd5bb3a08a3..c5fbfac5d576ef917f8cf8e484dbb801523fd35c 100644 (file)
 
 #define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe)
 
-#define SMB2_HEADER_SIZE __constant_le16_to_cpu(64)
-
-#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
-
 /*
  * SMB2 Header Definition
  *
@@ -99,6 +95,9 @@
  * "PDU" :  "Protocol Data Unit" (ie a network "frame")
  *
  */
+
+#define SMB2_HEADER_STRUCTURE_SIZE __constant_le16_to_cpu(64)
+
 struct smb2_hdr {
        __be32 smb2_buf_length; /* big endian on wire */
                                /* length is only two or three bytes - with
@@ -140,6 +139,9 @@ struct smb2_pdu {
  *  command code name for the struct. Note that structures must be packed.
  *
  */
+
+#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
+
 struct smb2_err_rsp {
        struct smb2_hdr hdr;
        __le16 StructureSize;
index 83867ef348dfe15276d83379f12ae5c46399f7a0..d9b639b95fa8b8a5167ecf58fa3285f90ff2520f 100644 (file)
@@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
        /* convert the length into a more usable form */
        if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                struct kvec iov;
+               int rc = 0;
 
                iov.iov_base = mid->resp_buf;
                iov.iov_len = len;
                /* FIXME: add code to kill session */
-               if (cifs_verify_signature(&iov, 1, server,
-                                         mid->sequence_number + 1) != 0)
-                       cERROR(1, "Unexpected SMB signature");
+               rc = cifs_verify_signature(&iov, 1, server,
+                                          mid->sequence_number + 1);
+               if (rc)
+                       cERROR(1, "SMB signature verification returned error = "
+                              "%d", rc);
        }
 
        /* BB special case reconnect tid and uid here? */
index 1faf4cb56f3963d0945d8004b8640464b9e3b6fd..f86c720dba0eeea72d7ecefdefa8ad0b52886011 100644 (file)
@@ -1062,6 +1062,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        unsigned long user_addr;
        size_t bytes;
        struct buffer_head map_bh = { 0, };
+       struct blk_plug plug;
 
        if (rw & WRITE)
                rw = WRITE_ODIRECT;
@@ -1177,6 +1178,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
                                PAGE_SIZE - user_addr / PAGE_SIZE);
        }
 
+       blk_start_plug(&plug);
+
        for (seg = 0; seg < nr_segs; seg++) {
                user_addr = (unsigned long)iov[seg].iov_base;
                sdio.size += bytes = iov[seg].iov_len;
@@ -1235,6 +1238,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        if (sdio.bio)
                dio_bio_submit(dio, &sdio);
 
+       blk_finish_plug(&plug);
+
        /*
         * It is possible that, we return short IO due to end of file.
         * In that case, we need to release all the pages we got hold on.
index 09357508ec9ae5aebbac1e2ccbd44d42087bfd4a..a2862339323b2a5f1e08dd7e25a779e1b683b828 100644 (file)
@@ -1113,6 +1113,11 @@ static void mark_journal_empty(journal_t *journal)
 
        BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
        spin_lock(&journal->j_state_lock);
+       /* Is it already empty? */
+       if (sb->s_start == 0) {
+               spin_unlock(&journal->j_state_lock);
+               return;
+       }
        jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
                  journal->j_tail_sequence);
 
index df0de27c273349c22ee6b8770b589663801c68f5..e784a217b50067919ad3ebffe559b3552b58a9bc 100644 (file)
@@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
        struct completion complete;
 
        bio_init(&bio);
+       bio.bi_max_vecs = 1;
        bio.bi_io_vec = &bio_vec;
        bio_vec.bv_page = page;
        bio_vec.bv_len = PAGE_SIZE;
@@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct bio *bio;
        struct page *page;
-       struct request_queue *q = bdev_get_queue(sb->s_bdev);
-       unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
+       unsigned int max_pages;
        int i;
 
-       if (max_pages > BIO_MAX_PAGES)
-               max_pages = BIO_MAX_PAGES;
+       max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+
        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);
 
@@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
 {
        struct logfs_super *super = logfs_super(sb);
        struct bio *bio;
-       struct request_queue *q = bdev_get_queue(sb->s_bdev);
-       unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
+       unsigned int max_pages;
        int i;
 
-       if (max_pages > BIO_MAX_PAGES)
-               max_pages = BIO_MAX_PAGES;
+       max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+
        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);
 
index a422f42238b250764011fa421d24a1a0858dd153..6984562738d36bc4142a3e0556730ae9e3bf3a57 100644 (file)
@@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode)
        call_rcu(&inode->i_rcu, logfs_i_callback);
 }
 
+static void __logfs_destroy_meta_inode(struct inode *inode)
+{
+       struct logfs_inode *li = logfs_inode(inode);
+       BUG_ON(li->li_block);
+       call_rcu(&inode->i_rcu, logfs_i_callback);
+}
+
 static void logfs_destroy_inode(struct inode *inode)
 {
        struct logfs_inode *li = logfs_inode(inode);
 
+       if (inode->i_ino < LOGFS_RESERVED_INOS) {
+               /*
+                * The reserved inodes are never destroyed unless we are in
+                * unmont path.
+                */
+                * unmount path.
+               return;
+       }
+
        BUG_ON(list_empty(&li->li_freeing_list));
        spin_lock(&logfs_inode_lock);
        li->li_refcount--;
@@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb)
 {
        struct logfs_super *super = logfs_super(sb);
        /* kill the meta-inodes */
-       iput(super->s_master_inode);
        iput(super->s_segfile_inode);
+       iput(super->s_master_inode);
        iput(super->s_mapping_inode);
 }
 
index 1e1c369df22bb085f62519b1100eb300053aaae8..2a09b8d73989539aedfe205fafd7f856f1122c7c 100644 (file)
@@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area,
        index = ofs >> PAGE_SHIFT;
        page_ofs = ofs & (PAGE_SIZE - 1);
 
-       page = find_lock_page(mapping, index);
+       page = find_or_create_page(mapping, index, GFP_NOFS);
        BUG_ON(!page);
        memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
        unlock_page(page);
index f1cb512c5019dacf057391ed857a9697f0b98422..5be0abef603d4f82af9e59aaca639118e280476b 100644 (file)
@@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode)
                return;
        }
 
-       BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
        page = inode_to_page(inode);
        BUG_ON(!page); /* FIXME: Use emergency page */
        logfs_put_write_page(page);
index e28d090c98d6bbb2986d40fd4ae57e795cf7415c..038da0991794a39962fac3d4ef7ed5b18008c6f9 100644 (file)
@@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)
 
 static void map_invalidatepage(struct page *page, unsigned long l)
 {
-       BUG();
+       return;
 }
 
 static int map_releasepage(struct page *page, gfp_t g)
index cbaf4f8bb7b712be7e92db2a7bf102c1f8c2dc6f..4c7bd35b187687915ab4826877dc62aecca707e3 100644 (file)
@@ -651,12 +651,12 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
 
        if (clp->cl_minorversion == 0) {
                if (!clp->cl_cred.cr_principal &&
-                               (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+                               (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
                        return -EINVAL;
                args.client_name = clp->cl_cred.cr_principal;
                args.prognumber = conn->cb_prog,
                args.protocol = XPRT_TRANSPORT_TCP;
-               args.authflavor = clp->cl_flavor;
+               args.authflavor = clp->cl_cred.cr_flavor;
                clp->cl_cb_ident = conn->cb_ident;
        } else {
                if (!conn->cb_xprt)
index e6173147f9821c816527e0bde05289d392374af7..22bd0a66c3566465ee77ca228b1979134b625ab2 100644 (file)
@@ -231,7 +231,6 @@ struct nfs4_client {
        nfs4_verifier           cl_verifier;    /* generated by client */
        time_t                  cl_time;        /* time of last lease renewal */
        struct sockaddr_storage cl_addr;        /* client ipaddress */
-       u32                     cl_flavor;      /* setclientid pseudoflavor */
        struct svc_cred         cl_cred;        /* setclientid principal */
        clientid_t              cl_clientid;    /* generated by server */
        nfs4_verifier           cl_confirm;     /* generated by server */
index 36a29b753c79c709175ebfd788196d1ecad948b5..c495a3055e2a3be9b5e471afebdf53ac00c6fe51 100644 (file)
@@ -1589,10 +1589,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
                goto out;
        }
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warn[cnt].w_type = QUOTA_NL_NOWARN;
 
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!dquots[cnt])
index 4c0c7d163d150c02535cffcad08a1aead02f53d2..a98b7740a0fcade0b894920154e65ecbe7987509 100644 (file)
@@ -1334,9 +1334,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
        else if (bitmap == 0)
                block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
 
-       reiserfs_write_unlock(sb);
        bh = sb_bread(sb, block);
-       reiserfs_write_lock(sb);
        if (bh == NULL)
                reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
                                 "reading failed", __func__, block);
index a6d4268fb6c11798db5f8339bd14ab297cd9b21f..855da58db1456b94d43715bb4a28bbc5f982a8d7 100644 (file)
@@ -76,10 +76,10 @@ void reiserfs_evict_inode(struct inode *inode)
                ;
        }
       out:
+       reiserfs_write_unlock_once(inode->i_sb, depth);
        clear_inode(inode);     /* note this must go after the journal_end to prevent deadlock */
        dquot_drop(inode);
        inode->i_blocks = 0;
-       reiserfs_write_unlock_once(inode->i_sb, depth);
        return;
 
 no_delete:
index 8b8cc4e945f4014bd8e0a77d1d9bde0197f0fa76..760de723dadb4928eec6f9c4729245a40dcce5de 100644 (file)
@@ -167,7 +167,7 @@ struct ubifs_global_debug_info {
 #define ubifs_dbg_msg(type, fmt, ...) \
        pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
 
-#define DBG_KEY_BUF_LEN 32
+#define DBG_KEY_BUF_LEN 48
 #define ubifs_dbg_msg_key(type, key, fmt, ...) do {                            \
        char __tmp_key_buf[DBG_KEY_BUF_LEN];                                   \
        pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__,             \
index ce33b2beb151f92ac61e51dcefc47cd25ee8dd15..8640920766ed461896add49f69b9db767aa9569c 100644 (file)
@@ -1749,7 +1749,10 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
        return 0;
 
 out_err:
-       ubifs_lpt_free(c, 0);
+       if (wr)
+               ubifs_lpt_free(c, 1);
+       if (rd)
+               ubifs_lpt_free(c, 0);
        return err;
 }
 
index c30d976b4be857a9d222cedd5b7192a4f90e6cfa..edeec499c048ba1d93375796b5fd4f439f38dd81 100644 (file)
@@ -788,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
 
 corrupted_rescan:
        /* Re-scan the corrupted data with verbose messages */
-       ubifs_err("corruptio %d", ret);
+       ubifs_err("corruption %d", ret);
        ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
 corrupted:
        ubifs_scanned_corruption(c, lnum, offs, buf);
index eba46d4a76192c017846c4389addf2ebdcc2a78e..94d78fc5d4e0dc11e6ea4db670f43853f569eabb 100644 (file)
@@ -1026,7 +1026,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
        c->replaying = 1;
        lnum = c->ltail_lnum = c->lhead_lnum;
 
-       lnum = UBIFS_LOG_LNUM;
        do {
                err = replay_log_leb(c, lnum, 0, c->sbuf);
                if (err == 1)
@@ -1035,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
                if (err)
                        goto out;
                lnum = ubifs_next_log_lnum(c, lnum);
-       } while (lnum != UBIFS_LOG_LNUM);
+       } while (lnum != c->ltail_lnum);
 
        err = replay_buds(c);
        if (err)
index c3fa6c5327a3bb7b6939206c118a9d6ace0ee039..71a197f0f93d24c0cc33527d4f2d69c8c3d51f7b 100644 (file)
@@ -1157,9 +1157,6 @@ static int check_free_space(struct ubifs_info *c)
  *
  * This function mounts UBIFS file system. Returns zero in case of success and
  * a negative error code in case of failure.
- *
- * Note, the function does not de-allocate resources it it fails half way
- * through, and the caller has to do this instead.
  */
 static int mount_ubifs(struct ubifs_info *c)
 {
index fafaad795cd6da4842a798e25446f6b426d65d65..aa233469b3c1a0deb1e3ef60b8039f5dd2e1cd55 100644 (file)
@@ -1124,14 +1124,17 @@ int udf_setsize(struct inode *inode, loff_t newsize)
                                if (err)
                                        return err;
                                down_write(&iinfo->i_data_sem);
-                       } else
+                       } else {
                                iinfo->i_lenAlloc = newsize;
+                               goto set_size;
+                       }
                }
                err = udf_extend_file(inode, newsize);
                if (err) {
                        up_write(&iinfo->i_data_sem);
                        return err;
                }
+set_size:
                truncate_setsize(inode, newsize);
                up_write(&iinfo->i_data_sem);
        } else {
index dcbf98722afcd5df2a8940f4dbd5c1c8a02c7b40..18fc038a438da4b6bbf58fa73c23c27ecd0cb721 100644 (file)
@@ -1344,6 +1344,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                udf_err(sb, "error loading logical volume descriptor: "
                        "Partition table too long (%u > %lu)\n", table_len,
                        sb->s_blocksize - sizeof(*lvd));
+               ret = 1;
                goto out_bh;
        }
 
@@ -1388,8 +1389,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
                                if (udf_load_sparable_map(sb, map,
-                                   (struct sparablePartitionMap *)gpm) < 0)
+                                   (struct sparablePartitionMap *)gpm) < 0) {
+                                       ret = 1;
                                        goto out_bh;
+                               }
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
@@ -2000,6 +2003,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
                        if (!silent)
                                pr_notice("Rescanning with blocksize %d\n",
                                          UDF_DEFAULT_BLOCKSIZE);
+                       brelse(sbi->s_lvid_bh);
+                       sbi->s_lvid_bh = NULL;
                        uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
                        ret = udf_load_vrs(sb, &uopt, silent, &fileset);
                }
index f9c3fe304a17fc9ae470c7754294b3f129c78bea..69cf4fcde03e2d31266f70f6dfee1b73fe71b4a7 100644 (file)
@@ -179,12 +179,14 @@ xfs_ioc_trim(
         * used by the fstrim application.  In the end it really doesn't
         * matter as trimming blocks is an advisory interface.
         */
+       if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
+           range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
+               return -XFS_ERROR(EINVAL);
+
        start = BTOBB(range.start);
        end = start + BTOBBT(range.len) - 1;
        minlen = BTOBB(max_t(u64, granularity, range.minlen));
 
-       if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
-               return -XFS_ERROR(EINVAL);
        if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
                end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
 
index 21e37b55f7e596c6d29c0c0125a755f7a8ada7f6..5aceb3f8ecd625de029daaff00e6093a0e0213a2 100644 (file)
@@ -962,23 +962,22 @@ xfs_dialloc(
                if (!pag->pagi_freecount && !okalloc)
                        goto nextag;
 
+               /*
+                * Then read in the AGI buffer and recheck with the AGI buffer
+                * lock held.
+                */
                error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
                if (error)
                        goto out_error;
 
-               /*
-                * Once the AGI has been read in we have to recheck
-                * pagi_freecount with the AGI buffer lock held.
-                */
                if (pag->pagi_freecount) {
                        xfs_perag_put(pag);
                        goto out_alloc;
                }
 
-               if (!okalloc) {
-                       xfs_trans_brelse(tp, agbp);
-                       goto nextag;
-               }
+               if (!okalloc)
+                       goto nextag_relse_buffer;
+
 
                error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
                if (error) {
@@ -1007,6 +1006,8 @@ xfs_dialloc(
                        return 0;
                }
 
+nextag_relse_buffer:
+               xfs_trans_brelse(tp, agbp);
 nextag:
                xfs_perag_put(pag);
                if (++agno == mp->m_sb.sb_agcount)
index 92d4331cd4f1c118ea0a0087849d399a3948f230..ca28a4ba4b548f0c379291bfb0e716ac3a9ec54e 100644 (file)
@@ -857,7 +857,7 @@ xfs_rtbuf_get(
        xfs_buf_t       *bp;            /* block buffer, result */
        xfs_inode_t     *ip;            /* bitmap or summary inode */
        xfs_bmbt_irec_t map;
-       int             nmap;
+       int             nmap = 1;
        int             error;          /* error value */
 
        ip = issum ? mp->m_rsumip : mp->m_rbmip;
index ced362533e3c770f6dc8f4cb3a3872aeb52e455d..bfacf0d5a225fd0dd4c9a52d42496f3e7049a030 100644 (file)
@@ -118,7 +118,8 @@ enum drm_mode_status {
        .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
        .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
        .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
-       .vscan = (vs), .flags = (f), .vrefresh = 0
+       .vscan = (vs), .flags = (f), .vrefresh = 0, \
+       .base.type = DRM_MODE_OBJECT_MODE
 
 #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
 
index 5581980b14f606e593e9827e175980e5a50802f0..3d6301b6ec16d36b60cb4f69570170b8c804fb6d 100644 (file)
@@ -359,8 +359,9 @@ struct drm_mode_mode_cmd {
        struct drm_mode_modeinfo mode;
 };
 
-#define DRM_MODE_CURSOR_BO     (1<<0)
-#define DRM_MODE_CURSOR_MOVE   (1<<1)
+#define DRM_MODE_CURSOR_BO     0x01
+#define DRM_MODE_CURSOR_MOVE   0x02
+#define DRM_MODE_CURSOR_FLAGS  0x03
 
 /*
  * depending on the value in flags different members are used.
index 4e72a9d48232d513b5b2fe44d973de8dfeb1e9e4..4a2ab7c85393df48fd8d93e085f3b7ead5de7be2 100644 (file)
@@ -601,7 +601,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS       \
-       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
 #define rq_mergeable(rq)       \
        (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
         (((rq)->cmd_flags & REQ_DISCARD) || \
@@ -894,6 +894,8 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+                         struct scatterlist *sglist);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 
@@ -1139,6 +1141,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
                & (lim->discard_granularity - 1);
 }
 
+static inline int bdev_discard_alignment(struct block_device *bdev)
+{
+       struct request_queue *q = bdev_get_queue(bdev);
+
+       if (bdev != bdev->bd_contains)
+               return bdev->bd_part->discard_alignment;
+
+       return q->limits.discard_alignment;
+}
+
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
 {
        if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
index 040b13b5c14a5bdc500f82855123e7940fda6f6f..279b1eaa8b7304bf1028b6f91acedf4f15317148 100644 (file)
@@ -194,6 +194,10 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; }
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#else
+static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+}
 #endif
 
 /******************************
index 603bec2913b00ca40086149828f3fcddab32a435..06177ba10a1617461dbe6803efc22ba6dfb43304 100644 (file)
@@ -58,13 +58,6 @@ union ktime {
 
 typedef union ktime ktime_t;           /* Kill this */
 
-#define KTIME_MAX                      ((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX                 (KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX                 LONG_MAX
-#endif
-
 /*
  * ktime_t definitions when using the 64-bit scalar representation:
  */
index 51bf8ada6dc0166b103c4d11067d92fbb2496d78..49258e0ed1c679e4dfacc488b8e94aa49e7d815e 100644 (file)
@@ -15,6 +15,8 @@
 #define MV643XX_ETH_SIZE_REG_4         0x2224
 #define MV643XX_ETH_BASE_ADDR_ENABLE_REG       0x2290
 
+#define MV643XX_TX_CSUM_DEFAULT_LIMIT  0
+
 struct mv643xx_eth_shared_platform_data {
        struct mbus_dram_target_info    *dram;
        struct platform_device  *shared_smi;
index fc35260773489d55724f87d3493f5d5b23ab1f57..6b4565c440c8795eecf21044e9a8dc76b630687d 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
index c81c5e40fcb512dda61255ac7399fb818bcb0cec..b51e664c83e7237e17e02f0b5752ce586f87dbd4 100644 (file)
@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
        return ts_delta;
 }
 
+#define KTIME_MAX                      ((s64)~((u64)1 << 63))
+#if (BITS_PER_LONG == 64)
+# define KTIME_SEC_MAX                 (KTIME_MAX / NSEC_PER_SEC)
+#else
+# define KTIME_SEC_MAX                 LONG_MAX
+#endif
+
 /*
  * Returns true if the timespec is norm, false if denorm:
  */
-#define timespec_valid(ts) \
-       (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
+static inline bool timespec_valid(const struct timespec *ts)
+{
+       /* Dates before 1970 are bogus */
+       if (ts->tv_sec < 0)
+               return false;
+       /* Can't have more nanoseconds then a second */
+       if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+               return false;
+       return true;
+}
+
+static inline bool timespec_valid_strict(const struct timespec *ts)
+{
+       if (!timespec_valid(ts))
+               return false;
+       /* Disallow values that could overflow ktime_t */
+       if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+               return false;
+       return true;
+}
 
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
index e1ce1048fe5fa1142196cb88e08829b7bb1d60f5..4a045cda9c60c75a96b956b051dfbf5a6d6581b7 100644 (file)
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
        u32 pid;                /* netlink pid of destroyer */
+       struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
index 9c641deb65d2c62236343b4934d0db6ecb6c0dff..04399b28e821bf694a58d706e0b6c9f66e9c7d9f 100644 (file)
@@ -58,8 +58,6 @@ void notify_remote_via_irq(int irq);
 
 void xen_irq_resume(void);
 
-void xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn);
-
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq);
 void xen_set_irq_pending(int irq);
index 3bd2280d79f6b5507537c3e294e05c77a69d678f..2c8857e12855393759562b3c6eeec2d23de6f080 100644 (file)
@@ -455,8 +455,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (retval)
                        goto out;
 
-               if (file && uprobe_mmap(tmp))
-                       goto out;
+               if (file)
+                       uprobe_mmap(tmp);
        }
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
index e16af197a2bc54c8f81070a1043ed1f81923679f..34e5eac81424d98738246415a85d1a98a042bef1 100644 (file)
@@ -115,6 +115,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
 {
        tk->xtime_sec += ts->tv_sec;
        tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+       tk_normalize_xtime(tk);
 }
 
 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
@@ -276,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
        tk->xtime_nsec += cycle_delta * tk->mult;
 
        /* If arch requires, add in gettimeoffset() */
-       tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
+       tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
 
        tk_normalize_xtime(tk);
 
@@ -427,7 +428,7 @@ int do_settimeofday(const struct timespec *tv)
        struct timespec ts_delta, xt;
        unsigned long flags;
 
-       if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+       if (!timespec_valid_strict(tv))
                return -EINVAL;
 
        write_seqlock_irqsave(&tk->lock, flags);
@@ -463,6 +464,8 @@ int timekeeping_inject_offset(struct timespec *ts)
 {
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
+       struct timespec tmp;
+       int ret = 0;
 
        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;
@@ -471,10 +474,17 @@ int timekeeping_inject_offset(struct timespec *ts)
 
        timekeeping_forward_now(tk);
 
+       /* Make sure the proposed value is valid */
+       tmp = timespec_add(tk_xtime(tk),  *ts);
+       if (!timespec_valid_strict(&tmp)) {
+               ret = -EINVAL;
+               goto error;
+       }
 
        tk_xtime_add(tk, ts);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
 
+error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, true);
 
        write_sequnlock_irqrestore(&tk->lock, flags);
@@ -482,7 +492,7 @@ int timekeeping_inject_offset(struct timespec *ts)
        /* signal hrtimers about time change */
        clock_was_set();
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(timekeeping_inject_offset);
 
@@ -649,7 +659,20 @@ void __init timekeeping_init(void)
        struct timespec now, boot, tmp;
 
        read_persistent_clock(&now);
+       if (!timespec_valid_strict(&now)) {
+               pr_warn("WARNING: Persistent clock returned invalid value!\n"
+                       "         Check your CMOS/BIOS settings.\n");
+               now.tv_sec = 0;
+               now.tv_nsec = 0;
+       }
+
        read_boot_clock(&boot);
+       if (!timespec_valid_strict(&boot)) {
+               pr_warn("WARNING: Boot clock returned invalid value!\n"
+                       "         Check your CMOS/BIOS settings.\n");
+               boot.tv_sec = 0;
+               boot.tv_nsec = 0;
+       }
 
        seqlock_init(&tk->lock);
 
@@ -690,7 +713,7 @@ static struct timespec timekeeping_suspend_time;
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
                                                        struct timespec *delta)
 {
-       if (!timespec_valid(delta)) {
+       if (!timespec_valid_strict(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                                        "sleep delta value!\n");
                return;
@@ -1129,6 +1152,10 @@ static void update_wall_time(void)
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
 
+       /* Check if there's really nothing to do */
+       if (offset < tk->cycle_interval)
+               goto out;
+
        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
         * (think "ticks") worth of time at once. To do this efficiently,
@@ -1161,9 +1188,9 @@ static void update_wall_time(void)
        * the vsyscall implementations are converted to use xtime_nsec
        * (shifted nanoseconds), this can be killed.
        */
-       remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
+       remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
        tk->xtime_nsec -= remainder;
-       tk->xtime_nsec += 1 << tk->shift;
+       tk->xtime_nsec += 1ULL << tk->shift;
        tk->ntp_error += remainder << tk->ntp_error_shift;
 
        /*
index 60e4d78756723189b95d11390bc566a6578a4bbe..6b245f64c8dd850bbbd7a16132d997fccd25206e 100644 (file)
@@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        int size;
 
        syscall_nr = syscall_get_nr(current, regs);
+       if (syscall_nr < 0)
+               return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;
 
@@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        int size;
 
        syscall_nr = syscall_get_nr(current, regs);
+       if (syscall_nr < 0)
+               return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;
 
index fa5ca304148e7b4756bf2e52cd6afde1dba2b5c3..384344575c375e1a1464734670218557c31e0e27 100644 (file)
@@ -1412,12 +1412,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        retval = filemap_write_and_wait_range(mapping, pos,
                                        pos + iov_length(iov, nr_segs) - 1);
                        if (!retval) {
-                               struct blk_plug plug;
-
-                               blk_start_plug(&plug);
                                retval = mapping->a_ops->direct_IO(READ, iocb,
                                                        iov, pos, nr_segs);
-                               blk_finish_plug(&plug);
                        }
                        if (retval > 0) {
                                *ppos = pos + retval;
@@ -2527,14 +2523,12 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
-       struct blk_plug plug;
        ssize_t ret;
 
        BUG_ON(iocb->ki_pos != pos);
 
        sb_start_write(inode->i_sb);
        mutex_lock(&inode->i_mutex);
-       blk_start_plug(&plug);
        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
@@ -2545,7 +2539,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                if (err < 0 && ret > 0)
                        ret = err;
        }
-       blk_finish_plug(&plug);
        sb_end_write(inode->i_sb);
        return ret;
 }
index 9adee9fc0d8a6d799843ee0b010e8c4133f227f5..ae18a48e7e4e7944af308bbff226217ae7d1601e 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1356,9 +1356,8 @@ out:
        } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
                make_pages_present(addr, addr + len);
 
-       if (file && uprobe_mmap(vma))
-               /* matching probes but cannot insert */
-               goto unmap_and_free_vma;
+       if (file)
+               uprobe_mmap(vma);
 
        return addr;
 
index f8b0d539b4822af7812c8f9edcb51224450f3f64..811af03a14ef168ca8e6ba90ec0af60b2dffa305 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3260,6 +3260,7 @@ force_grow:
 
                /* cache_grow can reenable interrupts, then ac could change. */
                ac = cpu_cache_get(cachep);
+               node = numa_mem_id();
 
                /* no objects in sight? abort */
                if (!x && (ac->avail == 0 || force_refill))
index 346b1eb83a1f0336ed1e9dcf4f5905b733022dff..e4ba3e70c1747684ad480815f67b2410e87974a7 100644 (file)
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
        struct napi_struct *napi;
        int budget = 16;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        list_for_each_entry(napi, &dev->napi_list, dev_list) {
-               local_irq_enable();
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
-                       rcu_read_lock_bh();
                        budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
                                               napi, budget);
-                       rcu_read_unlock_bh();
                        spin_unlock(&napi->poll_lock);
 
-                       if (!budget) {
-                               local_irq_disable();
+                       if (!budget)
                                break;
-                       }
                }
-               local_irq_disable();
        }
 }
 
index 8eec8f4a05360d24897719495100ea100f0c69b2..ebdf06f938bf040eebc91763c3e952c63d00f92b 100644 (file)
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
                         struct sk_buff *skb, struct mfc_cache *cache,
                         int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
-               kfree(mrt);
+               ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
-       kfree(net->ipv4.mrt);
+       ipmr_free_table(net->ipv4.mrt);
 }
 #endif
 
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        return mrt;
 }
 
+static void ipmr_free_table(struct mr_table *mrt)
+{
+       del_timer_sync(&mrt->ipmr_expire_timer);
+       mroute_clean_tables(mrt);
+       kfree(mrt);
+}
+
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
index 4ad9cf1739922cfb4b13d3135d01381dd70bf0be..9c87cde28ff831472cc7072a05526d753727d1fa 100644 (file)
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
                ret = nf_ct_expect_related(rtcp_exp);
                if (ret == 0)
                        break;
-               else if (ret != -EBUSY) {
+               else if (ret == -EBUSY) {
+                       nf_ct_unexpect_related(rtp_exp);
+                       continue;
+               } else if (ret < 0) {
                        nf_ct_unexpect_related(rtp_exp);
                        port = 0;
                        break;
index fd9ecb52c66bf8815c09e678c452017e9a7e2122..82cf2a722b2310803ec603f60d59cdf819481a4a 100644 (file)
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;
 
+       rcu_read_lock();
        if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
                struct fib_nh *nh = &FIB_RES_NH(res);
 
                update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
                                      jiffies + ip_rt_mtu_expires);
        }
+       rcu_read_unlock();
        return mtu;
 }
 
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                dst->obsolete = DST_OBSOLETE_KILL;
        } else {
                rt->rt_pmtu = mtu;
-               dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+               rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
        }
 }
 
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (dst->flags & DST_NOCACHE) {
+       if (!list_empty(&rt->rt_uncached)) {
                spin_lock_bh(&rt_uncached_lock);
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&rt_uncached_lock);
index 85308b90df80a844119a8cc773783f78aab5de04..6e38c6c23caa69601216e193507460663c1926ec 100644 (file)
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                 int newly_acked_sacked, bool is_dupack,
+                                 int prior_sacked, bool is_dupack,
                                  int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
+       int newly_acked_sacked = 0;
        int fast_rexmit = 0;
 
        if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_add_reno_sack(sk);
                } else
                        do_lost = tcp_try_undo_partial(sk, pkts_acked);
+               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
                break;
        case TCP_CA_Loss:
                if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        if (is_dupack)
                                tcp_add_reno_sack(sk);
                }
+               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 
                if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        int prior_packets;
        int prior_sacked = tp->sacked_out;
        int pkts_acked = 0;
-       int newly_acked_sacked = 0;
        bool frto_cwnd = false;
 
        /* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
        pkts_acked = prior_packets - tp->packets_out;
-       newly_acked_sacked = (prior_packets - prior_sacked) -
-                            (tp->packets_out - tp->sacked_out);
 
        if (tp->frto_counter)
                frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                    tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
         */
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-               newly_acked_sacked = tp->sacked_out - prior_sacked;
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        }
 
index 6dc7fd353ef53f9c08c9202340fc953bf3b294c5..282f3723ee194704ab7fa0757e2c032254903300 100644 (file)
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        struct esp_data *esp = x->data;
 
        /* skb is pure payload to encrypt */
-       err = -ENOMEM;
-
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);
 
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-       if (!tmp)
+       if (!tmp) {
+               err = -ENOMEM;
                goto error;
+       }
 
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
index 393355d37b476bc67219d57221759c3a1bdb52e8..513cab08a9863c0080d510ae5a2a4d927ad2ccdd 100644 (file)
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
        /* Remove from tunnel list */
        spin_lock_bh(&pn->l2tp_tunnel_list_lock);
        list_del_rcu(&tunnel->list);
+       kfree_rcu(tunnel, rcu);
        spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-       synchronize_rcu();
 
        atomic_dec(&l2tp_tunnel_count);
-       kfree(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
index a38ec6cdeee1a7ba81dc0ea9f659ca3f46bccb46..56d583e083a7baf7b0cf707a76882a88701d8025 100644 (file)
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
 
 struct l2tp_tunnel {
        int                     magic;          /* Should be L2TP_TUNNEL_MAGIC */
+       struct rcu_head rcu;
        rwlock_t                hlist_lock;     /* protect session_hlist */
        struct hlist_head       session_hlist[L2TP_HASH_SIZE];
                                                /* hashed list of sessions,
index acf712ffb5e630b3c1fc74c6390c7606d4f043bd..c5e8c9c31f7687d9922d0011ea1b31e8244ea8d2 100644 (file)
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
                                        sdata, NULL, NULL);
                } else {
-                       int is_mesh_mcast = 1;
-                       const u8 *mesh_da;
+                       /* DS -> MBSS (802.11-2012 13.11.3.3).
+                        * For unicast with unknown forwarding information,
+                        * destination might be in the MBSS or if that fails
+                        * forwarded to another mesh gate. In either case
+                        * resolution will be handled in ieee80211_xmit(), so
+                        * leave the original DA. This also works for mcast */
+                       const u8 *mesh_da = skb->data;
+
+                       if (mppath)
+                               mesh_da = mppath->mpp;
+                       else if (mpath)
+                               mesh_da = mpath->dst;
+                       rcu_read_unlock();
 
-                       if (is_multicast_ether_addr(skb->data))
-                               /* DA TA mSA AE:SA */
-                               mesh_da = skb->data;
-                       else {
-                               static const u8 bcast[ETH_ALEN] =
-                                       { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-                               if (mppath) {
-                                       /* RA TA mDA mSA AE:DA SA */
-                                       mesh_da = mppath->mpp;
-                                       is_mesh_mcast = 0;
-                               } else if (mpath) {
-                                       mesh_da = mpath->dst;
-                                       is_mesh_mcast = 0;
-                               } else {
-                                       /* DA TA mSA AE:SA */
-                                       mesh_da = bcast;
-                               }
-                       }
                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
                                        mesh_da, sdata->vif.addr);
-                       rcu_read_unlock();
-                       if (is_mesh_mcast)
+                       if (is_multicast_ether_addr(mesh_da))
+                               /* DA TA mSA AE:SA */
                                meshhdrlen =
                                        ieee80211_new_mesh_header(&mesh_hdr,
                                                        sdata,
                                                        skb->data + ETH_ALEN,
                                                        NULL);
                        else
+                               /* RA TA mDA mSA AE:DA SA */
                                meshhdrlen =
                                        ieee80211_new_mesh_header(&mesh_hdr,
                                                        sdata,
index 72bf32a84874718927a4bcbdc2e26395be00bdd8..f51013c07b9f4e5a81885f0462d5423ccbc8a44f 100644 (file)
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
                goto out_err;
        }
        svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
-       if (!svc->stats.cpustats)
+       if (!svc->stats.cpustats) {
+               ret = -ENOMEM;
                goto out_err;
+       }
 
        /* I'm the first user of the service */
        atomic_set(&svc->usecnt, 0);
index cf4875565d6755af8cb7e3ee952b723a007f1b4c..2ceec64b19f9866a222787531ba948b4c9e7e75b 100644 (file)
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
 {
        struct nf_conn *ct = (void *)ul_conntrack;
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
                /* bad luck, let's retry again */
-               ct->timeout.expires = jiffies +
+               ecache->timeout.expires = jiffies +
                        (random32() % net->ct.sysctl_events_retry_timeout);
-               add_timer(&ct->timeout);
+               add_timer(&ecache->timeout);
                return;
        }
        /* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        /* add this conntrack to the dying list */
        spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
                             &net->ct.dying);
        spin_unlock_bh(&nf_conntrack_lock);
        /* set a new timer to retry event delivery */
-       setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-       ct->timeout.expires = jiffies +
+       setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+       ecache->timeout.expires = jiffies +
                (random32() % net->ct.sysctl_events_retry_timeout);
-       add_timer(&ct->timeout);
+       add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 
index da4fc37a8578b6ef4590cbd0abf6e04241ebb8fc..9807f3278fcbcdfcc28c61b9b19e6a8c74d02b18 100644 (file)
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
                goto err_unreg_subsys;
        }
 
-       if (register_pernet_subsys(&ctnetlink_net_ops)) {
+       ret = register_pernet_subsys(&ctnetlink_net_ops);
+       if (ret < 0) {
                pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
index 169ab59ed9d49073105443bfcae75c89a2f30024..14e2f3903142e322d1ce72a3eba13441004eef16 100644 (file)
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
        }
 
        if (indev && skb_mac_header_was_set(skb)) {
-               if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+               if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
                    nla_put_be16(inst->skb, NFULA_HWLEN,
                                 htons(skb->dev->hard_header_len)) ||
                    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)
 
 #ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_log", 0440,
-                        proc_net_netfilter, &nful_file_ops))
+                        proc_net_netfilter, &nful_file_ops)) {
+               status = -ENOMEM;
                goto cleanup_logger;
+       }
 #endif
        return status;
 
index 1445d73533ed13ac9aa43dd85d6f58b4a006c130..527023823b5c5ea1a48c373b49e9f1688891a494 100644 (file)
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                dst_pid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
-               if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+               if ((dst_group || dst_pid) &&
+                   !netlink_capable(sock, NL_NONROOT_SEND))
                        goto out;
        } else {
                dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
        rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
+       nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
 
        netlink_table_ungrab();
 }
index aee7196aac36c990eedfc7b494a04351c29bb167..c5c9e2a54218207f0dba9b16920b2e17da84c353 100644 (file)
@@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
        spin_unlock(&f->lock);
 }
 
-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
 {
        if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
                return true;
index a5471f804d994ece8c31df6be0e04d5076b6dfd0..edc3c4af9085362c7227e31babfc19a489bf9cf6 100644 (file)
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
        err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
        set_fs(old_fs);
        if (!err)
-               err = compat_put_timeval(up, &ktv);
+               err = compat_put_timeval(&ktv, up);
 
        return err;
 }
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
        err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
        set_fs(old_fs);
        if (!err)
-               err = compat_put_timespec(up, &kts);
+               err = compat_put_timespec(&kts, up);
 
        return err;
 }
index 88f2bf671960d444e73d3d9eba2998f75ac2885b..bac973a313673eaab9dc5300a86cef0cfbd577e0 100644 (file)
@@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
  */
 void svc_xprt_enqueue(struct svc_xprt *xprt)
 {
-       struct svc_serv *serv = xprt->xpt_server;
        struct svc_pool *pool;
        struct svc_rqst *rqstp;
        int cpu;
@@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
                                rqstp, rqstp->rq_xprt);
                rqstp->rq_xprt = xprt;
                svc_xprt_get(xprt);
-               rqstp->rq_reserved = serv->sv_max_mesg;
-               atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
                pool->sp_stats.threads_woken++;
                wake_up(&rqstp->rq_wait);
        } else {
@@ -640,8 +637,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
        if (xprt) {
                rqstp->rq_xprt = xprt;
                svc_xprt_get(xprt);
-               rqstp->rq_reserved = serv->sv_max_mesg;
-               atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 
                /* As there is a shortage of threads and this request
                 * had to be queued, don't allow the thread to wait so
@@ -738,6 +733,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                else
                        len = xprt->xpt_ops->xpo_recvfrom(rqstp);
                dprintk("svc: got len=%d\n", len);
+               rqstp->rq_reserved = serv->sv_max_mesg;
+               atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
        }
        svc_xprt_received(xprt);
 
@@ -794,7 +791,8 @@ int svc_send(struct svc_rqst *rqstp)
 
        /* Grab mutex to serialize outgoing data. */
        mutex_lock(&xprt->xpt_mutex);
-       if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+       if (test_bit(XPT_DEAD, &xprt->xpt_flags)
+                       || test_bit(XPT_CLOSE, &xprt->xpt_flags))
                len = -ENOTCONN;
        else
                len = xprt->xpt_ops->xpo_sendto(rqstp);
index 18bc130255a75e537f87d15bddde82faf165d91e..998aa8c1807cd7cd548023d4ea2e80fedc9c2ca9 100644 (file)
@@ -1129,9 +1129,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
        if (len >= 0)
                svsk->sk_tcplen += len;
        if (len != want) {
+               svc_tcp_save_pages(svsk, rqstp);
                if (len < 0 && len != -EAGAIN)
                        goto err_other;
-               svc_tcp_save_pages(svsk, rqstp);
                dprintk("svc: incomplete TCP record (%d of %d)\n",
                        svsk->sk_tcplen, svsk->sk_reclen);
                goto err_noclose;
index 87cd0e4d42829a19797ae3e2371c47723fd40fb7..210be48d8ae3c295a3f9e9642c356acd9866c92c 100644 (file)
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
                goto error;
 
        x->outer_mode = xfrm_get_mode(x->props.mode, family);
-       if (x->outer_mode == NULL)
+       if (x->outer_mode == NULL) {
+               err = -EPROTONOSUPPORT;
                goto error;
+       }
 
        if (init_replay) {
                err = xfrm_init_replay(x);
index 2884e67ee625d6c41c8fb6a6fb495d843a6b86e7..213362850abdab31ca804db1a583dd5c38bdfbbd 100644 (file)
@@ -10,10 +10,12 @@ util/ctype.c
 util/evlist.c
 util/evsel.c
 util/cpumap.c
+util/hweight.c
 util/thread_map.c
 util/util.c
 util/xyarray.c
 util/cgroup.c
 util/debugfs.c
+util/rblist.c
 util/strlist.c
 ../../lib/rbtree.c
index 246852397e301ee86ac5c40f653e4695eae659a8..d617f69131d7667d3847c7e0fc040a717625bf38 100644 (file)
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
                        if (copy_from_user(&csigset, sigmask_arg->sigset,
                                           sizeof csigset))
                                goto out;
-               }
-               sigset_from_compat(&sigset, &csigset);
-               r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+                       sigset_from_compat(&sigset, &csigset);
+                       r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+               } else
+                       r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
                break;
        }
        default: