git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge tag 'pinctrl-for-v3.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 21 Aug 2013 23:33:37 +0000 (16:33 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 21 Aug 2013 23:33:37 +0000 (16:33 -0700)
Pull pinctrl fixes from Linus Walleij:
 "Fixes for the sunxi (AllWinner) pin control driver.  This was a new
  driver in this merge window, so some post-merge hardening is
  happening"

[ I had completely missed this pull request for some reason, it was sent
  over a week ago but my mailbox is chaotic ]

* tag 'pinctrl-for-v3.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl:
  pinctrl: sunxi: Add spinlocks
  pinctrl: sunxi: Fix gpio_set behaviour
  pinctrl: sunxi: Read register before writing to it in irq_set_type
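
All three fixes revolve around the same thing: on these pin controllers several pins share a single 32-bit register, so every update has to be a complete read-modify-write done under a lock, otherwise a blind or unlocked write clobbers the neighbouring pins' bits. The sketch below illustrates only that pattern; the fake_pinbank layout (8 pins of 4 bits per word), the offsets and the function name are invented for illustration and are not the actual pinctrl-sunxi code.

#include <linux/spinlock.h>
#include <linux/io.h>

/* Illustrative sketch only -- not the pinctrl-sunxi driver. */
struct fake_pinbank {
	void __iomem	*regs;	/* hypothetical MMIO base */
	spinlock_t	lock;	/* serializes every register update */
};

/* Change one pin's 4-bit field without disturbing its neighbours. */
static void fake_pin_set_cfg(struct fake_pinbank *bank, unsigned int pin, u32 cfg)
{
	unsigned long flags;
	u32 shift = (pin % 8) * 4;		/* assumed: 8 pins per 32-bit word */
	u32 mask = 0xf << shift;
	u32 offs = (pin / 8) * 4;
	u32 val;

	spin_lock_irqsave(&bank->lock, flags);
	val = readl(bank->regs + offs);		/* read the register first... */
	val &= ~mask;				/* ...clear only this pin's bits... */
	val |= (cfg << shift) & mask;
	writel(val, bank->regs + offs);		/* ...then write the merged value back */
	spin_unlock_irqrestore(&bank->lock, flags);
}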

382 files changed:
Documentation/DocBook/media_api.tmpl
Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
Documentation/devicetree/bindings/regulator/palmas-pmic.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/boot/dts/msm8960-cdp.dts
arch/arm/boot/dts/omap5-uevm.dts
arch/arm/boot/dts/stih41x.dtsi
arch/arm/boot/dts/tegra20-colibri-512.dtsi
arch/arm/include/asm/smp_plat.h
arch/arm/include/asm/spinlock.h
arch/arm/include/asm/tlb.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/fiq.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/mach-msm/Kconfig
arch/arm/mach-msm/gpiomux-v1.c [deleted file]
arch/arm/mach-msm/gpiomux.h
arch/arm/mach-omap2/dss-common.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_33xx_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-bockw.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/mach-sti/headsmp.S
arch/arm64/include/asm/tlb.h
arch/avr32/boards/atngw100/mrmt.c
arch/hexagon/Kconfig
arch/ia64/include/asm/tlb.h
arch/m68k/emu/natfeat.c
arch/m68k/include/asm/div64.h
arch/microblaze/Kconfig
arch/mips/include/asm/cpu-features.h
arch/mips/kernel/smp-bmips.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pnx833x/common/platform.c
arch/openrisc/Kconfig
arch/powerpc/Kconfig
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/tm.S
arch/powerpc/kernel/traps.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/platforms/pseries/nvram.c
arch/s390/Kconfig
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/misc.c
arch/s390/include/asm/bitops.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/perf_event.c
arch/s390/kernel/setup.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/mm/init.c
arch/s390/oprofile/init.c
arch/score/Kconfig
arch/sh/include/asm/tlb.h
arch/um/include/asm/tlb.h
arch/x86/boot/compressed/eboot.c
arch/x86/include/asm/bootparam_utils.h
arch/x86/include/asm/microcode_amd.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/spinlock.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/i387.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/microcode_amd_early.c
arch/x86/kernel/sys_x86_64.c
arch/x86/mm/mmap.c
drivers/acpi/acpi_processor.c
drivers/acpi/glue.c
drivers/acpi/proc.c
drivers/acpi/video.c
drivers/ata/pata_imx.c
drivers/base/regmap/regcache.c
drivers/block/aoe/aoecmd.c
drivers/char/virtio_console.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/zynq/clkc.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/dma/sh/shdma.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/radeon/btc_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cypress_dpm.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/rv6xx_dpm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/gpu/drm/radeon/rv770_dpm.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/hid/hid-logitech-dj.c
drivers/hwmon/adt7470.c
drivers/i2c/busses/i2c-kempld.c
drivers/i2c/busses/i2c-mxs.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/industrialio-trigger.c
drivers/media/i2c/ml86v7667.c
drivers/media/platform/coda.c
drivers/media/platform/s5p-g2d/g2d.c
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
drivers/media/usb/em28xx/em28xx-i2c.c
drivers/media/usb/hdpvr/hdpvr-core.c
drivers/media/usb/usbtv/Kconfig
drivers/media/usb/usbtv/usbtv.c
drivers/net/bonding/bond_main.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/tun.c
drivers/net/vxlan.c
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/pci/pci-acpi.c
drivers/rtc/rtc-stmp3xxx.c
drivers/s390/block/dasd.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_main.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/scsi.c
drivers/scsi/virtio_scsi.c
drivers/spi/spi-davinci.c
drivers/staging/zcache/zcache-main.c
drivers/usb/class/usbtmc.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci.c
drivers/usb/misc/adutux.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb_wwan.c
drivers/usb/wusbcore/wa-xfer.c
drivers/video/mxsfb.c
drivers/video/omap2/displays-new/connector-analog-tv.c
fs/btrfs/backref.c
fs/btrfs/ctree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/link.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2transport.c
fs/debugfs/inode.c
fs/dlm/user.c
fs/exec.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/super.c
fs/fcntl.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/inode.c
fs/gfs2/main.c
fs/hugetlbfs/inode.c
fs/lockd/clntlock.c
fs/lockd/clntproc.c
fs/namei.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
fs/nfs/super.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/ocfs2/aops.c
fs/ocfs2/dir.c
fs/ocfs2/file.c
fs/ocfs2/journal.h
fs/ocfs2/move_extents.c
fs/ocfs2/refcounttree.c
fs/ocfs2/refcounttree.h
fs/open.c
fs/proc/generic.c
fs/proc/root.c
fs/proc/task_mmu.c
fs/reiserfs/procfs.c
fs/reiserfs/super.c
include/acpi/acpi_bus.h
include/asm-generic/pgtable.h
include/asm-generic/tlb.h
include/linux/ftrace_event.h
include/linux/iio/trigger.h
include/linux/kernel.h
include/linux/mfd/ti_am335x_tscadc.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/regmap.h
include/linux/sched.h
include/linux/spinlock.h
include/linux/sunrpc/sched.h
include/linux/swapops.h
include/linux/syscalls.h
include/linux/user_namespace.h
include/media/v4l2-ctrls.h
include/net/busy_poll.h
include/net/ip_tunnels.h
include/net/sch_generic.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/snmp.h
kernel/cgroup.c
kernel/cpuset.c
kernel/fork.c
kernel/mutex.c
kernel/power/qos.c
kernel/printk/braille.c
kernel/ptrace.c
kernel/sched/core.c
kernel/sched/cpupri.c
kernel/sched/fair.c
kernel/time/sched_clock.c
kernel/time/tick-sched.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_uprobe.c
kernel/user_namespace.c
kernel/wait.c
kernel/workqueue.c
mm/fremap.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/rmap.c
mm/slub.c
mm/swapfile.c
net/8021q/vlan_core.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/soft-interface.c
net/batman-adv/unicast.c
net/bridge/br_multicast.c
net/bridge/br_sysfs_br.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/rtnetlink.c
net/ipv4/esp4.c
net/ipv4/fib_trie.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel_core.c
net/ipv4/proc.c
net/ipv4/tcp_cubic.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/mac80211/mlme.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netlink/genetlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sctp/associola.c
net/sctp/transport.c
net/sunrpc/clnt.c
net/sunrpc/netns.h
net/sunrpc/rpcb_clnt.c
net/tipc/bearer.c
net/vmw_vsock/af_vsock.c
net/wireless/core.c
net/wireless/nl80211.c
security/smack/smack_lsm.c
sound/pci/hda/hda_generic.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/sgtl5000.c
sound/soc/soc-dapm.c
sound/soc/tegra/tegra30_i2s.c
sound/usb/6fire/comm.c
sound/usb/6fire/comm.h
sound/usb/6fire/midi.c
sound/usb/6fire/midi.h
sound/usb/6fire/pcm.c
sound/usb/6fire/pcm.h
sound/usb/endpoint.c
sound/usb/mixer.c
sound/usb/quirks.c

diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b7158697f9ae58b33e82d2f7becd772a6c4aa..9c92bb879b6dc7b1468e247ae0dbb026ad519f2f 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-       "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+       "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
 <!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
 <!ENTITY media-indices SYSTEM "./media-indices.tmpl">
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index a1ee681942ccc01aedbd335cc3e2aaa69b036489..6113f9275f42074d044199145b37d20feddf279e 100644 (file)
@@ -4,7 +4,7 @@
 Required properties :
 
  - reg             : Offset and length of the register set for the device
- - compatible      : Should be "marvell,mv64xxx-i2c"
+ - compatible      : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
  - interrupts      : The interrupt number
 
 Optional properties :
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index d5a308629c5765093cf4111d8e42186cbe6b06d3..30b0581bb1ce63cbea673888e9fec22a5df72aa5 100644 (file)
@@ -31,9 +31,8 @@ Optional nodes:
               Optional sub-node properties:
               ti,warm-reset - maintain voltage during warm reset(boolean)
               ti,roof-floor - control voltage selection by pin(boolean)
-              ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto,
+              ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
               2 - eco, 3 - forced pwm
-              ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
               ti,smps-range - OTP has the wrong range set for the hardware so override
               0 - low range, 1 - high range.
 
@@ -59,7 +58,6 @@ pmic {
                        ti,warm-reset;
                        ti,roof-floor;
                        ti,mode-sleep = <0>;
-                       ti,tstep = <0>;
                        ti,smps-range = <1>;
                };
 
diff --git a/MAINTAINERS b/MAINTAINERS
index defc05383f8325a3cd72c34a6dc5a3f7a70766c8..229c66b12cc21b9ebe6479932491ecd5e8784118 100644 (file)
@@ -965,6 +965,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 
+ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-keystone/
+
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1259,7 +1265,6 @@ F:        drivers/rtc/rtc-coh901331.c
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 ARM/Ux500 ARM ARCHITECTURE
-M:     Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 M:     Linus Walleij <linus.walleij@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -5576,9 +5581,9 @@ S:        Maintained
 F:     drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:     Andrew Gallatin <gallatin@myri.com>
+M:     Hyong-Youb Kim <hykim@myri.com>
 L:     netdev@vger.kernel.org
-W:     http://www.myri.com/scs/download-Myri10GE.html
+W:     https://www.myricom.com/support/downloads/myri10ge.html
 S:     Supported
 F:     drivers/net/ethernet/myricom/myri10ge/
 
@@ -7361,7 +7366,6 @@ F:        drivers/net/ethernet/sfc/
 
 SGI GRU DRIVER
 M:     Dimitri Sivanich <sivanich@sgi.com>
-M:     Robin Holt <holt@sgi.com>
 S:     Maintained
 F:     drivers/misc/sgi-gru/
 
@@ -7381,7 +7385,8 @@ S:        Maintained for 2.6.
 F:     Documentation/sgi-visws.txt
 
 SGI XP/XPC/XPNET DRIVER
-M:     Robin Holt <holt@sgi.com>
+M:     Cliff Whickman <cpw@sgi.com>
+M:     Robin Holt <robinmholt@gmail.com>
 S:     Maintained
 F:     drivers/misc/sgi-xp/
 
@@ -8664,6 +8669,11 @@ T:       git git://git.alsa-project.org/alsa-kernel.git
 S:     Maintained
 F:     sound/usb/midi.*
 
+USB NETWORKING DRIVERS
+L:     linux-usb@vger.kernel.org
+S:     Odd Fixes
+F:     drivers/net/usb/
+
 USB OHCI DRIVER
 M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
diff --git a/Makefile b/Makefile
index f93d4f7a90c296bcffeec561c183ff9d492735ee..a5a55f4547c6e79fa17a0051dc5cce1daaec2a50 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a815e253340c75c633be0a18fae9d..1feb169274fe613cc5f0360c797668bf84497c4b 100644 (file)
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
        help
          Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+       bool
+       help
+         Architecture has tls passed as the 3rd argument of clone(2),
+         not the 5th one.
+
 config ODD_RT_SIGACTION
        bool
        help
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c465404d81faab3cd8d2c8427588c1354f..9c1167b0459b6b3e4afd9f2a7e60f1f2f8584e1a 100644 (file)
@@ -26,7 +26,7 @@
                cpu-offset = <0x80000>;
        };
 
-       msmgpio: gpio@fd510000 {
+       msmgpio: gpio@800000 {
                compatible = "qcom,msm-gpio";
                gpio-controller;
                #gpio-cells = <2>;
@@ -34,7 +34,7 @@
                interrupts = <0 32 0x4>;
                interrupt-controller;
                #interrupt-cells = <2>;
-               reg = <0xfd510000 0x4000>;
+               reg = <0x800000 0x4000>;
        };
 
        serial@16440000 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abff0cc4dbd9058fb95bf26c4b779b01..65d7b601651c390f78272602c79d6fb3b172ce4f 100644 (file)
 };
 
 &mmc1 {
-       vmmc-supply = <&vmmcsd_fixed>;
+       vmmc-supply = <&ldo9_reg>;
        bus-width = <4>;
 };
 
 
                        regulators {
                                smps123_reg: smps123 {
+                                       /* VDD_OPP_MPU */
                                        regulator-name = "smps123";
                                        regulator-min-microvolt = < 600000>;
                                        regulator-max-microvolt = <1500000>;
                                };
 
                                smps45_reg: smps45 {
+                                       /* VDD_OPP_MM */
                                        regulator-name = "smps45";
                                        regulator-min-microvolt = < 600000>;
                                        regulator-max-microvolt = <1310000>;
                                };
 
                                smps6_reg: smps6 {
+                                       /* VDD_DDR3 - over VDD_SMPS6 */
                                        regulator-name = "smps6";
                                        regulator-min-microvolt = <1200000>;
                                        regulator-max-microvolt = <1200000>;
                                };
 
                                smps7_reg: smps7 {
+                                       /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
                                        regulator-name = "smps7";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                smps8_reg: smps8 {
+                                       /* VDD_OPP_CORE */
                                        regulator-name = "smps8";
                                        regulator-min-microvolt = < 600000>;
                                        regulator-max-microvolt = <1310000>;
                                };
 
                                smps9_reg: smps9 {
+                                       /* VDDA_2v1_AUD over VDD_2v1 */
                                        regulator-name = "smps9";
                                        regulator-min-microvolt = <2100000>;
                                        regulator-max-microvolt = <2100000>;
-                                       regulator-always-on;
-                                       regulator-boot-on;
                                        ti,smps-range = <0x80>;
                                };
 
                                smps10_reg: smps10 {
+                                       /* VBUS_5V_OTG */
                                        regulator-name = "smps10";
                                        regulator-min-microvolt = <5000000>;
                                        regulator-max-microvolt = <5000000>;
                                };
 
                                ldo1_reg: ldo1 {
+                                       /* VDDAPHY_CAM: vdda_csiport */
                                        regulator-name = "ldo1";
-                                       regulator-min-microvolt = <2800000>;
-                                       regulator-max-microvolt = <2800000>;
-                                       regulator-always-on;
-                                       regulator-boot-on;
+                                       regulator-min-microvolt = <1500000>;
+                                       regulator-max-microvolt = <1800000>;
                                };
 
                                ldo2_reg: ldo2 {
+                                       /* VCC_2V8_DISP: Does not go anywhere */
                                        regulator-name = "ldo2";
-                                       regulator-min-microvolt = <2900000>;
-                                       regulator-max-microvolt = <2900000>;
-                                       regulator-always-on;
-                                       regulator-boot-on;
+                                       regulator-min-microvolt = <2800000>;
+                                       regulator-max-microvolt = <2800000>;
+                                       /* Unused */
+                                       status = "disabled";
                                };
 
                                ldo3_reg: ldo3 {
+                                       /* VDDAPHY_MDM: vdda_lli */
                                        regulator-name = "ldo3";
-                                       regulator-min-microvolt = <3000000>;
-                                       regulator-max-microvolt = <3000000>;
-                                       regulator-always-on;
+                                       regulator-min-microvolt = <1500000>;
+                                       regulator-max-microvolt = <1500000>;
                                        regulator-boot-on;
+                                       /* Only if Modem is used */
+                                       status = "disabled";
                                };
 
                                ldo4_reg: ldo4 {
+                                       /* VDDAPHY_DISP: vdda_dsiport/hdmi */
                                        regulator-name = "ldo4";
-                                       regulator-min-microvolt = <2200000>;
-                                       regulator-max-microvolt = <2200000>;
-                                       regulator-always-on;
-                                       regulator-boot-on;
+                                       regulator-min-microvolt = <1500000>;
+                                       regulator-max-microvolt = <1800000>;
                                };
 
                                ldo5_reg: ldo5 {
+                                       /* VDDA_1V8_PHY: usb/sata/hdmi.. */
                                        regulator-name = "ldo5";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldo6_reg: ldo6 {
+                                       /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
                                        regulator-name = "ldo6";
-                                       regulator-min-microvolt = <1500000>;
-                                       regulator-max-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1200000>;
+                                       regulator-max-microvolt = <1200000>;
                                        regulator-always-on;
                                        regulator-boot-on;
                                };
 
                                ldo7_reg: ldo7 {
+                                       /* VDD_VPP: vpp1 */
                                        regulator-name = "ldo7";
-                                       regulator-min-microvolt = <1500000>;
-                                       regulator-max-microvolt = <1500000>;
-                                       regulator-always-on;
-                                       regulator-boot-on;
+                                       regulator-min-microvolt = <2000000>;
+                                       regulator-max-microvolt = <2000000>;
+                                       /* Only for efuse reprograming! */
+                                       status = "disabled";
                                };
 
                                ldo8_reg: ldo8 {
+                                       /* VDD_3v0: Does not go anywhere */
                                        regulator-name = "ldo8";
-                                       regulator-min-microvolt = <1500000>;
-                                       regulator-max-microvolt = <1500000>;
-                                       regulator-always-on;
+                                       regulator-min-microvolt = <3000000>;
+                                       regulator-max-microvolt = <3000000>;
                                        regulator-boot-on;
+                                       /* Unused */
+                                       status = "disabled";
                                };
 
                                ldo9_reg: ldo9 {
+                                       /* VCC_DV_SDIO: vdds_sdcard */
                                        regulator-name = "ldo9";
                                        regulator-min-microvolt = <1800000>;
-                                       regulator-max-microvolt = <3300000>;
-                                       regulator-always-on;
+                                       regulator-max-microvolt = <3000000>;
                                        regulator-boot-on;
                                };
 
                                ldoln_reg: ldoln {
+                                       /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
                                        regulator-name = "ldoln";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldousb_reg: ldousb {
+                                       /* VDDA_3V_USB: VDDA_USBHS33 */
                                        regulator-name = "ldousb";
                                        regulator-min-microvolt = <3250000>;
                                        regulator-max-microvolt = <3250000>;
                                        regulator-always-on;
                                        regulator-boot-on;
                                };
+
+                               regen3_reg: regen3 {
+                                       /* REGEN3 controls LDO9 supply to card */
+                                       regulator-name = "regen3";
+                                       regulator-always-on;
+                                       regulator-boot-on;
+                               };
                        };
                };
        };
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8a2eaf6811bc93c8ee78b181afbe08..f5b9898d9c6e60330417abc3cfad494d27474d75 100644 (file)
@@ -6,10 +6,12 @@
                #address-cells = <1>;
                #size-cells = <0>;
                cpu@0 {
+                       device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0>;
                };
                cpu@1 {
+                       device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <1>;
                };
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca160411f12575672e0cdd68319605865..5592be6f2f7a7ad58cf9eb3cf0cd17cd38e207f8 100644 (file)
        };
 
        usb-phy@c5004000 {
+               status = "okay";
                nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
                        GPIO_ACTIVE_LOW>;
        };
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4cc52105fec3e8f6970614ca1d82e..a252c0bfacf50e5adb09d339e42ed0bedfd1ac08 100644 (file)
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
        return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9b14842786742f48ccdef49000802..b07c09e5a0ac86c6ddd5f7bc9ba25425c784e147 100644 (file)
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
                "       subs    %1, %0, %0, ror #16\n"
                "       addeq   %0, %0, %4\n"
                "       strexeq %2, %0, [%3]"
-               : "=&r" (slock), "=&r" (contended), "=r" (res)
+               : "=&r" (slock), "=&r" (contended), "=&r" (res)
                : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
                : "cc");
        } while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-       unsigned long tmp;
+       unsigned long contended, res;
 
-       __asm__ __volatile__(
-"      ldrex   %0, [%1]\n"
-"      teq     %0, #0\n"
-"      strexeq %0, %2, [%1]"
-       : "=&r" (tmp)
-       : "r" (&rw->lock), "r" (0x80000000)
-       : "cc");
+       do {
+               __asm__ __volatile__(
+               "       ldrex   %0, [%2]\n"
+               "       mov     %1, #0\n"
+               "       teq     %0, #0\n"
+               "       strexeq %1, %3, [%2]"
+               : "=&r" (contended), "=&r" (res)
+               : "r" (&rw->lock), "r" (0x80000000)
+               : "cc");
+       } while (res);
 
-       if (tmp == 0) {
+       if (!contended) {
                smp_mb();
                return 1;
        } else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-       unsigned long tmp, tmp2 = 1;
+       unsigned long contended, res;
 
-       __asm__ __volatile__(
-"      ldrex   %0, [%2]\n"
-"      adds    %0, %0, #1\n"
-"      strexpl %1, %0, [%2]\n"
-       : "=&r" (tmp), "+r" (tmp2)
-       : "r" (&rw->lock)
-       : "cc");
+       do {
+               __asm__ __volatile__(
+               "       ldrex   %0, [%2]\n"
+               "       mov     %1, #0\n"
+               "       adds    %0, %0, #1\n"
+               "       strexpl %1, %0, [%2]"
+               : "=&r" (contended), "=&r" (res)
+               : "r" (&rw->lock)
+               : "cc");
+       } while (res);
 
-       smp_mb();
-       return tmp2 == 0;
+       /* If the lock is negative, then it is already held for write. */
+       if (contended < 0x80000000) {
+               smp_mb();
+               return 1;
+       } else {
+               return 0;
+       }
 }
 
 /* read_can_lock - would read_trylock() succeed? */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e7219d2ac4db9013d5dc673c155167ed..0baf7f0d939484264b089c772112657cb9f15c75 100644 (file)
@@ -43,6 +43,7 @@ struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        struct vm_area_struct   *vma;
+       unsigned long           start, end;
        unsigned long           range_start;
        unsigned long           range_end;
        unsigned int            nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = fullmm;
+       tlb->fullmm = !(start | (end+1));
+       tlb->start = start;
+       tlb->end = end;
        tlb->vma = NULL;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
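
The tlb_gather_mmu() change in the hunk above recurs in the arm64 and ia64 copies of asm/tlb.h further down this diff, and the s390, sh and um versions in the file list above are touched as well: instead of a full_mm_flush flag, callers now pass the virtual range being unmapped, and a whole-address-space teardown is encoded as start = 0, end = -1, so the new fullmm test, !(start | (end + 1)), still evaluates to 1 for a full flush. A caller-side sketch of the two cases, illustrative only and not part of this commit:

#include <linux/mm_types.h>
#include <asm/tlb.h>

/* Illustrative sketch only -- shows the changed calling convention. */
static void sketch_unmap_range(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* ranged unmap: fullmm stays 0 */
	/* ... unmap_vmas()/free_pgtables() would run here ... */
	tlb_finish_mmu(&tlb, start, end);
}

static void sketch_full_teardown(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0, -1);	/* 0..-1: !(0 | 0) sets fullmm */
	/* ... tear down every vma in the address space ... */
	tlb_finish_mmu(&tlb, 0, -1);
}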
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db61ef7f4eadcf810c9569de35cba2..9cbe70c8b0ef7b8d16a806602608fba205966d31 100644 (file)
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
        .endm
 
        .macro  kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148ee107ad8db89de891c8ff42b3bc0..fc7920288a3d90a3f9c3ca38be03ff845f84515a 100644 (file)
@@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-       void *base = (void *)0xffff0000;
-#else
        void *base = vectors_page;
-#endif
        unsigned offset = FIQ_OFFSET;
 
        memcpy(base + offset, start, length);
+       if (!cache_is_vipt_nonaliasing())
+               flush_icache_range(base + offset, offset + length);
        flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-       if (!vectors_high())
-               flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf901df288b3878341a169dacb843b..d7c82df692436df0248fa1a00502cf74b7fca23c 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
@@ -38,6 +39,14 @@ int machine_kexec_prepare(struct kimage *image)
        __be32 header;
        int i, err;
 
+       /*
+        * Validate that if the current HW supports SMP, then the SW supports
+        * and implements CPU hotplug for the current HW. If not, we won't be
+        * able to kexec reliably, so fail the prepare operation.
+        */
+       if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+               return -EINVAL;
+
        /*
         * No segment at default ATAGs address. try to locate
         * a dtb using magic.
@@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image)
        unsigned long reboot_code_buffer_phys;
        void *reboot_code_buffer;
 
-       if (num_online_cpus() > 1) {
-               pr_err("kexec: error: multiple CPUs still online\n");
-               return;
-       }
+       /*
+        * This can only happen if machine_shutdown() failed to disable some
+        * CPU, and that can only happen if the checks in
+        * machine_kexec_prepare() were not correct. If this fails, we can't
+        * reliably kexec anyway, so BUG_ON is appropriate.
+        */
+       BUG_ON(num_online_cpus() > 1);
 
        page_list = image->head & PAGE_MASK;
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533fef948f68510e3a6fb3b4996b750b..e186ee1e63f6c85261f96844a594080e719c2e07 100644 (file)
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-       int mapping = (*event_map)[config];
+       int mapping;
+
+       if (config >= PERF_COUNT_HW_MAX)
+               return -EINVAL;
+
+       mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu *leader_pmu = event->group_leader->pmu;
 
+       if (is_software_event(event))
+               return 1;
+
        if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
                return 1;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85fe72a838aafe3371e0280175b8cda2b452..94f6b05f9e24e8cd1d79063f03a9b2dd16791c67 100644 (file)
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
        return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)       ((vma) = &gate_vma)
+#define is_gate_vma(vma)       ((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)       0
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a31b20126cc76ba69b14f2d22b585..2dc19349eb19fc23feafa1a2b93db17eb5e6394a 100644 (file)
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
        return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       if (smp_ops.cpu_kill)
+               return 1;
+#endif
+
+       return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e7881b538b77bc4c5ab0b5430de7175bea..905efc8cac797d5d30ae0db0ff0e28eba7fa140d 100644 (file)
@@ -121,8 +121,7 @@ config MSM_SMD
        bool
 
 config MSM_GPIOMUX
-       depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
-       bool "MSM V1 TLMM GPIOMUX architecture"
+       bool
        help
          Support for MSM V1 TLMM GPIOMUX architecture.
 
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644 (file)
index 27de2ab..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/kernel.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
-       unsigned tlmm_config  = (val & ~GPIOMUX_CTL_MASK) |
-                               ((gpio & 0x3ff) << 4);
-       unsigned tlmm_disable = 0;
-       int rc;
-
-       rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
-                          &tlmm_config, &tlmm_disable);
-       if (rc)
-               pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
-                      __func__, rc, tlmm_config, tlmm_disable);
-}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41a89235e4fbef3f7f2b7e0b7c26c84dbfc..4410d7766f93be5995c620c1c36cf2be8ce23877 100644 (file)
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
 int msm_gpiomux_write(unsigned gpio,
                      gpiomux_config_t active,
                      gpiomux_config_t suspended);
-
-/* Architecture-internal function for use by the framework only.
- * This function can assume the following:
- * - the gpio value has passed a bounds-check
- * - the gpiomux spinlock has been obtained
- *
- * This function is not for public consumption.  External users
- * should use msm_gpiomux_write.
- */
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
 #else
 static inline int msm_gpiomux_write(unsigned gpio,
                                    gpiomux_config_t active,
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeefaebb05ebe28f903083501de2461b2eaf3..043e5705f2a60ce3c6ffecf70fad28be592ec214 100644 (file)
@@ -42,7 +42,7 @@
 
 /* Using generic display panel */
 static struct tfp410_platform_data omap4_dvi_panel = {
-       .i2c_bus_num            = 3,
+       .i2c_bus_num            = 2,
        .power_down_gpio        = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
 };
 
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc92874be7e6f22d81767c0f75e141376ab83d6..f99f68e1e85bd3748c5965a0bd27c47d0f6d538e 100644 (file)
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
        struct device_node *node = pdev->dev.of_node;
        const char *oh_name;
        int oh_cnt, i, ret = 0;
+       bool device_active = false;
 
        oh_cnt = of_property_count_strings(node, "ti,hwmods");
        if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
                        goto odbfd_exit1;
                }
                hwmods[i] = oh;
+               if (oh->flags & HWMOD_INIT_NO_IDLE)
+                       device_active = true;
        }
 
        od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 
        pdev->dev.pm_domain = &omap_device_pm_domain;
 
+       if (device_active) {
+               omap_device_enable(pdev);
+               pm_runtime_set_active(&pdev->dev);
+       }
+
 odbfd_exit1:
        kfree(hwmods);
 odbfd_exit:
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_device *od = to_omap_device(pdev);
+       int i;
 
        if (!od)
                return 0;
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
         * If omap_device state is enabled, but has no driver bound,
         * idle it.
         */
+
+       /*
+        * Some devices (like memory controllers) are always kept
+        * enabled, and should not be idled even with no drivers.
+        */
+       for (i = 0; i < od->hwmods_cnt; i++)
+               if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
+                       return 0;
+
        if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
                if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
                        dev_warn(dev, "%s: enabled but no driver.  Idling\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f56df8f58d5b72d8db9a71feb86112d..7f4db12b1459881ddc2fcbf66d404bab28d89792 100644 (file)
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
 
                np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
                if (np)
-                       va_start = of_iomap(np, 0);
+                       va_start = of_iomap(np, oh->mpu_rt_idx);
        } else {
                va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
        }
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd814c0329e178cbfe265fdf9ff5ffb766c..e1482a9b3bc22f7cd7de82ac9f66474e1efbccc8 100644 (file)
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
 #define MODULEMODE_HWCTRL              1
 #define MODULEMODE_SWCTRL              2
 
+#define DEBUG_OMAP2UART1_FLAGS 0
+#define DEBUG_OMAP2UART2_FLAGS 0
+#define DEBUG_OMAP2UART3_FLAGS 0
+#define DEBUG_OMAP3UART3_FLAGS 0
+#define DEBUG_OMAP3UART4_FLAGS 0
+#define DEBUG_OMAP4UART3_FLAGS 0
+#define DEBUG_OMAP4UART4_FLAGS 0
+#define DEBUG_TI81XXUART1_FLAGS        0
+#define DEBUG_TI81XXUART2_FLAGS        0
+#define DEBUG_TI81XXUART3_FLAGS        0
+#define DEBUG_AM33XXUART1_FLAGS        0
+
+#define DEBUG_OMAPUART_FLAGS   (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
+
+#if defined(CONFIG_DEBUG_OMAP2UART1)
+#undef DEBUG_OMAP2UART1_FLAGS
+#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART2)
+#undef DEBUG_OMAP2UART2_FLAGS
+#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART3)
+#undef DEBUG_OMAP2UART3_FLAGS
+#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART3)
+#undef DEBUG_OMAP3UART3_FLAGS
+#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART4)
+#undef DEBUG_OMAP3UART4_FLAGS
+#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART3)
+#undef DEBUG_OMAP4UART3_FLAGS
+#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART4)
+#undef DEBUG_OMAP4UART4_FLAGS
+#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART1)
+#undef DEBUG_TI81XXUART1_FLAGS
+#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART2)
+#undef DEBUG_TI81XXUART2_FLAGS
+#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART3)
+#undef DEBUG_TI81XXUART3_FLAGS
+#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_AM33XXUART1)
+#undef DEBUG_AM33XXUART1_FLAGS
+#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#endif
 
 /**
  * struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@ struct omap_hwmod_link {
  * @voltdm: pointer to voltage domain (filled in at runtime)
  * @dev_attr: arbitrary device attributes that can be passed to the driver
  * @_sysc_cache: internal-use hwmod flags
+ * @mpu_rt_idx: index of device address space for register target (for DT boot)
  * @_mpu_rt_va: cached register target start address (internal use)
  * @_mpu_port: cached MPU register target slave (internal use)
  * @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@ struct omap_hwmod {
        struct list_head                node;
        struct omap_hwmod_ocp_if        *_mpu_port;
        u16                             flags;
+       u8                              mpu_rt_idx;
        u8                              response_lat;
        u8                              rst_lines_cnt;
        u8                              opt_clks_cnt;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b54567f0dbb6597a1d641ce130caf7ee9b..56cebb05509e15c0054f342e158a21214b7a6603 100644 (file)
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
        .mpu_irqs       = omap2_uart1_mpu_irqs,
        .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
        .mpu_irqs       = omap2_uart2_mpu_irqs,
        .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
        .mpu_irqs       = omap2_uart3_mpu_irqs,
        .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56346a93ab67fc377eb1ea066a8be8280f7..eb2f3b93b51c9a4d7ce32eed8f23d63cb3083d33 100644 (file)
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
        .clkdm_name     = "cpsw_125mhz_clkdm",
        .flags          = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
        .main_clk       = "cpsw_125mhz_gclk",
+       .mpu_rt_idx     = 1,
        .prcm           = {
                .omap4  = {
                        .clkctrl_offs   = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
        .name           = "uart1",
        .class          = &uart_class,
        .clkdm_name     = "l4_wkup_clkdm",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .main_clk       = "dpll_per_m2_div4_wkupdm_ck",
        .prcm           = {
                .omap4  = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2fb579613a761f9a484dbb0254cadcda9c..0c3a427da5445a5d2f990e05f2f3fcec50634143 100644 (file)
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
        .mpu_irqs       = omap2_uart1_mpu_irqs,
        .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
        .mpu_irqs       = omap2_uart2_mpu_irqs,
        .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
        .mpu_irqs       = omap2_uart3_mpu_irqs,
        .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
+                               HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
        .mpu_irqs       = uart4_mpu_irqs,
        .sdma_reqs      = uart4_sdma_reqs,
        .main_clk       = "uart4_fck",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e60fdbec74c6accf27c349a79f07a8c01fb..9c3b504477d7b341b4492ec350bb3404769fa219 100644 (file)
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
        .name           = "uart3",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
-       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
-                               HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .main_clk       = "func_48m_fclk",
        .prcm = {
                .omap4 = {
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
        .name           = "uart4",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
        .main_clk       = "func_48m_fclk",
        .prcm = {
                .omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index f37ae96b70a1a9e2c67085b2a0a466992358a4e6..3c70f5c1860fc2b96401fe6ad1256c1a975539e0 100644 (file)
@@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
        .name           = "uart3",
        .class          = &omap54xx_uart_hwmod_class,
        .clkdm_name     = "l4per_clkdm",
-       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+       .flags          = DEBUG_OMAP4UART3_FLAGS,
        .main_clk       = "func_48m_fclk",
        .prcm = {
                .omap4 = {
@@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
        .name           = "uart4",
        .class          = &omap54xx_uart_hwmod_class,
        .clkdm_name     = "l4per_clkdm",
+       .flags          = DEBUG_OMAP4UART4_FLAGS,
        .main_clk       = "func_48m_fclk",
        .prcm = {
                .omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de6cb632d21dade993c9e0cb3ca2c7babd8..a388f8c1bcb3cb2c69a199613ee3710eeb333c7b 100644 (file)
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
                                pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
                                        uart_name, uart->num);
                        }
-
-                       /*
-                        * omap-uart can be used for earlyprintk logs
-                        * So if omap-uart is used as console then prevent
-                        * uart reset and idle to get logs from omap-uart
-                        * until uart console driver is available to take
-                        * care for console messages.
-                        * Idling or resetting omap-uart while printing logs
-                        * early boot logs can stall the boot-up.
-                        */
-                       oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
                }
        } while (1);
 
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index e115f67421078cd7390cc2cc92a6b0ae43c4b36c..c5be60d85e4b95fa1339f64492d28097ac3763dc 100644 (file)
@@ -1162,9 +1162,6 @@ static void __init eva_init(void)
        gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
        gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
 
-       /* Touchscreen */
-       gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
-
        /* GETHER */
        gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
 
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d5554646916c34a962a9e1790653fc22d8beefd0..3354a85c90f7091c776ab248df641d9dddb3990d 100644 (file)
@@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
                                  "usb1", "usb1"),
        /* SDHI0 */
        PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
-                                 "sdhi0", "sdhi0"),
+                                 "sdhi0_data4", "sdhi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+                                 "sdhi0_ctrl", "sdhi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+                                 "sdhi0_cd", "sdhi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+                                 "sdhi0_wp", "sdhi0"),
 };
 
 #define FPGA   0x18200000
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index d73e21d3ea8ad9cf036483262a2010334ec0e062..8d6bd5c5efb9cd092fe4dd857057a1e6f59b9420 100644 (file)
@@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
 #define GPIO_KEY(c, g, d, ...) \
        { .code = c, .gpio = g, .desc = d, .active_low = 1 }
 
-static __initdata struct gpio_keys_button gpio_buttons[] = {
+static struct gpio_keys_button gpio_buttons[] = {
        GPIO_KEY(KEY_4,         RCAR_GP_PIN(1, 28),     "SW2-pin4"),
        GPIO_KEY(KEY_3,         RCAR_GP_PIN(1, 26),     "SW2-pin3"),
        GPIO_KEY(KEY_2,         RCAR_GP_PIN(1, 24),     "SW2-pin2"),
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc7559f5309e025c3796ff90017293570bc6e..4c09bae86edf02cb4f9408821fea25cd84d9dddb 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-       __INIT
-
 /*
  * ST specific entry point for secondary CPUs.  This provides
  * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773c85cd381f72301b8f2e9dfdc7a97..717031a762c27966aabc7786f6d2a900034b0b08 100644 (file)
@@ -35,6 +35,7 @@ struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        struct vm_area_struct   *vma;
+       unsigned long           start, end;
        unsigned long           range_start;
        unsigned long           range_end;
        unsigned int            nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = fullmm;
+       tlb->fullmm = !(start | (end+1));
+       tlb->start = start;
+       tlb->end = end;
        tlb->vma = NULL;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index f9143196345271698a674b360769cf05c8173de4..7de083d19b7ee0b5cf9f63a14f953b7311a1564e 100644 (file)
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = {
 static struct platform_device rmt_ts_device = {
        .name   = "ucb1400_ts",
        .id     = -1,
-       }
 };
 #endif
 
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055ca9d1131ffce9c517b3a4597bb38..77d442ab28c8625860c1787df12ac9dd74d7f004 100644 (file)
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
 endmenu
 
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 source "drivers/Kconfig"
 source "fs/Kconfig"
 
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954511a352fa5b24285136789425e21..bc5efc7c3f3f8ead3608780ba5e2f5b9e212e20c 100644 (file)
@@ -22,7 +22,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *     tlb <- tlb_gather_mmu(mm, full_mm_flush);       // start unmap for address space MM
+ *     tlb <- tlb_gather_mmu(mm, start, end);          // start unmap for address space MM
  *     {
  *       for each vma that needs a shootdown do {
  *         tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
        unsigned int            max;
        unsigned char           fullmm;         /* non-zero means full mm flush */
        unsigned char           need_flush;     /* really unmapped some PTEs? */
+       unsigned long           start, end;
        unsigned long           start_addr;
        unsigned long           end_addr;
        struct page             **pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
        tlb->nr = 0;
-       tlb->fullmm = full_mm_flush;
+       tlb->fullmm = !(start | (end+1));
+       tlb->start = start;
+       tlb->end = end;
        tlb->start_addr = ~0UL;
 }
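
   Following the rewritten template comment above, a caller of the new interface would
   look roughly like this (hypothetical shootdown sketch, not taken from this patch):

       struct mmu_gather tlb;

       tlb_gather_mmu(&tlb, mm, start, end);   /* start unmap for address space mm */
       /* per vma: tlb_start_vma(), tlb_remove_tlb_entry()/tlb_remove_page(), tlb_end_vma() */
       tlb_finish_mmu(&tlb, start, end);       /* finish unmap, flush TLB, free pages */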
 
index 2291a7d69d49a27c541a4746492d7be01ba17727..fa277aecfb78f1256dae50e30b0d873afbb72949 100644 (file)
 #include <asm/machdep.h>
 #include <asm/natfeat.h>
 
+extern long nf_get_id2(const char *feature_name);
+
 asm("\n"
-"      .global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+"      .global nf_get_id2,nf_call\n"
+"nf_get_id2:\n"
 "      .short  0x7300\n"
 "      rts\n"
 "nf_call:\n"
@@ -29,12 +31,25 @@ asm("\n"
 "1:    moveq.l #0,%d0\n"
 "      rts\n"
 "      .section __ex_table,\"a\"\n"
-"      .long   nf_get_id,1b\n"
+"      .long   nf_get_id2,1b\n"
 "      .long   nf_call,1b\n"
 "      .previous");
-EXPORT_SYMBOL_GPL(nf_get_id);
 EXPORT_SYMBOL_GPL(nf_call);
 
+long nf_get_id(const char *feature_name)
+{
+       /* feature_name may be in vmalloc()ed memory, so make a copy */
+       char name_copy[32];
+       size_t n;
+
+       n = strlcpy(name_copy, feature_name, sizeof(name_copy));
+       if (n >= sizeof(name_copy))
+               return 0;
+
+       return nf_get_id2(name_copy);
+}
+EXPORT_SYMBOL_GPL(nf_get_id);
+
 void nfprint(const char *fmt, ...)
 {
        static char buf[256];
index 444ea8a09e9f3386434e89d502c41d1f4107302e..ef881cfbbca90987bf0e93c9271537317c15886c 100644 (file)
                unsigned long long n64;                         \
        } __n;                                                  \
        unsigned long __rem, __upper;                           \
+       unsigned long __base = (base);                          \
                                                                \
        __n.n64 = (n);                                          \
        if ((__upper = __n.n32[0])) {                           \
                asm ("divul.l %2,%1:%0"                         \
-                       : "=d" (__n.n32[0]), "=d" (__upper)     \
-                       : "d" (base), "0" (__n.n32[0]));        \
+                    : "=d" (__n.n32[0]), "=d" (__upper)        \
+                    : "d" (__base), "0" (__n.n32[0]));         \
        }                                                       \
        asm ("divu.l %2,%1:%0"                                  \
-               : "=d" (__n.n32[1]), "=d" (__rem)               \
-               : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
+            : "=d" (__n.n32[1]), "=d" (__rem)                  \
+            : "d" (__base), "1" (__upper), "0" (__n.n32[1]));  \
        (n) = __n.n64;                                          \
        __rem;                                                  \
 })
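
   Besides the whitespace cleanup, the new __base temporary makes the macro safe when its
   base argument has side effects: the expression is now evaluated once instead of once per
   inline-asm statement. A hypothetical caller showing the case this protects (byte_count
   and next_chunk_size() are made-up names for illustration):

       u64 total = byte_count;
       u32 rem = do_div(total, next_chunk_size());  /* divisor expression evaluated exactly once */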
index d22a4ecffff422f542e96f81527b45a4e3f9c6c2..4fab52294d9874b5b07558682ec5160a470c1092 100644 (file)
@@ -28,7 +28,7 @@ config MICROBLAZE
        select GENERIC_CLOCKEVENTS
        select GENERIC_IDLE_POLL_SETUP
        select MODULES_USE_ELF_RELA
-       select CLONE_BACKWARDS
+       select CLONE_BACKWARDS3
 
 config SWAP
        def_bool n
index 1dc086087a723fd9c90c427d05f4eb3c8ca919f6..fa44f3ec530214f5014664cda4611b1a5a027251 100644 (file)
@@ -17,6 +17,8 @@
 #define current_cpu_type()     current_cpu_data.cputype
 #endif
 
+#define boot_cpu_type()                cpu_data[0].cputype
+
 /*
  * SMP assumption: Options of CPU 0 are a superset of all processors.
  * This is true for all known MIPS systems.
index 159abc8842d214b263e93bd94d9558e997402541..126da74d4c5559faf40962b80bddef9a39721757 100644 (file)
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void)
        int i, cpu = 1, boot_cpu = 0;
 
 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+       int cpu_hw_intr;
+
        /* arbitration priority */
        clear_c0_brcm_cmt_ctrl(0x30);
 
@@ -80,8 +82,12 @@ static void __init bmips_smp_setup(void)
         * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
         * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
         */
-       change_c0_brcm_cmt_intr(0xf8018000,
-                                       (0x02 << 27) | (0x03 << 15));
+       if (boot_cpu == 0)
+               cpu_hw_intr = 0x02;
+       else
+               cpu_hw_intr = 0x1d;
+
+       change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
 
        /* single core, 2 threads (2 pipelines) */
        max_cpus = 2;
index e4b1140cdae060dca0de8bfdd6a5985b1429de58..3a2b6e9f25cfb95ab0f2f5ee1eeccf069bde8f22 100644 (file)
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
                        reg.control[i] |= M_PERFCTL_USER;
                if (ctr[i].exl)
                        reg.control[i] |= M_PERFCTL_EXL;
-               if (current_cpu_type() == CPU_XLR)
+               if (boot_cpu_type() == CPU_XLR)
                        reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
                reg.counter[i] = 0x80000000 - ctr[i].count;
        }
index d22dc0d6f28922e0f6beefbc2cabad28da0d70e1..2b7e837dc2e2c275e698e101eb1fd961bc44d7ee 100644 (file)
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = {
                .end   = PNX8335_IP3902_PORTS_END,
                .flags = IORESOURCE_MEM,
        },
+#ifdef CONFIG_SOC_PNX8335
        [1] = {
                .start = PNX8335_PIC_ETHERNET_INT,
                .end   = PNX8335_PIC_ETHERNET_INT,
                .flags = IORESOURCE_IRQ,
        },
+#endif
 };
 
 static struct platform_device pnx833x_ethernet_device = {
index 99dbab1c59ac2ef78d4f4566da9c47b3e3a6350f..d60bf98fa5cf399430d5beaaed9e3cd9c7188e67 100644 (file)
@@ -55,6 +55,7 @@ config GENERIC_CSUM
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
index 3bf72cd2c8fcbf0f8feebe915d4dc2aca3f44f28..dbd9d3c991e86ca61e16fff87047bd6956b21420 100644 (file)
@@ -566,7 +566,7 @@ config SCHED_SMT
 config PPC_DENORMALISATION
        bool "PowerPC denormalisation exception handling"
        depends on PPC_BOOK3S_64
-       default "n"
+       default "y" if PPC_POWERNV
        ---help---
          Add support for handling denormalisation of single precision
          values.  Useful for bare metal only.  If unsure say Y here.
index 47a35b08b9635ce375e266a6c12fd3b02869302b..e378cccfca55bb20db094f12a2009ad3ba7bf1a4 100644 (file)
@@ -247,6 +247,10 @@ struct thread_struct {
        unsigned long   tm_orig_msr;    /* Thread's MSR on ctx switch */
        struct pt_regs  ckpt_regs;      /* Checkpointed registers */
 
+       unsigned long   tm_tar;
+       unsigned long   tm_ppr;
+       unsigned long   tm_dscr;
+
        /*
         * Transactional FP and VSX 0-31 register set.
         * NOTE: the sense of these is the opposite of the integer ckpt_regs!
index a6840e4e24f7abe68d918d6dc576bcab18d0183d..99222e27f1739c543faa916d3d766be5f99e5fbd 100644 (file)
 #define SPRN_HRMOR     0x139   /* Real mode offset register */
 #define SPRN_HSRR0     0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1     0x13B   /* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG    8       /* Enable Target Address Register */
+#define FSCR_EBB_LG    7       /* Enable Event Based Branching */
+#define FSCR_TM_LG     5       /* Enable Transactional Memory */
+#define FSCR_PM_LG     4       /* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG   3       /* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG   2       /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1       /* Enable VMX/VSX  */
+#define FSCR_FP_LG     0       /* Enable Floating Point */
 #define SPRN_FSCR      0x099   /* Facility Status & Control Register */
-#define   FSCR_TAR     (1 << (63-55)) /* Enable Target Address Register */
-#define   FSCR_EBB     (1 << (63-56)) /* Enable Event Based Branching */
-#define   FSCR_DSCR    (1 << (63-61)) /* Enable Data Stream Control Register */
+#define   FSCR_TAR     __MASK(FSCR_TAR_LG)
+#define   FSCR_EBB     __MASK(FSCR_EBB_LG)
+#define   FSCR_DSCR    __MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR     0xbe    /* HV=1 Facility Status & Control Register */
-#define   HFSCR_TAR    (1 << (63-55)) /* Enable Target Address Register */
-#define   HFSCR_EBB    (1 << (63-56)) /* Enable Event Based Branching */
-#define   HFSCR_TM     (1 << (63-58)) /* Enable Transactional Memory */
-#define   HFSCR_PM     (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define   HFSCR_BHRB   (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define   HFSCR_DSCR   (1 << (63-61)) /* Enable Data Stream Control Register */
-#define   HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX  */
-#define   HFSCR_FP     (1 << (63-63)) /* Enable Floating Point */
+#define   HFSCR_TAR    __MASK(FSCR_TAR_LG)
+#define   HFSCR_EBB    __MASK(FSCR_EBB_LG)
+#define   HFSCR_TM     __MASK(FSCR_TM_LG)
+#define   HFSCR_PM     __MASK(FSCR_PM_LG)
+#define   HFSCR_BHRB   __MASK(FSCR_BHRB_LG)
+#define   HFSCR_DSCR   __MASK(FSCR_DSCR_LG)
+#define   HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define   HFSCR_FP     __MASK(FSCR_FP_LG)
 #define SPRN_TAR       0x32f   /* Target Address Register */
 #define SPRN_LPCR      0x13E   /* LPAR Control Register */
 #define   LPCR_VPM0    (1ul << (63-0))
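
   The *_LG constants are plain bit numbers, so the rewritten masks are value-for-value
   identical to the old open-coded shifts; only the encoding is now shared between FSCR and
   HFSCR. A quick arithmetic check (illustrative, assuming __MASK(n) expands to 1UL << (n)):

       /* Old: FSCR_DSCR = 1 << (63 - 61) = 1 << 2;  New: __MASK(FSCR_DSCR_LG) = 1UL << 2 */
       /* Old: HFSCR_TM  = 1 << (63 - 58) = 1 << 5;  New: __MASK(FSCR_TM_LG)   = 1UL << 5 */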
index 49a13e0ef2344fb83c6dd89ed4ab8fd179d34c01..294c2cedcf7a622b478b748a1c5c3ddb8f537674 100644 (file)
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+       if (cpu_has_feature(CPU_FTR_ARCH_207S))
+               prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
 
 extern void giveup_fpu(struct task_struct *);
 extern void load_up_fpu(void);
index c7e8afc2ead0cde5d75b68244990092a0dea5bc3..8207459efe5619e4b9e2d824221104d111507e67 100644 (file)
@@ -138,6 +138,9 @@ int main(void)
        DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
        DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
        DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+       DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+       DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+       DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
        DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
        DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
                                         transact_vr[0]));
index ea9414c8088d01289411eded7d67315c9a2e8661..55593ee2d5aacfee6ef9e86c5adfd4057c5cab10 100644 (file)
@@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
 
 static int __init eeh_init_proc(void)
 {
-       if (machine_is(pseries))
+       if (machine_is(pseries) || machine_is(powernv))
                proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
        return 0;
 }
index ab15b8d057ad361f609b06277eb0a993574098cd..2bd0b885b0fe901328d9ae44f7703cca0a159676 100644 (file)
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 
 #ifdef CONFIG_PPC_BOOK3S_64
 BEGIN_FTR_SECTION
-       /*
-        * Back up the TAR across context switches.  Note that the TAR is not
-        * available for use in the kernel.  (To provide this, the TAR should
-        * be backed up/restored on exception entry/exit instead, and be in
-        * pt_regs.  FIXME, this should be in pt_regs anyway (for debug).)
-        */
-       mfspr   r0,SPRN_TAR
-       std     r0,THREAD_TAR(r3)
-
        /* Event based branch registers */
        mfspr   r0, SPRN_BESCR
        std     r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
        ld      r7,DSCR_DEFAULT@toc(2)
        ld      r0,THREAD_DSCR(r4)
        cmpwi   r6,0
+       li      r8, FSCR_DSCR
        bne     1f
        ld      r0,0(r7)
-1:     cmpd    r0,r25
+       b       3f
+1:
+  BEGIN_FTR_SECTION_NESTED(70)
+       mfspr   r6, SPRN_FSCR
+       or      r6, r6, r8
+       mtspr   SPRN_FSCR, r6
+    BEGIN_FTR_SECTION_NESTED(69)
+       mfspr   r6, SPRN_HFSCR
+       or      r6, r6, r8
+       mtspr   SPRN_HFSCR, r6
+    END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+       b       4f
+  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+  BEGIN_FTR_SECTION_NESTED(70)
+       mfspr   r6, SPRN_FSCR
+       andc    r6, r6, r8
+       mtspr   SPRN_FSCR, r6
+    BEGIN_FTR_SECTION_NESTED(69)
+       mfspr   r6, SPRN_HFSCR
+       andc    r6, r6, r8
+       mtspr   SPRN_HFSCR, r6
+    END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4:     cmpd    r0,r25
        beq     2f
        mtspr   SPRN_DSCR,r0
 2:
index 4e00d223b2e30924dba5e82370d5bae09e7b56dc..902ca3c6b4b6496d8e606623187c9f1da1500f2a 100644 (file)
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
        . = 0x4f80
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       facility_unavailable_relon_hv
+       b       hv_facility_unavailable_relon_hv
 
        STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        b       .ret_from_except
 
        STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+       STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
 
        .align  7
        .globl  __end_handlers
@@ -1188,7 +1189,7 @@ __end_handlers:
        STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
        STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
        STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-       STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
+       STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /*
index c517dbe705fdd5fd5944ae693c04aaaf52510e85..8083be20fe5ec2c6608ab979f988cc0d31eb6862 100644 (file)
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
        struct ppc64_tlb_batch *batch;
 #endif
 
+       /* Back up the TAR across context switches.
+        * Note that the TAR is not available for use in the kernel.  (To
+        * provide this, the TAR should be backed up/restored on exception
+        * entry/exit instead, and be in pt_regs.  FIXME, this should be in
+        * pt_regs anyway (for debug).)
+        * Save the TAR here before we do treclaim/trecheckpoint as these
+        * will change the TAR.
+        */
+       save_tar(&prev->thread);
+
        __switch_to_tm(prev);
 
 #ifdef CONFIG_SMP
index 51be8fb2480313610b5971b6ef70a89f58f794a2..0554d1f6d70dae8eeb964806ccd869b9e2a5e89f 100644 (file)
@@ -233,6 +233,16 @@ dont_backup_fp:
        std     r5, _CCR(r7)
        std     r6, _XER(r7)
 
+
+       /* ******************** TAR, PPR, DSCR ********** */
+       mfspr   r3, SPRN_TAR
+       mfspr   r4, SPRN_PPR
+       mfspr   r5, SPRN_DSCR
+
+       std     r3, THREAD_TM_TAR(r12)
+       std     r4, THREAD_TM_PPR(r12)
+       std     r5, THREAD_TM_DSCR(r12)
+
        /* MSR and flags:  We don't change CRs, and we don't need to alter
         * MSR.
         */
@@ -347,6 +357,16 @@ dont_restore_fp:
        mtmsr   r6                              /* FP/Vec off again! */
 
 restore_gprs:
+
+       /* ******************** TAR, PPR, DSCR ********** */
+       ld      r4, THREAD_TM_TAR(r3)
+       ld      r5, THREAD_TM_PPR(r3)
+       ld      r6, THREAD_TM_DSCR(r3)
+
+       mtspr   SPRN_TAR,       r4
+       mtspr   SPRN_PPR,       r5
+       mtspr   SPRN_DSCR,      r6
+
        /* ******************** CR,LR,CCR,MSR ********** */
        ld      r3, _CTR(r7)
        ld      r4, _LINK(r7)
index bf33c22e38a40848221762497ca1a7b704a27f9f..e435bc089ea3ccccb98d0acbc490071e9d75bb18 100644 (file)
@@ -44,9 +44,7 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/pmc.h>
-#ifdef CONFIG_PPC32
 #include <asm/reg.h>
-#endif
 #ifdef CONFIG_PMAC_BACKLIGHT
 #include <asm/backlight.h>
 #endif
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
 }
 
+#ifdef CONFIG_PPC64
 void facility_unavailable_exception(struct pt_regs *regs)
 {
        static char *facility_strings[] = {
-               "FPU",
-               "VMX/VSX",
-               "DSCR",
-               "PMU SPRs",
-               "BHRB",
-               "TM",
-               "AT",
-               "EBB",
-               "TAR",
+               [FSCR_FP_LG] = "FPU",
+               [FSCR_VECVSX_LG] = "VMX/VSX",
+               [FSCR_DSCR_LG] = "DSCR",
+               [FSCR_PM_LG] = "PMU SPRs",
+               [FSCR_BHRB_LG] = "BHRB",
+               [FSCR_TM_LG] = "TM",
+               [FSCR_EBB_LG] = "EBB",
+               [FSCR_TAR_LG] = "TAR",
        };
-       char *facility, *prefix;
+       char *facility = "unknown";
        u64 value;
+       u8 status;
+       bool hv;
 
-       if (regs->trap == 0xf60) {
-               value = mfspr(SPRN_FSCR);
-               prefix = "";
-       } else {
+       hv = (regs->trap == 0xf80);
+       if (hv)
                value = mfspr(SPRN_HFSCR);
-               prefix = "Hypervisor ";
+       else
+               value = mfspr(SPRN_FSCR);
+
+       status = value >> 56;
+       if (status == FSCR_DSCR_LG) {
+               /* User is accessing the DSCR.  Set the inherit bit and allow
+                * the user to set it directly in future by setting via the
+                * H/FSCR DSCR bit.
+                */
+               current->thread.dscr_inherit = 1;
+               if (hv)
+                       mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+               else
+                       mtspr(SPRN_FSCR,  value | FSCR_DSCR);
+               return;
        }
 
-       value = value >> 56;
+       if ((status < ARRAY_SIZE(facility_strings)) &&
+           facility_strings[status])
+               facility = facility_strings[status];
 
        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
 
-       if (value < ARRAY_SIZE(facility_strings))
-               facility = facility_strings[value];
-       else
-               facility = "unknown";
-
        pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
-               prefix, facility, regs->nip, regs->msr);
+              hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
 
        if (user_mode(regs)) {
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
 
        die("Unexpected facility unavailable exception", regs, SIGABRT);
 }
+#endif
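
   For context on the decode above: the facility-unavailable cause sits in the top byte of
   the (H)FSCR, so value >> 56 yields exactly the FSCR_*_LG facility number, which is why it
   can index facility_strings[] directly. A minimal decode sketch (illustrative, mirroring
   the new code rather than adding to it):

       u64 fscr  = mfspr(SPRN_FSCR);
       u8  cause = fscr >> 56;                     /* facility number that raised the interrupt */
       if (cause == FSCR_DSCR_LG)
               mtspr(SPRN_FSCR, fscr | FSCR_DSCR); /* grant the facility and let the task retry */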
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 
index 2efa9dde741a1aad3ed206481e9c272a94eb7cdf..7629cd3eb91ad69e9460204a7f7c2e8d081a1084 100644 (file)
@@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
                rma_size <<= PAGE_SHIFT;
                rmls = lpcr_rmls(rma_size);
                err = -EINVAL;
-               if (rmls < 0) {
+               if ((long)rmls < 0) {
                        pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
                        goto out_srcu;
                }
@@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
        /* Allocate the guest's logical partition ID */
 
        lpid = kvmppc_alloc_lpid();
-       if (lpid < 0)
+       if ((long)lpid < 0)
                return -ENOMEM;
        kvm->arch.lpid = lpid;
 
index 19498a567a81f25c67c7f1b3f010182fcb680e2b..c6e13d9a9e15b0be5f2b75a1151c7eea4b93b8e5 100644 (file)
@@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_shadow_vcpu;
 
+       err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
-       /* the real shared page fills the last 4k of our page */
-       vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
        if (!p)
                goto uninit_vcpu;
+       /* the real shared page fills the last 4k of our page */
+       vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
 
 #ifdef CONFIG_PPC_BOOK3S_64
        /* default to book3s_64 (970fx) */
index 9f8671a44551481b25a8c5b1404dd152c9f6df02..6a5f2b1f32ca167d3ec6b39b0dfd69067d6b37d2 100644 (file)
@@ -569,35 +569,6 @@ error:
        return ret;
 }
 
-static int unzip_oops(char *oops_buf, char *big_buf)
-{
-       struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
-       u64 timestamp = oops_hdr->timestamp;
-       char *big_oops_data = NULL;
-       char *oops_data_buf = NULL;
-       size_t big_oops_data_sz;
-       int unzipped_len;
-
-       big_oops_data = big_buf + sizeof(struct oops_log_info);
-       big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
-       oops_data_buf = oops_buf + sizeof(struct oops_log_info);
-
-       unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
-                                       oops_hdr->report_length,
-                                       big_oops_data_sz);
-
-       if (unzipped_len < 0) {
-               pr_err("nvram: decompression failed; returned %d\n",
-                                                               unzipped_len);
-               return -1;
-       }
-       oops_hdr = (struct oops_log_info *)big_buf;
-       oops_hdr->version = OOPS_HDR_VERSION;
-       oops_hdr->report_length = (u16) unzipped_len;
-       oops_hdr->timestamp = timestamp;
-       return 0;
-}
-
 static int nvram_pstore_open(struct pstore_info *psi)
 {
        /* Reset the iterator to start reading partitions again */
@@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
        unsigned int err_type, id_no, size = 0;
        struct nvram_os_partition *part = NULL;
        char *buff = NULL, *big_buff = NULL;
-       int rc, sig = 0;
+       int sig = 0;
        loff_t p;
 
-read_partition:
        read_type++;
 
        switch (nvram_type_ids[read_type]) {
@@ -749,30 +719,46 @@ read_partition:
                *id = id_no;
 
        if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+               int length, unzipped_len;
+               size_t hdr_size;
+
                oops_hdr = (struct oops_log_info *)buff;
-               *buf = buff + sizeof(*oops_hdr);
+               if (oops_hdr->version < OOPS_HDR_VERSION) {
+                       /* Old format oops header had 2-byte record size */
+                       hdr_size = sizeof(u16);
+                       length = oops_hdr->version;
+                       time->tv_sec = 0;
+                       time->tv_nsec = 0;
+               } else {
+                       hdr_size = sizeof(*oops_hdr);
+                       length = oops_hdr->report_length;
+                       time->tv_sec = oops_hdr->timestamp;
+                       time->tv_nsec = 0;
+               }
+               *buf = kmalloc(length, GFP_KERNEL);
+               if (*buf == NULL)
+                       return -ENOMEM;
+               memcpy(*buf, buff + hdr_size, length);
+               kfree(buff);
 
                if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
                        big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
                        if (!big_buff)
                                return -ENOMEM;
 
-                       rc = unzip_oops(buff, big_buff);
+                       unzipped_len = nvram_decompress(*buf, big_buff,
+                                               length, big_oops_buf_sz);
 
-                       if (rc != 0) {
-                               kfree(buff);
+                       if (unzipped_len < 0) {
+                               pr_err("nvram: decompression failed, returned "
+                                       "rc %d\n", unzipped_len);
                                kfree(big_buff);
-                               goto read_partition;
+                       } else {
+                               *buf = big_buff;
+                               length = unzipped_len;
                        }
-
-                       oops_hdr = (struct oops_log_info *)big_buff;
-                       *buf = big_buff + sizeof(*oops_hdr);
-                       kfree(buff);
                }
-
-               time->tv_sec = oops_hdr->timestamp;
-               time->tv_nsec = 0;
-               return oops_hdr->report_length;
+               return length;
        }
 
        *buf = buff;
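
   The version test above relies on the old on-NVRAM record starting with just a 16-bit
   length, so when it is read through the new header struct the 'version' field actually
   holds that length, which is always smaller than OOPS_HDR_VERSION. A rough sketch of the
   two layouts (field names assumed, not quoted from this hunk):

       /* Old, unversioned record: a bare u16 report_length followed by the oops text.
        * New record (struct oops_log_info as assumed here): u16 version, u16 report_length,
        * u64 timestamp, then the text.  Reading an old record through the new struct puts
        * the old length into 'version', which is what the comparison above keys off.      */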
@@ -816,6 +802,7 @@ static int nvram_pstore_init(void)
 static void __init nvram_init_oops_partition(int rtas_partition_exists)
 {
        int rc;
+       size_t size;
 
        rc = pseries_nvram_init_os_partition(&oops_log_partition);
        if (rc != 0) {
@@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
        big_oops_buf_sz = (oops_data_sz * 100) / 45;
        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
        if (big_oops_buf) {
-               stream.workspace = kmalloc(zlib_deflate_workspacesize(
-                               WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+               size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+                       zlib_inflate_workspacesize());
+               stream.workspace = kmalloc(size, GFP_KERNEL);
                if (!stream.workspace) {
                        pr_err("nvram: No memory for compression workspace; "
                                "skipping compression of %s partition data\n",
index 22f75b504f7f9bb8ef91ef80e8a1433995e6c6dc..8a4cae78f03c91e510f307d25e8f5467211725c3 100644 (file)
@@ -118,6 +118,7 @@ config S390
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZ4
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
        select HAVE_KERNEL_XZ
@@ -227,11 +228,12 @@ config MARCH_Z196
          not work on older machines.
 
 config MARCH_ZEC12
-       bool "IBM zEC12"
+       bool "IBM zBC12 and zEC12"
        select HAVE_MARCH_ZEC12_FEATURES if 64BIT
        help
-         Select this to enable optimizations for IBM zEC12 (2827 series). The
-         kernel will be slightly faster but will not work on older machines.
+         Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
+         2827 series). The kernel will be slightly faster but will not work on
+         older machines.
 
 endchoice
 
@@ -709,6 +711,7 @@ config S390_GUEST
        def_bool y
        prompt "s390 support for virtio devices"
        depends on 64BIT
+       select TTY
        select VIRTUALIZATION
        select VIRTIO
        select VIRTIO_CONSOLE
index 3ad8f61c99852c3fe7183a15aacfcd96ace3535a..866ecbe670e499632ef59d514dd04a280f46b890 100644 (file)
@@ -6,9 +6,9 @@
 
 BITS := $(if $(CONFIG_64BIT),64,31)
 
-targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
-          vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \
-          sizes.h head$(BITS).o
+targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += misc.o piggy.o sizes.h head$(BITS).o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
 
 suffix-$(CONFIG_KERNEL_GZIP)  := gz
 suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZ4)  := lz4
 suffix-$(CONFIG_KERNEL_LZMA)  := lzma
 suffix-$(CONFIG_KERNEL_LZO)  := lzo
 suffix-$(CONFIG_KERNEL_XZ)  := xz
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
        $(call if_changed,gzip)
 $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
        $(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+       $(call if_changed,lz4)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
        $(call if_changed,lzma)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
index c4c6a1cf221bde749673b59dccb6e3c6113a0b4c..57cbaff1f39778b98b1de870e34f99cc14043687 100644 (file)
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr;
 #include "../../../../lib/decompress_bunzip2.c"
 #endif
 
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZMA
 #include "../../../../lib/decompress_unlzma.c"
 #endif
index 4d8604e311f3aaf6b1799bd78760bcc52a5befab..7d46767587337c3874bd61c43e01a0a72f965303 100644 (file)
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
        size -= offset;
        p = addr + offset / BITS_PER_LONG;
        if (bit) {
-               set = __flo_word(0, *p & (~0UL << bit));
+               set = __flo_word(0, *p & (~0UL >> bit));
                if (set >= size)
                        return size + offset;
                if (set < BITS_PER_LONG)
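
   The one-character fix matters because find_next_bit_left() scans from the most
   significant bit downward: the 'bit' positions already consumed are the top bits of the
   word, so they must be cleared with a right-shifted mask. An 8-bit illustration (for
   exposition only):

       /* offset bit = 3 within the word:
        *   ~0 >> 3 = 0b00011111  -- keeps the bits not yet scanned (the fix)
        *   ~0 << 3 = 0b11111000  -- would re-examine bits already scanned (the old bug)
        */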
index b75d7d686684975278e1881a9aaf74590a6949ed..6d6d92b4ea113fbc692cf4dda3cac36d803128fc 100644 (file)
@@ -32,6 +32,7 @@ struct mmu_gather {
        struct mm_struct *mm;
        struct mmu_table_batch *batch;
        unsigned int fullmm;
+       unsigned long start, end;
 };
 
 struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
                                  struct mm_struct *mm,
-                                 unsigned int full_mm_flush)
+                                 unsigned long start,
+                                 unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush;
+       tlb->start = start;
+       tlb->end = end;
+       tlb->fullmm = !(start | (end+1));
        tlb->batch = NULL;
        if (tlb->fullmm)
                __tlb_flush_mm(mm);
index a6fc037671b11d0a2e96226256f9e36cd8e4d65d..500aa1029bcb2d2ed12ae352a91cec068322615b 100644 (file)
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
 
 static bool is_in_guest(struct pt_regs *regs)
 {
-       unsigned long ip = instruction_pointer(regs);
-
        if (user_mode(regs))
                return false;
-
-       return ip == (unsigned long) &sie_exit;
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+       return instruction_pointer(regs) == (unsigned long) &sie_exit;
+#else
+       return false;
+#endif
 }
 
 static unsigned long guest_is_user_mode(struct pt_regs *regs)
index 497451ec5e267f3be025b9b719cf4f2278262d97..aeed8a61fa0d4f1b4862a98cab44e29beda63699 100644 (file)
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void)
                strcpy(elf_platform, "z196");
                break;
        case 0x2827:
+       case 0x2828:
                strcpy(elf_platform, "zEC12");
                break;
        }
index ba694d2ba51e36a2933ad722db76b00e09735f37..34c1c9a90be288d9080d93373660d020bedf2a90 100644 (file)
@@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                return rc;
 
        vcpu->arch.sie_block->icptcode = 0;
-       preempt_disable();
-       kvm_guest_enter();
-       preempt_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
                                 atomic_read(&vcpu->arch.sie_block->cpuflags));
+
+       /*
+        * As PF_VCPU will be used in the fault handler, there must be no
+        * uaccess between guest_enter and guest_exit.
+        */
+       preempt_disable();
+       kvm_guest_enter();
+       preempt_enable();
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+       kvm_guest_exit();
+
+       VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+                  vcpu->arch.sie_block->icptcode);
+       trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+
        if (rc > 0)
                rc = 0;
        if (rc < 0) {
@@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
        }
-       VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
-                  vcpu->arch.sie_block->icptcode);
-       trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
-       kvm_guest_exit();
 
        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
index 0da3e6eb6be6cec4d55780b838492e52d093f08d..4cdc54e63ebcb366786347de18437f327ee8e2b2 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/compat.h>
 #include <asm/asm-offsets.h>
+#include <asm/facility.h>
 #include <asm/current.h>
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
@@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
        /* Only provide non-quiescing support if the host supports it */
-       if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
-           S390_lowcore.stfl_fac_list & 0x00020000)
+       if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
        /* No support for conditional-SSKE */
index ce36ea80e4f9157f5c754b51c1ff9c700106d689..ad446b0c55b6076d5aaf7f095f1ddc91b5e38bf4 100644 (file)
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
                order = 2;
                break;
        case 0x2827:    /* zEC12 */
+       case 0x2828:    /* zEC12 */
        default:
                order = 5;
                break;
index ffeb17ce7f313d914a38de3ee21dad5867991b9d..930783d2c99beb305b2346a220f414d98a4ba406 100644 (file)
@@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
                switch (id.machine) {
                case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
                case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
-               case 0x2827:              ops->cpu_type = "s390/zEC12"; break;
+               case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
                default: return -ENODEV;
                }
        }
index c8def8bc90209578a5a6d5cfaf8f9f258c4ec7fb..5fc237581caf3a6f3b40c41246f4d39d28f8ee55 100644 (file)
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 config MMU
        def_bool y
 
index e61d43d9f689c0ef61bc0e1e5e43b3826b66a760..362192ed12fef1789d2c23d21c654f19632006a0 100644 (file)
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush;
+       tlb->start = start;
+       tlb->end = end;
+       tlb->fullmm = !(start | (end+1));
 
        init_tlb_gather(tlb);
 }
index 4febacd1a8a1a3dee2c24e3b3447081e7ca94ebb..29b0301c18aab26f2a613d397da9856a8d1eec3b 100644 (file)
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush;
+       tlb->start = start;
+       tlb->end = end;
+       tlb->fullmm = !(start | (end+1));
 
        init_tlb_gather(tlb);
 }
index d606463aa6d63bfdab5641e6bd7e6b68545e32d9..b7388a425f0994ba87a30a27fe8ba57d7e3e5457 100644 (file)
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr)
        unsigned long nr_pages;
 
        nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
-       efi_call_phys2(sys_table->boottime->free_pages, addr, size);
+       efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
 }
 
 static void find_bits(unsigned long mask, u8 *pos, u8 *size)
index 653668d140f994e543ad52e46d0c8402d5fe9259..4a8cb8d7cbd5d2b0febd4333931b459e75f1ea1d 100644 (file)
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
         */
        if (boot_params->sentinel) {
                /* fields in boot_params are left uninitialized, clear them */
-               memset(&boot_params->olpc_ofw_header, 0,
+               memset(&boot_params->ext_ramdisk_image, 0,
                       (char *)&boot_params->efi_info -
-                       (char *)&boot_params->olpc_ofw_header);
+                       (char *)&boot_params->ext_ramdisk_image);
                memset(&boot_params->kbd_status, 0,
                       (char *)&boot_params->hdr -
                       (char *)&boot_params->kbd_status);
index 50e5c58ced23b2ec8537569a71ae4ac41566281f..4c019179a57dd97d6b48ae064ef1faea0dc2e7f7 100644 (file)
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
 
 extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
 extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
 
 #ifdef CONFIG_MICROCODE_AMD_EARLY
 #ifdef CONFIG_X86_32
index f2b489cf1602349092d26d6ea68109cca3589c4d..3bf2dd0cf61fec9615253fe3b70d3c3764a35e93 100644 (file)
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS      28
+#define PTE_FILE_SHIFT1                (_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2                (_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3                (_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4                (_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1         (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2         (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3         (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte)                                              \
+       ((((pte).pte_low >> (PTE_FILE_SHIFT1))                          \
+         & ((1U << PTE_FILE_BITS1) - 1)))                              \
+       + ((((pte).pte_low >> (PTE_FILE_SHIFT2))                        \
+           & ((1U << PTE_FILE_BITS2) - 1))                             \
+          << (PTE_FILE_BITS1))                                         \
+       + ((((pte).pte_low >> (PTE_FILE_SHIFT3))                        \
+           & ((1U << PTE_FILE_BITS3) - 1))                             \
+          << (PTE_FILE_BITS1 + PTE_FILE_BITS2))                        \
+       + ((((pte).pte_low >> (PTE_FILE_SHIFT4)))                       \
+           << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off)                                              \
+       ((pte_t) { .pte_low =                                           \
+        ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1)  \
+        + ((((off) >> PTE_FILE_BITS1)                                  \
+            & ((1U << PTE_FILE_BITS2) - 1))                            \
+           << PTE_FILE_SHIFT2)                                         \
+        + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2))               \
+            & ((1U << PTE_FILE_BITS3) - 1))                            \
+           << PTE_FILE_SHIFT3)                                         \
+        + ((((off) >>                                                  \
+             (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)))      \
+           << PTE_FILE_SHIFT4)                                         \
+        + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
  */
 #define PTE_FILE_MAX_BITS      29
 #define PTE_FILE_SHIFT1                (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
            << PTE_FILE_SHIFT3)                                         \
         + _PAGE_FILE })
 
+#endif /* CONFIG_MEM_SOFT_DIRTY */
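
   For orientation, the soft-dirty variant has one fewer offset bit (28 instead of 29)
   because a fourth PTE bit is now reserved. Assuming the usual 2-level x86 assignments
   (_PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = 6, _PAGE_BIT_PROTNONE = 8,
   _PAGE_BIT_SOFT_DIRTY = 11; stated here as an assumption, not taken from this hunk),
   the offset slices land as follows:

       /* pte bits:  0      1..5      6      7       8     9..10     11     12..31
        *            P   offset[0:4]  F  offset[5]  PN  offset[6:7]  SD  offset[8:27]
        * 5 + 1 + 2 + 20 = 28 offset bits, matching PTE_FILE_MAX_BITS above.          */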
+
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
 #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
index 4cc9f2b7cdc35a854f185b2ab91d49a9fcd1be4f..81bb91b49a88fa22b2c07fae620e76d674692419 100644 (file)
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking, bit 11 is taken from
+ * the low part of the pte as well.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
 #define pgoff_to_pte(off)                                              \
index 7dc305a46058a230ad5eef2df9f5e54b66ac0017..1c00631164c29c1d2d676f8c384141931483363b 100644 (file)
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+       return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+       return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+       return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+       return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+       return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+       return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
index c98ac63aae487580727cac5d9f194ef04d9913cb..f4843e031131d224280c4c0d845def9a094f3082 100644 (file)
  * they do not conflict with each other.
  */
 
+#define _PAGE_BIT_SOFT_DIRTY   _PAGE_BIT_HIDDEN
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY       (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY       (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
 #else
 #define _PAGE_SOFT_DIRTY       (_AT(pteval_t, 0))
 #endif
 
+/*
+ * Tracking the soft dirty bit when a page goes to swap is tricky.
+ * We need a bit which can be stored in the pte _and_ does not conflict
+ * with the swap entry format. On x86 bits 6 and 7 are *not* involved
+ * in swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY   _PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY   (_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
index 33692eaabab58619c7481cbfc7c6f01be929e5e9..e3ddd7db723f666a98c1baefce55282632fd711b 100644 (file)
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
 #endif /* _ASM_X86_SPINLOCK_H */
index f654ecefea5b6d5348df41195a529a4dce303261..08a089043ccfbb669c889ac034091a55aaa92b75 100644 (file)
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
 static const int amd_erratum_383[];
 static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
 
 static void init_amd(struct cpuinfo_x86 *c)
 {
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
                value &= ~(1ULL << 24);
                wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
 
-               if (cpu_has_amd_erratum(amd_erratum_383))
+               if (cpu_has_amd_erratum(c, amd_erratum_383))
                        set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
        }
 
-       if (cpu_has_amd_erratum(amd_erratum_400))
+       if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
 static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
 
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
-       struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;
 
-       /*
-        * If called early enough that current_cpu_data hasn't been initialized
-        * yet, fall back to boot_cpu_data.
-        */
-       if (cpu->x86 == 0)
-               cpu = &boot_cpu_data;
-
-       if (cpu->x86_vendor != X86_VENDOR_AMD)
-               return false;
-
        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;
index fbc9210b45bcbe62fd2c1b765a46701c2eb21212..a45d8d4ace1016b2ea7903569c6cacb43884df20 100644 (file)
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void)
        case 70:
        case 71:
        case 63:
+       case 69:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
index cad791dbde9540840185c3509d04c74f8859598b..1fb6c72717bd9e721a19e634852dc06d0af6f485 100644 (file)
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
-       INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x02,umask=0x08"),
-       INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x03,umask=0x04"),
+       INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
+       INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
 };
 
index 94ab6b90dd3fabdaf2856a5d7fc2abfd4ed2af88..63bdb29b25497edfb0a97ec7cfb9aa5b746d200a 100644 (file)
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 static void __init intel_remapping_check(int num, int slot, int func)
 {
        u8 revision;
+       u16 device;
 
+       device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
        revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
 
        /*
-        * Revision 0x13 of this chipset supports irq remapping
-        * but has an erratum that breaks its behavior, flag it as such
+        * Revision 0x13 of all device ids matched by this quirk has a
+        * problem draining interrupts when irq remapping is enabled,
+        * and should be flagged as broken.  Additionally, revisions 0x12
+        * and 0x22 of device id 0x3405 have this problem.
         */
        if (revision == 0x13)
                set_irq_remapping_broken();
+       else if ((device == 0x3405) &&
+           ((revision == 0x12) ||
+            (revision == 0x22)))
+               set_irq_remapping_broken();
 
 }
 
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
        { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
          PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+       { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
+         PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
        { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
          PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
        {}
index 202d24f0f7e70546a2020212b97fb450e41cc9c5..5d576ab344030832617450ac2330d63f135596a4 100644 (file)
@@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void)
 
        if (cpu_has_fxsr) {
                memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
-               asm volatile("fxsave %0" : : "m" (fx_scratch));
+               asm volatile("fxsave %0" : "+m" (fx_scratch));
                mask = fx_scratch.mxcsr_mask;
                if (mask == 0)
                        mask = 0x0000ffbf;
index 47ebb1dbfbcb2413ddaf9d4cc86b7d8558eb3c72..7123b5df479d872def8ff437fcd407c5c4d5ca50 100644 (file)
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
        return 0;
 }
 
-static unsigned int verify_patch_size(int cpu, u32 patch_size,
+static unsigned int verify_patch_size(u8 family, u32 patch_size,
                                      unsigned int size)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 max_size;
 
 #define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
 
-       switch (c->x86) {
+       switch (family) {
        case 0x14:
                max_size = F14H_MPB_MAX_SIZE;
                break;
@@ -220,12 +219,13 @@ int apply_microcode_amd(int cpu)
                return 0;
        }
 
-       if (__apply_microcode_amd(mc_amd))
+       if (__apply_microcode_amd(mc_amd)) {
                pr_err("CPU%d: update failed for patch_level=0x%08x\n",
                        cpu, mc_amd->hdr.patch_id);
-       else
-               pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
-                       mc_amd->hdr.patch_id);
+               return -1;
+       }
+       pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
+               mc_amd->hdr.patch_id);
 
        uci->cpu_sig.rev = mc_amd->hdr.patch_id;
        c->microcode = mc_amd->hdr.patch_id;
@@ -276,9 +276,8 @@ static void cleanup(void)
  * driver cannot continue functioning normally. In such cases, we tear
  * down everything we've used up so far and exit.
  */
-static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_header_amd *mc_hdr;
        struct ucode_patch *patch;
        unsigned int patch_size, crnt_size, ret;
@@ -298,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
 
        /* check if patch is for the current family */
        proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
-       if (proc_fam != c->x86)
+       if (proc_fam != family)
                return crnt_size;
 
        if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -307,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
                return crnt_size;
        }
 
-       ret = verify_patch_size(cpu, patch_size, leftover);
+       ret = verify_patch_size(family, patch_size, leftover);
        if (!ret) {
                pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
                return crnt_size;
@@ -338,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
        return crnt_size;
 }
 
-static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+                                            size_t size)
 {
        enum ucode_state ret = UCODE_ERROR;
        unsigned int leftover;
@@ -361,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
        }
 
        while (leftover) {
-               crnt_size = verify_and_add_patch(cpu, fw, leftover);
+               crnt_size = verify_and_add_patch(family, fw, leftover);
                if (crnt_size < 0)
                        return ret;
 
@@ -372,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
        return UCODE_OK;
 }
 
-enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
 {
        enum ucode_state ret;
 
        /* free old equiv table */
        free_equiv_cpu_table();
 
-       ret = __load_microcode_amd(cpu, data, size);
+       ret = __load_microcode_amd(family, data, size);
 
        if (ret != UCODE_OK)
                cleanup();
 
 #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
        /* save BSP's matching patch for early load */
-       if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-               struct ucode_patch *p = find_patch(cpu);
+       if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
+               struct ucode_patch *p = find_patch(smp_processor_id());
                if (p) {
                        memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
                        memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -440,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                goto fw_release;
        }
 
-       ret = load_microcode_amd(cpu, fw->data, fw->size);
+       ret = load_microcode_amd(c->x86, fw->data, fw->size);
 
  fw_release:
        release_firmware(fw);
index 1d14ffee57495a9793d8f9f5f01073958da6ee3e..6073104ccaa36bca776290155e42a30bdd444a8d 100644 (file)
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
        uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
 #else
-static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
-                                                struct ucode_cpu_info *uci)
+void load_ucode_amd_ap(void)
 {
+       unsigned int cpu = smp_processor_id();
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        u32 rev, eax;
 
        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
        eax = cpuid_eax(0x00000001);
 
-       uci->cpu_sig.sig = eax;
        uci->cpu_sig.rev = rev;
-       c->microcode = rev;
-       c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-}
-
-void load_ucode_amd_ap(void)
-{
-       unsigned int cpu = smp_processor_id();
-
-       collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
+       uci->cpu_sig.sig = eax;
 
        if (cpu && !ucode_loaded) {
                void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
                        return;
 
                ucode = (void *)(initrd_start + ucode_offset);
-               if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK)
+               eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+               if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
                        return;
+
                ucode_loaded = true;
        }
 
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
 {
        enum ucode_state ret;
        void *ucode;
+       u32 eax;
+
 #ifdef CONFIG_X86_32
        unsigned int bsp = boot_cpu_data.cpu_index;
        struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
                return 0;
 
        ucode = (void *)(initrd_start + ucode_offset);
-       ret = load_microcode_amd(0, ucode, ucode_size);
+       eax   = cpuid_eax(0x00000001);
+       eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+       ret = load_microcode_amd(eax, ucode, ucode_size);
        if (ret != UCODE_OK)
                return -EINVAL;
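
The family value passed to load_microcode_amd() in the hunks above comes from CPUID leaf 1: base family in EAX bits 11:8 plus extended family in bits 27:20. A minimal standalone sketch of that computation; the sample EAX value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same bit arithmetic as the patch applies to cpuid_eax(0x00000001). */
static uint8_t x86_family(uint32_t eax)
{
        return ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
}

int main(void)
{
        uint32_t eax = 0x00100f43;      /* hypothetical family-0x10 signature */

        printf("family 0x%x\n", x86_family(eax));       /* prints: family 0x10 */
        return 0;
}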
 
index dbded5aedb818d511a46967ccb1d26d77233110f..48f8375e4c6b07edfbcefd819a8210f6e0839dfe 100644 (file)
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                                *begin = new_begin;
                }
        } else {
-               *begin = TASK_UNMAPPED_BASE;
+               *begin = mmap_legacy_base();
                *end = TASK_SIZE;
        }
 }
index 62c29a5bfe26aeb67bf63fb545294b79765e445f..f63778cb2363981ad8f98d0e068e4d789c2136b0 100644 (file)
@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+unsigned long mmap_legacy_base(void)
 {
        if (mmap_is_ia32())
                return TASK_UNMAPPED_BASE;
index fd6c51cc3acb52fb49adf5c7885f6ecd318fa2e3..5a74a9c1e42c85a6999f9a4ef780728f8bdc5bcb 100644 (file)
@@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device)
        /* Clean up. */
        per_cpu(processor_device_array, pr->id) = NULL;
        per_cpu(processors, pr->id) = NULL;
-       try_offline_node(cpu_to_node(pr->id));
 
        /* Remove the CPU. */
        get_online_cpus();
@@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device)
        acpi_unmap_lsapic(pr->id);
        put_online_cpus();
 
+       try_offline_node(cpu_to_node(pr->id));
+
  out:
        free_cpumask_var(pr->throttling.shared_cpu_map);
        kfree(pr);
index f68095756fb7465bc034ca5335f9345a806dd97d..408f6b2a5fa8ee5c6578cd9e555a8386e112e370 100644 (file)
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
 static DECLARE_RWSEM(bus_type_sem);
 
 #define PHYSICAL_NODE_STRING "physical_node"
+#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
 
 int register_acpi_bus_type(struct acpi_bus_type *type)
 {
@@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
        return ret;
 }
 
-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
-                                     void *addr_p, void **ret_p)
+static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
+                                 void *not_used, void **ret_p)
 {
-       unsigned long long addr, sta;
-       acpi_status status;
+       struct acpi_device *adev = NULL;
 
-       status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
-       if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
+       acpi_bus_get_device(handle, &adev);
+       if (adev) {
                *ret_p = handle;
-               status = acpi_bus_get_status_handle(handle, &sta);
-               if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
-                       return AE_CTRL_TERMINATE;
+               return AE_CTRL_TERMINATE;
        }
        return AE_OK;
 }
 
-acpi_handle acpi_get_child(acpi_handle parent, u64 address)
+static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
 {
-       void *ret = NULL;
+       unsigned long long sta;
+       acpi_status status;
+
+       status = acpi_bus_get_status_handle(handle, &sta);
+       if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+               return false;
+
+       if (is_bridge) {
+               void *test = NULL;
+
+               /* Check if this object has at least one child device. */
+               acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+                                   acpi_dev_present, NULL, NULL, &test);
+               return !!test;
+       }
+       return true;
+}
+
+struct find_child_context {
+       u64 addr;
+       bool is_bridge;
+       acpi_handle ret;
+       bool ret_checked;
+};
+
+static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
+                                void *data, void **not_used)
+{
+       struct find_child_context *context = data;
+       unsigned long long addr;
+       acpi_status status;
 
-       if (!parent)
-               return NULL;
+       status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+       if (ACPI_FAILURE(status) || addr != context->addr)
+               return AE_OK;
 
-       acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
-                           do_acpi_find_child, &address, &ret);
-       return (acpi_handle)ret;
+       if (!context->ret) {
+               /* This is the first matching object.  Save its handle. */
+               context->ret = handle;
+               return AE_OK;
+       }
+       /*
+        * There is more than one matching object with the same _ADR value.
+        * That really is unexpected, so we are kind of beyond the scope of the
+        * spec here.  We have to choose which one to return, though.
+        *
+        * First, check if the previously found object is good enough and return
+        * its handle if so.  Second, check the same for the object that we've
+        * just found.
+        */
+       if (!context->ret_checked) {
+               if (acpi_extra_checks_passed(context->ret, context->is_bridge))
+                       return AE_CTRL_TERMINATE;
+               else
+                       context->ret_checked = true;
+       }
+       if (acpi_extra_checks_passed(handle, context->is_bridge)) {
+               context->ret = handle;
+               return AE_CTRL_TERMINATE;
+       }
+       return AE_OK;
 }
-EXPORT_SYMBOL(acpi_get_child);
+
+acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
+{
+       if (parent) {
+               struct find_child_context context = {
+                       .addr = addr,
+                       .is_bridge = is_bridge,
+               };
+
+               acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
+                                   NULL, &context, NULL);
+               return context.ret;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_find_child);
 
 int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
        struct acpi_device *acpi_dev;
        acpi_status status;
        struct acpi_device_physical_node *physical_node, *pn;
-       char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+       char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
+       struct list_head *physnode_list;
+       unsigned int node_id;
        int retval = -EINVAL;
 
        if (ACPI_HANDLE(dev)) {
@@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
 
        mutex_lock(&acpi_dev->physical_node_lock);
 
-       /* Sanity check. */
-       list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
+       /*
+        * Keep the list sorted by node_id so that the IDs of removed nodes can
+        * be recycled easily.
+        */
+       physnode_list = &acpi_dev->physical_node_list;
+       node_id = 0;
+       list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
+               /* Sanity check. */
                if (pn->dev == dev) {
                        dev_warn(dev, "Already associated with ACPI node\n");
                        goto err_free;
                }
-
-       /* allocate physical node id according to physical_node_id_bitmap */
-       physical_node->node_id =
-               find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
-               ACPI_MAX_PHYSICAL_NODE);
-       if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
-               retval = -ENOSPC;
-               goto err_free;
+               if (pn->node_id == node_id) {
+                       physnode_list = &pn->node;
+                       node_id++;
+               }
        }
 
-       set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
+       physical_node->node_id = node_id;
        physical_node->dev = dev;
-       list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
+       list_add(&physical_node->node, physnode_list);
        acpi_dev->physical_node_count++;
 
        mutex_unlock(&acpi_dev->physical_node_lock);
@@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev)
 
        mutex_lock(&acpi_dev->physical_node_lock);
        list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
-               char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+               char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
 
                entry = list_entry(node, struct acpi_device_physical_node,
                        node);
@@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev)
                        continue;
 
                list_del(node);
-               clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
 
                acpi_dev->physical_node_count--;
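
The acpi_bind_one()/acpi_unbind_one() hunks above drop the node-id bitmap and instead keep the physical-node list sorted by node_id, handing out the first unused id so that an id freed by unbind is recycled on the next bind. A standalone sketch of that allocation scheme in plain C (invented names, error handling kept minimal):

#include <stdio.h>
#include <stdlib.h>

struct pnode {
        unsigned int id;
        struct pnode *next;
};

/* Walk the sorted list, take the first free id, insert in order. */
static struct pnode *bind_node(struct pnode **head)
{
        struct pnode **pos = head;
        struct pnode *n;
        unsigned int id = 0;

        while (*pos && (*pos)->id == id) {
                pos = &(*pos)->next;
                id++;
        }

        n = malloc(sizeof(*n));
        if (!n)
                return NULL;
        n->id = id;
        n->next = *pos;
        *pos = n;                       /* insertion keeps the list sorted */
        return n;
}

int main(void)
{
        struct pnode *head = NULL, *b;

        bind_node(&head);               /* id 0 */
        b = bind_node(&head);           /* id 1 */
        bind_node(&head);               /* id 2 */

        head->next = b->next;           /* unbind id 1 */
        free(b);

        b = bind_node(&head);
        if (b)
                printf("recycled id %u\n", b->id);      /* prints: recycled id 1 */
        return 0;
}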
 
index aa1227a7e3f23e349ed5050680d4fef8c135cfc8..04a13784dd20a4a7e42b15b98eedcff90cb67299 100644 (file)
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
                           dev->pnp.bus_id,
                           (u32) dev->wakeup.sleep_state);
 
+               mutex_lock(&dev->physical_node_lock);
+
                if (!dev->physical_node_count) {
                        seq_printf(seq, "%c%-8s\n",
                                dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
                                put_device(ldev);
                        }
                }
+
+               mutex_unlock(&dev->physical_node_lock);
        }
        mutex_unlock(&acpi_device_lock);
        return 0;
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
 {
        struct acpi_device_physical_node *entry;
 
+       mutex_lock(&adev->physical_node_lock);
+
        list_for_each_entry(entry,
                &adev->physical_node_list, node)
                if (entry->dev && device_can_wakeup(entry->dev)) {
                        bool enable = !device_may_wakeup(entry->dev);
                        device_set_wakeup_enable(entry->dev, enable);
                }
+
+       mutex_unlock(&adev->physical_node_lock);
 }
 
 static ssize_t
index 0ec434d2586da29d65003aed0b7d9c7dd244944e..e1284b8dc6eef9b800f997ecbf6bc5e60214eacc 100644 (file)
@@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
         * Some systems always report current brightness level as maximum
         * through _BQC, we need to test another value for them.
         */
-       test_level = current_level == max_level ? br->levels[2] : max_level;
+       test_level = current_level == max_level ? br->levels[3] : max_level;
 
        result = acpi_video_device_lcd_set_level(device, test_level);
        if (result)
index 4ec7c04b3f8279dff1f2e39f356106e92815e39f..26386f0b89a8f4583c397ca9da154a9c499c6d2f 100644 (file)
@@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = {
                /* sentinel */
        }
 };
+MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
 
 static struct platform_driver pata_imx_driver = {
        .probe          = pata_imx_probe,
index e69102696533e7224b74a1237f64c478eeb2ab18..3455f833e473c8a053898c4e96e540612dd1d10e 100644 (file)
@@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
                }
        }
 
-       return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+       return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+                       map->reg_stride);
 }
 
 int regcache_sync_block(struct regmap *map, void *block,
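
The one-line regcache change above extends the flush range by map->reg_stride. Assuming the flush helper treats its end argument as exclusive (which is what the fix implies), passing regtmp alone left the final register of the block unsynced. A tiny standalone illustration of the half-open range rule, with made-up register numbers:

#include <stdio.h>

static void flush(unsigned int base, unsigned int end, unsigned int stride)
{
        for (unsigned int reg = base; reg < end; reg += stride)
                printf("sync 0x%x\n", reg);
}

int main(void)
{
        unsigned int base = 0x10, last = 0x14, stride = 4;

        flush(base, last, stride);              /* misses 0x14 */
        flush(base, last + stride, stride);     /* covers 0x10 and 0x14 */
        return 0;
}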
index 99cb944a002dc4dfd5f2010a6f2de9d75c09ebc8..4d45dba7fb8f9f1c6e97d8fa4192ff4e63d6230d 100644 (file)
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
        int i;
 
        bio_for_each_segment(bv, bio, i) {
-               page = bv->bv_page;
                /* Non-zero page count for non-head members of
-                * compound pages is no longer allowed by the kernel,
-                * but this has never been seen here.
+                * compound pages is no longer allowed by the kernel.
                 */
-               if (unlikely(PageCompound(page)))
-                       if (compound_trans_head(page) != page) {
-                               pr_crit("page tail used for block I/O\n");
-                               BUG();
-                       }
+               page = compound_trans_head(bv->bv_page);
                atomic_inc(&page->_count);
        }
 }
@@ -924,10 +918,13 @@ static void
 bio_pagedec(struct bio *bio)
 {
        struct bio_vec *bv;
+       struct page *page;
        int i;
 
-       bio_for_each_segment(bv, bio, i)
-               atomic_dec(&bv->bv_page->_count);
+       bio_for_each_segment(bv, bio, i) {
+               page = compound_trans_head(bv->bv_page);
+               atomic_dec(&page->_count);
+       }
 }
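
The aoe hunks above replace the explicit tail-page check with compound_trans_head(), so both the increment and the decrement always land on the compound head. A toy model of why the reference has to be redirected to the head (nothing here is the kernel's struct page):

#include <stdio.h>

struct page {
        struct page *head;              /* a head page points at itself */
        int count;
};

static struct page *head_of(struct page *p)
{
        return p->head;
}

static void get_page_ref(struct page *p)
{
        head_of(p)->count++;            /* never bump a tail page's count */
}

int main(void)
{
        struct page head = { .head = &head, .count = 1 };
        struct page tail = { .head = &head, .count = 0 };

        get_page_ref(&tail);
        printf("head=%d tail=%d\n", head.count, tail.count);    /* head=2 tail=0 */
        return 0;
}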
 
 static void
index 1b456fe9b87a12f98e8a6d691487c252271cb3e6..fc45567ad3acef079db496c9a2f495e19e5b5bba 100644 (file)
@@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
        unsigned long flags;
 
        spin_lock_irqsave(&portdev->ports_lock, flags);
-       list_for_each_entry(port, &portdev->ports, list)
-               if (port->cdev->dev == dev)
+       list_for_each_entry(port, &portdev->ports, list) {
+               if (port->cdev->dev == dev) {
+                       kref_get(&port->kref);
                        goto out;
+               }
+       }
        port = NULL;
 out:
        spin_unlock_irqrestore(&portdev->ports_lock, flags);
@@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
 
        port = filp->private_data;
 
+       /* Port is hot-unplugged. */
+       if (!port->guest_connected)
+               return -ENODEV;
+
        if (!port_has_data(port)) {
                /*
                 * If nothing's connected on the host just return 0 in
@@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
                if (ret < 0)
                        return ret;
        }
-       /* Port got hot-unplugged. */
+       /* Port got hot-unplugged while we were waiting above. */
        if (!port->guest_connected)
                return -ENODEV;
        /*
@@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
        if (is_rproc_serial(port->out_vq->vdev))
                return -EINVAL;
 
+       /*
+        * pipe->nrbufs == 0 means there is no data to transfer,
+        * so just return 0 in that case.
+        */
+       pipe_lock(pipe);
+       if (!pipe->nrbufs) {
+               ret = 0;
+               goto error_out;
+       }
+
        ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
        if (ret < 0)
-               return ret;
+               goto error_out;
 
        buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
-       if (!buf)
-               return -ENOMEM;
+       if (!buf) {
+               ret = -ENOMEM;
+               goto error_out;
+       }
 
        sgl.n = 0;
        sgl.len = 0;
@@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
        sgl.sg = buf->sg;
        sg_init_table(sgl.sg, sgl.size);
        ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+       pipe_unlock(pipe);
        if (likely(ret > 0))
                ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
 
        if (unlikely(ret <= 0))
                free_buf(buf, true);
        return ret;
+
+error_out:
+       pipe_unlock(pipe);
+       return ret;
 }
 
 static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
        struct port *port;
        int ret;
 
+       /* We get the port with a kref here */
        port = find_port_by_devt(cdev->dev);
+       if (!port) {
+               /* Port was unplugged before we could proceed */
+               return -ENXIO;
+       }
        filp->private_data = port;
 
-       /* Prevent against a port getting hot-unplugged at the same time */
-       spin_lock_irq(&port->portdev->ports_lock);
-       kref_get(&port->kref);
-       spin_unlock_irq(&port->portdev->ports_lock);
-
        /*
         * Don't allow opening of console port devices -- that's done
         * via /dev/hvc
@@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref)
 
        port = container_of(kref, struct port, kref);
 
-       sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
-       device_destroy(pdrvdata.class, port->dev->devt);
-       cdev_del(port->cdev);
-
-       kfree(port->name);
-
-       debugfs_remove(port->debugfs_file);
-
        kfree(port);
 }
 
@@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port)
        spin_unlock_irq(&port->portdev->ports_lock);
 
        if (port->guest_connected) {
+               /* Let the app know the port is going down. */
+               send_sigio_to_port(port);
+
+               /* Do this after sigio is actually sent */
                port->guest_connected = false;
                port->host_connected = false;
-               wake_up_interruptible(&port->waitqueue);
 
-               /* Let the app know the port is going down. */
-               send_sigio_to_port(port);
+               wake_up_interruptible(&port->waitqueue);
        }
 
        if (is_console_port(port)) {
@@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port)
         */
        port->portdev = NULL;
 
+       sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+       device_destroy(pdrvdata.class, port->dev->devt);
+       cdev_del(port->cdev);
+
+       kfree(port->name);
+
+       debugfs_remove(port->debugfs_file);
+
        /*
         * Locks around here are not necessary - a port can't be
         * opened after we removed the port struct from ports_list
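
The virtio_console hunks above move the kref_get() into find_port_by_devt_in_portdev(), so the reference is taken while ports_lock is still held instead of after the lookup has already dropped it. A compilable sketch of that lookup-takes-the-reference pattern, with a pthread mutex and a plain counter standing in for the spinlock and kref (all names invented):

#include <pthread.h>
#include <stdio.h>

struct port {
        int devt;
        int refcount;
        struct port *next;
};

static pthread_mutex_t ports_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port *ports;

static struct port *find_port_by_devt(int devt)
{
        struct port *p;

        pthread_mutex_lock(&ports_lock);
        for (p = ports; p; p = p->next) {
                if (p->devt == devt) {
                        p->refcount++;  /* grab the reference under the lock */
                        break;
                }
        }
        pthread_mutex_unlock(&ports_lock);
        return p;                       /* NULL: port already unplugged */
}

int main(void)
{
        struct port a = { .devt = 1, .refcount = 1, .next = NULL };
        struct port *p;

        ports = &a;
        p = find_port_by_devt(1);
        printf("%s, refcount %d\n", p ? "found" : "missing",
               p ? p->refcount : 0);
        return 0;
}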
index 1bdb882c845bd48a96597bb8f65c2cbe2a3c03c2..4e5739773c33a25f6bf5503e9261b18a11a473d2 100644 (file)
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
        DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
        DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
        DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
-       DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
-       DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+       DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
+                                               CLK_GET_RATE_NOCACHE, 0),
+       DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
+                                               CLK_GET_RATE_NOCACHE, 0),
        DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
-       DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3),
-       DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3),
+       DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
+                                               4, 3, CLK_GET_RATE_NOCACHE, 0),
+       DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
+                                               8, 3, CLK_GET_RATE_NOCACHE, 0),
        DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
 };
 
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
        GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
                        E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
        GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
-                       CLK_IGNORE_UNUSED, 0),
+                       CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
 };
 
index 5c205b60a82a21aa7bd780f17133c9d6a00bcf5c..089d3e30e2216e44b20067fdaf9ded66708d0bb2 100644 (file)
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock);
 static DEFINE_SPINLOCK(ddrpll_lock);
 static DEFINE_SPINLOCK(iopll_lock);
 static DEFINE_SPINLOCK(armclk_lock);
+static DEFINE_SPINLOCK(swdtclk_lock);
 static DEFINE_SPINLOCK(ddrclk_lock);
 static DEFINE_SPINLOCK(dciclk_lock);
 static DEFINE_SPINLOCK(gem0clk_lock);
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np)
        }
        clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
                        swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
-                       SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock);
+                       SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
 
        /* DDR clocks */
        clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np)
                        CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
                        CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
                        &gem0clk_lock);
-       clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0,
-                       SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock);
+       clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
+                       CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+                       &gem0clk_lock);
        clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
                        "gem0_emio_mux", CLK_SET_RATE_PARENT,
                        SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np)
                        CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
                        CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
                        &gem1clk_lock);
-       clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0,
-                       SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock);
+       clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
+                       CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+                       &gem1clk_lock);
        clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
                        "gem1_emio_mux", CLK_SET_RATE_PARENT,
                        SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
index 0ceb2eff5a7e7044adfcf37a5522b131017d6088..f97cb3d8c5a232a60f319e04da5c0616624925d4 100644 (file)
@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
        return count;
 }
 
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+               const char *buf, size_t count)
 {
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input, j;
@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
        if (input > 1)
                input = 1;
 
-       if (input == cs_tuners->ignore_nice) /* nothing to do */
+       if (input == cs_tuners->ignore_nice_load) /* nothing to do */
                return count;
 
-       cs_tuners->ignore_nice = input;
+       cs_tuners->ignore_nice_load = input;
 
        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
                dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                                        &dbs_info->cdbs.prev_cpu_wall, 0);
-               if (cs_tuners->ignore_nice)
+               if (cs_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
 show_store_one(cs, sampling_down_factor);
 show_store_one(cs, up_threshold);
 show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice);
+show_store_one(cs, ignore_nice_load);
 show_store_one(cs, freq_step);
 declare_show_sampling_rate_min(cs);
 
@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
 gov_sys_pol_attr_rw(sampling_down_factor);
 gov_sys_pol_attr_rw(up_threshold);
 gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
 gov_sys_pol_attr_rw(freq_step);
 gov_sys_pol_attr_ro(sampling_rate_min);
 
@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_down_factor_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &down_threshold_gov_sys.attr,
-       &ignore_nice_gov_sys.attr,
+       &ignore_nice_load_gov_sys.attr,
        &freq_step_gov_sys.attr,
        NULL
 };
@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_down_factor_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &down_threshold_gov_pol.attr,
-       &ignore_nice_gov_pol.attr,
+       &ignore_nice_load_gov_pol.attr,
        &freq_step_gov_pol.attr,
        NULL
 };
@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
        tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
        tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-       tuners->ignore_nice = 0;
+       tuners->ignore_nice_load = 0;
        tuners->freq_step = DEF_FREQUENCY_STEP;
 
        dbs_data->tuners = tuners;
index 7b839a8db2a7ae6f0bc647e249c326a70623b62a..e59afaa9da23af0dfd826751e817ab6b41e94a0b 100644 (file)
@@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
        unsigned int j;
 
        if (dbs_data->cdata->governor == GOV_ONDEMAND)
-               ignore_nice = od_tuners->ignore_nice;
+               ignore_nice = od_tuners->ignore_nice_load;
        else
-               ignore_nice = cs_tuners->ignore_nice;
+               ignore_nice = cs_tuners->ignore_nice_load;
 
        policy = cdbs->cur_policy;
 
@@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                cs_tuners = dbs_data->tuners;
                cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = cs_tuners->sampling_rate;
-               ignore_nice = cs_tuners->ignore_nice;
+               ignore_nice = cs_tuners->ignore_nice_load;
        } else {
                od_tuners = dbs_data->tuners;
                od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = od_tuners->sampling_rate;
-               ignore_nice = od_tuners->ignore_nice;
+               ignore_nice = od_tuners->ignore_nice_load;
                od_ops = dbs_data->cdata->gov_ops;
                io_busy = od_tuners->io_is_busy;
        }
index 6663ec3b30565ac7efb6a7b5842e301ad2160bba..d5f12b4b11b8af09c608bd6f74f01601e0a91f2d 100644 (file)
@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
 
 /* Per policy Governors sysfs tunables */
 struct od_dbs_tuners {
-       unsigned int ignore_nice;
+       unsigned int ignore_nice_load;
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
@@ -175,7 +175,7 @@ struct od_dbs_tuners {
 };
 
 struct cs_dbs_tuners {
-       unsigned int ignore_nice;
+       unsigned int ignore_nice_load;
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
index 93eb5cbcc1f639bf1ec52479a169a49deecd72f5..c087347d66884f03a4f1b2a94bf4fcbaceeda7a0 100644 (file)
@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
        return count;
 }
 
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
-               size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+               const char *buf, size_t count)
 {
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
        if (input > 1)
                input = 1;
 
-       if (input == od_tuners->ignore_nice) { /* nothing to do */
+       if (input == od_tuners->ignore_nice_load) { /* nothing to do */
                return count;
        }
-       od_tuners->ignore_nice = input;
+       od_tuners->ignore_nice_load = input;
 
        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
                dbs_info = &per_cpu(od_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                        &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
-               if (od_tuners->ignore_nice)
+               if (od_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
 show_store_one(od, io_is_busy);
 show_store_one(od, up_threshold);
 show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice);
+show_store_one(od, ignore_nice_load);
 show_store_one(od, powersave_bias);
 declare_show_sampling_rate_min(od);
 
@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
 gov_sys_pol_attr_rw(io_is_busy);
 gov_sys_pol_attr_rw(up_threshold);
 gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
 gov_sys_pol_attr_rw(powersave_bias);
 gov_sys_pol_attr_ro(sampling_rate_min);
 
@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
-       &ignore_nice_gov_sys.attr,
+       &ignore_nice_load_gov_sys.attr,
        &powersave_bias_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL
@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
-       &ignore_nice_gov_pol.attr,
+       &ignore_nice_load_gov_pol.attr,
        &powersave_bias_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL
@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data)
        }
 
        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-       tuners->ignore_nice = 0;
+       tuners->ignore_nice_load = 0;
        tuners->powersave_bias = default_powersave_bias;
        tuners->io_is_busy = should_io_be_busy();
 
index bb838b98507747a02ade005d99074a4c2a1d95a8..9536852c504a559865bc023a2d75ebed22a0ffd9 100644 (file)
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
                clk_put(cpuclk);
                return -EINVAL;
        }
-       ret = clk_set_rate(cpuclk, rate);
-       if (ret) {
-               clk_put(cpuclk);
-               return ret;
-       }
 
        /* clock table init */
        for (i = 2;
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
             i++)
                loongson2_clockmod_table[i].frequency = (rate * i) / 8;
 
+       ret = clk_set_rate(cpuclk, rate);
+       if (ret) {
+               clk_put(cpuclk);
+               return ret;
+       }
+
        policy->cur = loongson2_cpufreq_get(policy->cpu);
 
        cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
index b67f45f5c2712a8c16be7afad085bd745649a708..5039fbc88254eabd570e9da6d50d51ed0cff8240 100644 (file)
@@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan,
                                                    shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
-       return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
-               sh_chan->xmit_shift;
+       return sh_desc->hw.tcr -
+               (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
 }
 
 /* Called from error IRQ or NMI */
index 98d670825a1a115a43fd5c6d49bd494ce839c94b..6e8887fe6c1b44fb83d09df82d10d50483924de5 100644 (file)
@@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 
        astbo->gem.driver_private = NULL;
        astbo->bo.bdev = &ast->ttm.bdev;
+       astbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
        ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
index 0047012045c27cff7cfa8dbd386ca239d850e4be..69fd8f1ac8df192729c89f4dd981a5bb5b7611db 100644 (file)
@@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 
        cirrusbo->gem.driver_private = NULL;
        cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+       cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
        cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
index 8bcce7866d368e12df7c86edb17ff235ba58f4a7..f92da0a32f0d30d02fe8afeb09f5272a16b8a060 100644 (file)
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
        /* Subtract time delta from raw timestamp to get final
         * vblank_time timestamp for end of vblank.
         */
-       etime = ktime_sub_ns(etime, delta_ns);
+       if (delta_ns < 0)
+               etime = ktime_add_ns(etime, -delta_ns);
+       else
+               etime = ktime_sub_ns(etime, delta_ns);
        *vblank_time = ktime_to_timeval(etime);
 
        DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
index dc53a527126b0569800ff2df3a8a36ebbf904855..9e6578330801638caeb91e7f92e8e0139660eb6f 100644 (file)
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
 {
+       struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+       mutex_lock(&obj->base.dev->struct_mutex);
+
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
+
+       i915_gem_object_unpin_pages(obj);
+
+       mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
index f2326fc60ac93d4a2f1c760333945fb33bbe0524..6f514297c4837882a64f9480b9f944c3913f7b0c 100644 (file)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV       (1 << 2)
 
 #define PORT_HOTPLUG_STAT      (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define   PORTB_HOTPLUG_LIVE_STATUS               (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define   PORTD_HOTPLUG_LIVE_STATUS               (1 << 29)
 #define   PORTC_HOTPLUG_LIVE_STATUS               (1 << 28)
-#define   PORTD_HOTPLUG_LIVE_STATUS               (1 << 27)
+#define   PORTB_HOTPLUG_LIVE_STATUS               (1 << 27)
 #define   PORTD_HOTPLUG_INT_STATUS             (3 << 21)
 #define   PORTC_HOTPLUG_INT_STATUS             (3 << 19)
 #define   PORTB_HOTPLUG_INT_STATUS             (3 << 17)
index 5fb305840db89ecd80b5cd42b69ff9c35dd9432b..be79f477a38f9e48de386332e4062f09484a3453 100644 (file)
@@ -8269,9 +8269,11 @@ check_crtc_state(struct drm_device *dev)
 
                list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                                    base.head) {
+                       enum pipe pipe;
                        if (encoder->base.crtc != &crtc->base)
                                continue;
-                       if (encoder->get_config)
+                       if (encoder->get_config &&
+                           encoder->get_hw_state(encoder, &pipe))
                                encoder->get_config(encoder, &pipe_config);
                }
 
@@ -10040,6 +10042,8 @@ struct intel_display_error_state {
 
        u32 power_well_driver;
 
+       int num_transcoders;
+
        struct intel_cursor_error_state {
                u32 control;
                u32 position;
@@ -10048,16 +10052,7 @@ struct intel_display_error_state {
        } cursor[I915_MAX_PIPES];
 
        struct intel_pipe_error_state {
-               enum transcoder cpu_transcoder;
-               u32 conf;
                u32 source;
-
-               u32 htotal;
-               u32 hblank;
-               u32 hsync;
-               u32 vtotal;
-               u32 vblank;
-               u32 vsync;
        } pipe[I915_MAX_PIPES];
 
        struct intel_plane_error_state {
@@ -10069,6 +10064,19 @@ struct intel_display_error_state {
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];
+
+       struct intel_transcoder_error_state {
+               enum transcoder cpu_transcoder;
+
+               u32 conf;
+
+               u32 htotal;
+               u32 hblank;
+               u32 hsync;
+               u32 vtotal;
+               u32 vblank;
+               u32 vsync;
+       } transcoder[4];
 };
 
 struct intel_display_error_state *
@@ -10076,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
-       enum transcoder cpu_transcoder;
+       int transcoders[] = {
+               TRANSCODER_A,
+               TRANSCODER_B,
+               TRANSCODER_C,
+               TRANSCODER_EDP,
+       };
        int i;
 
+       if (INTEL_INFO(dev)->num_pipes == 0)
+               return NULL;
+
        error = kmalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;
@@ -10087,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev)
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
        for_each_pipe(i) {
-               cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-               error->pipe[i].cpu_transcoder = cpu_transcoder;
-
                if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
                        error->cursor[i].control = I915_READ(CURCNTR(i));
                        error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10113,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev)
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }
 
-               error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->pipe[i].source = I915_READ(PIPESRC(i));
-               error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-               error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-               error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-               error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-               error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-               error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+       }
+
+       error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+       if (HAS_DDI(dev_priv->dev))
+               error->num_transcoders++; /* Account for eDP. */
+
+       for (i = 0; i < error->num_transcoders; i++) {
+               enum transcoder cpu_transcoder = transcoders[i];
+
+               error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+               error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+               error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+               error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+               error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+               error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+               error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+               error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }
 
        /* In the code above we read the registers without checking if the power
@@ -10142,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 {
        int i;
 
+       if (!error)
+               return;
+
        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
        if (HAS_POWER_WELL(dev))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(i) {
                err_printf(m, "Pipe [%d]:\n", i);
-               err_printf(m, "  CPU transcoder: %c\n",
-                          transcoder_name(error->pipe[i].cpu_transcoder));
-               err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-               err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-               err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-               err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-               err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-               err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-               err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
 
                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
@@ -10178,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }
+
+       for (i = 0; i < error->num_transcoders; i++) {
+               err_printf(m, "  CPU transcoder: %c\n",
+                          transcoder_name(error->transcoder[i].cpu_transcoder));
+               err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+               err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+               err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+               err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+               err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+               err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+               err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+       }
 }
 #endif
index 67e2c1f1c9a8309b809c35f32395e26e67236992..5950888ae1d00bd7cf70ebb18d5595ee0ee8446e 100644 (file)
@@ -497,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
                goto out;
        }
 
-       /* scale to hardware */
-       level = level * freq / max;
+       /* scale to hardware, but be careful to not overflow */
+       if (freq < max)
+               level = level * freq / max;
+       else
+               level = freq / max * level;
 
        dev_priv->backlight.level = level;
        if (dev_priv->backlight.device)
@@ -515,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
+       /*
+        * Do not disable backlight on the vgaswitcheroo path. When switching
+        * away from i915, the other client may depend on i915 to handle the
+        * backlight. This will leave the backlight on unnecessarily when
+        * another client is not activated.
+        */
+       if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+               DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+               return;
+       }
+
        spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
        dev_priv->backlight.enabled = false;
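
The intel_panel_set_backlight() hunk above guards the level * freq product against 32-bit overflow when the PWM frequency exceeds the maximum level, accepting some precision loss in that branch. A standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Invented numbers, chosen only so that level * freq exceeds 2^32. */
        uint32_t level = 200000, freq = 300000, max = 250000;

        uint32_t naive   = level * freq / max;          /* product wraps at 2^32 */
        uint32_t guarded = freq < max ? level * freq / max
                                      : freq / max * level;

        /* The guarded branch divides first, trading precision for
         * staying within 32 bits, which mirrors the patch. */
        printf("naive=%u guarded=%u exact=%llu\n", naive, guarded,
               (unsigned long long)level * freq / max);
        return 0;
}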
index f895d1508df8fbb9646ed064f1d2ce8244d4f4d0..b0e4a0bd1313c0dafac84df1b6af56e7ccced950 100644 (file)
@@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
                }
        } else {
                if (enable_requested) {
+                       unsigned long irqflags;
+                       enum pipe p;
+
                        I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+                       POSTING_READ(HSW_PWR_WELL_DRIVER);
                        DRM_DEBUG_KMS("Requesting to disable the power well\n");
+
+                       /*
+                        * After this, the registers on the pipes that are part
+                        * of the power well will become zero, so we have to
+                        * adjust our counters according to that.
+                        *
+                        * FIXME: Should we do this in general in
+                        * drm_vblank_post_modeset?
+                        */
+                       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+                       for_each_pipe(p)
+                               if (p != PIPE_A)
+                                       dev->last_vblank[p] = 0;
+                       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
                }
        }
 }
index 13878d5de063f4b08b97ee2f5735dc2e3e7cb37b..d70e4a92773bfda96079b1406ffffa9a2f65a0e7 100644 (file)
@@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
 
        mgabo->gem.driver_private = NULL;
        mgabo->bo.bdev = &mdev->ttm.bdev;
+       mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
 
        mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
index 0bfd55e088201d79e3499b8c98585895f2c736c8..9953e1fbc46d74e53adf50eab2eb05e602287f54 100644 (file)
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev)
 {
        struct rv7xx_power_info *pi;
        struct evergreen_power_info *eg_pi;
-       int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-       u16 data_offset, size;
-       u8 frev, crev;
        struct atom_clock_dividers dividers;
        int ret;
 
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev)
        eg_pi->vddci_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
 
-       if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset)) {
-               pi->sclk_ss = true;
-               pi->mclk_ss = true;
-               pi->dynamic_ss = true;
-       } else {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
-               pi->dynamic_ss = true;
-       }
+       rv770_get_engine_memory_ss(rdev);
 
        pi->asi = RV770_ASI_DFLT;
        pi->pasi = CYPRESS_HASI_DFLT;
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev)
 
        pi->dynamic_pcie_gen2 = true;
 
-       if (pi->gfx_clock_gating &&
-           (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+       if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
                pi->thermal_protection = true;
        else
                pi->thermal_protection = false;
index 6dacec4e20901bd1f12e428a387434a8db8a91a8..8928bd109c1647b684293cb49ed91198419eb33c 100644 (file)
@@ -2587,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
        if (rdev->wb.enabled) {
                rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
        } else {
+               mutex_lock(&rdev->srbm_mutex);
                cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
                rptr = RREG32(CP_HQD_PQ_RPTR);
                cik_srbm_select(rdev, 0, 0, 0, 0);
+               mutex_unlock(&rdev->srbm_mutex);
        }
        rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
 
@@ -2604,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
        if (rdev->wb.enabled) {
                wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
        } else {
+               mutex_lock(&rdev->srbm_mutex);
                cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
                wptr = RREG32(CP_HQD_PQ_WPTR);
                cik_srbm_select(rdev, 0, 0, 0, 0);
+               mutex_unlock(&rdev->srbm_mutex);
        }
        wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
 
@@ -2897,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
        WREG32(CP_CPF_DEBUG, tmp);
 
        /* init the pipes */
+       mutex_lock(&rdev->srbm_mutex);
        for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
                int me = (i < 4) ? 1 : 2;
                int pipe = (i < 4) ? i : (i - 4);
@@ -2919,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                WREG32(CP_HPD_EOP_CONTROL, tmp);
        }
        cik_srbm_select(rdev, 0, 0, 0, 0);
+       mutex_unlock(&rdev->srbm_mutex);
 
        /* init the queues.  Just two for now. */
        for (i = 0; i < 2; i++) {
@@ -2972,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                mqd->static_thread_mgmt23[0] = 0xffffffff;
                mqd->static_thread_mgmt23[1] = 0xffffffff;
 
+               mutex_lock(&rdev->srbm_mutex);
                cik_srbm_select(rdev, rdev->ring[idx].me,
                                rdev->ring[idx].pipe,
                                rdev->ring[idx].queue, 0);
@@ -3099,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
 
                cik_srbm_select(rdev, 0, 0, 0, 0);
+               mutex_unlock(&rdev->srbm_mutex);
 
                radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
                radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
@@ -4320,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
 
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
+       mutex_lock(&rdev->srbm_mutex);
        for (i = 0; i < 16; i++) {
                cik_srbm_select(rdev, 0, 0, 0, i);
                /* CP and shaders */
@@ -4335,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
                /* XXX SDMA RLC - todo */
        }
        cik_srbm_select(rdev, 0, 0, 0, 0);
+       mutex_unlock(&rdev->srbm_mutex);
 
        cik_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -5954,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev)
        struct radeon_ring *ring;
        int r;
 
+       cik_mc_program(rdev);
+
        if (rdev->flags & RADEON_IS_IGP) {
                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
                    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
@@ -5985,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       cik_mc_program(rdev);
        r = cik_pcie_gart_enable(rdev);
        if (r)
                return r;
@@ -6194,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        cik_cp_enable(rdev, false);
        cik_sdma_enable(rdev, false);
-       r600_uvd_rbc_stop(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_suspend(rdev);
        cik_irq_suspend(rdev);
        radeon_wb_disable(rdev);
@@ -6358,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_fini(rdev);
        cik_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
@@ -6978,7 +6990,7 @@ int cik_uvd_resume(struct radeon_device *rdev)
 
        /* program the VCPU memory controller bits 0-27 */
        addr = rdev->uvd.gpu_addr >> 3;
-       size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
+       size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
        WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
        WREG32(UVD_VCPU_CACHE_SIZE0, size);
 
index 9bcdd174780f2a8ec618cf255ad2c37690a2237f..7e5d0b570a30531a8752e6b4f29bb732528bb7fe 100644 (file)
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev)
 {
        struct rv7xx_power_info *pi;
        struct evergreen_power_info *eg_pi;
-       int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-       uint16_t data_offset, size;
-       uint8_t frev, crev;
        struct atom_clock_dividers dividers;
        int ret;
 
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
        eg_pi->vddci_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
 
-       if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset)) {
-               pi->sclk_ss = true;
-               pi->mclk_ss = true;
-               pi->dynamic_ss = true;
-       } else {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
-               pi->dynamic_ss = true;
-       }
+       rv770_get_engine_memory_ss(rdev);
 
        pi->asi = RV770_ASI_DFLT;
        pi->pasi = CYPRESS_HASI_DFLT;
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
 
        pi->dynamic_pcie_gen2 = true;
 
-       if (pi->gfx_clock_gating &&
-           (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+       if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
                pi->thermal_protection = true;
        else
                pi->thermal_protection = false;
index 038dcac7670cf540950d4ce64a26830da6325e0d..d5b49e33315e299aa4b2a2085d0fdd3d1c8a99bc 100644 (file)
@@ -5106,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev)
        /* enable aspm */
        evergreen_program_aspm(rdev);
 
+       evergreen_mc_program(rdev);
+
        if (ASIC_IS_DCE5(rdev)) {
                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
                        r = ni_init_microcode(rdev);
@@ -5133,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       evergreen_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                evergreen_agp_enable(rdev);
        } else {
@@ -5291,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev)
 int evergreen_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_suspend(rdev);
        r700_cp_stop(rdev);
        r600_dma_stop(rdev);
-       r600_uvd_rbc_stop(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
@@ -5429,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
index bb9ea3641312af1a8da0aff84486a124c629704a..b0e280058b9b09ea22eb75f5c13ab6395e1698ba 100644 (file)
@@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
        u32 base_rate = 24000;
+       u32 max_ratio = clock / base_rate;
+       u32 dto_phase;
+       u32 dto_modulo = clock;
+       u32 wallclock_ratio;
+       u32 dto_cntl;
 
        if (!dig || !dig->afmt)
                return;
 
+       if (max_ratio >= 8) {
+               dto_phase = 192 * 1000;
+               wallclock_ratio = 3;
+       } else if (max_ratio >= 4) {
+               dto_phase = 96 * 1000;
+               wallclock_ratio = 2;
+       } else if (max_ratio >= 2) {
+               dto_phase = 48 * 1000;
+               wallclock_ratio = 1;
+       } else {
+               dto_phase = 24 * 1000;
+               wallclock_ratio = 0;
+       }
+       dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+       dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+       WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+
        /* XXX two dtos; generally use dto0 for hdmi */
        /* Express [24MHz / target pixel clock] as an exact rational
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
        WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
-       WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
-       WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+       WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+       WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
 }
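Rather than always programming DTO0 with phase = 24000 * 100 and module = clock * 100, the phase and the new wallclock ratio field are now chosen from how far the pixel clock exceeds the 24 MHz base rate, while the module stays equal to the clock. Each bucket doubles the phase and increments the ratio (24k/0, 48k/1, 96k/2, 192k/3), presumably so the wallclock-ratio divider compensates for the larger phase in hardware. A standalone illustration of the selection, assuming the clock is the pixel clock in kHz as in the driver (the sample value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static void pick_dto(uint32_t clock, uint32_t *dto_phase, uint32_t *wallclock_ratio)
    {
            uint32_t max_ratio = clock / 24000;     /* base_rate = 24000 kHz */

            if (max_ratio >= 8) {
                    *dto_phase = 192 * 1000;
                    *wallclock_ratio = 3;
            } else if (max_ratio >= 4) {
                    *dto_phase = 96 * 1000;
                    *wallclock_ratio = 2;
            } else if (max_ratio >= 2) {
                    *dto_phase = 48 * 1000;
                    *wallclock_ratio = 1;
            } else {
                    *dto_phase = 24 * 1000;
                    *wallclock_ratio = 0;
            }
    }

    int main(void)
    {
            uint32_t phase, ratio;

            pick_dto(148500, &phase, &ratio);       /* 1080p pixel clock, ratio 6 */
            printf("phase=%u wallclock_ratio=%u\n", phase, ratio);  /* 96000, 2 */
            return 0;
    }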
 
 
index a7baf67aef6cae99eacdd958b1aa00e2fd925841..0d582ac1dc31841b0844df7cfe4b186bd7b6da0e 100644 (file)
 #define DCCG_AUDIO_DTO0_MODULE            0x05b4
 #define DCCG_AUDIO_DTO0_LOAD              0x05b8
 #define DCCG_AUDIO_DTO0_CNTL              0x05bc
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
 
 #define DCCG_AUDIO_DTO1_PHASE             0x05c0
 #define DCCG_AUDIO_DTO1_MODULE            0x05c4
index 56bd4f3be4febfe75522deb222a0218c7792b248..ccb4f8b548524b46900f6d88444179c9f6a3069f 100644 (file)
@@ -794,9 +794,13 @@ int ni_init_microcode(struct radeon_device *rdev)
        if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
                err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
-               if (err)
-                       goto out;
-               if (rdev->smc_fw->size != smc_req_size) {
+               if (err) {
+                       printk(KERN_ERR
+                              "smc: error loading firmware \"%s\"\n",
+                              fw_name);
+                       release_firmware(rdev->smc_fw);
+                       rdev->smc_fw = NULL;
+               } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "ni_mc: Bogus length %zu in firmware \"%s\"\n",
                               rdev->mc_fw->size, fw_name);
@@ -2079,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev)
        /* enable aspm */
        evergreen_program_aspm(rdev);
 
+       evergreen_mc_program(rdev);
+
        if (rdev->flags & RADEON_IS_IGP) {
                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
                        r = ni_init_microcode(rdev);
@@ -2107,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       evergreen_mc_program(rdev);
        r = cayman_pcie_gart_enable(rdev);
        if (r)
                return r;
@@ -2286,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        cayman_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
-       r600_uvd_rbc_stop(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_suspend(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
@@ -2418,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_fini(rdev);
        cayman_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
index 4f9b9bc20daaccde2911de65882c5a9c32d23f15..f0f5f748938ad6c30292d2b109bef4c3a2f8fc29 100644 (file)
@@ -4067,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev)
        struct rv7xx_power_info *pi;
        struct evergreen_power_info *eg_pi;
        struct ni_power_info *ni_pi;
-       int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-       u16 data_offset, size;
-       u8 frev, crev;
        struct atom_clock_dividers dividers;
        int ret;
 
@@ -4162,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev)
        eg_pi->vddci_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
 
-       if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset)) {
-               pi->sclk_ss = true;
-               pi->mclk_ss = true;
-               pi->dynamic_ss = true;
-       } else {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
-               pi->dynamic_ss = true;
-       }
+       rv770_get_engine_memory_ss(rdev);
 
        pi->asi = RV770_ASI_DFLT;
        pi->pasi = CYPRESS_HASI_DFLT;
@@ -4188,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev)
 
        pi->dynamic_pcie_gen2 = true;
 
-       if (pi->gfx_clock_gating &&
-           (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+       if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
                pi->thermal_protection = true;
        else
                pi->thermal_protection = false;
index 10f712e37003030e81a7770e94ca696c4c8f2c08..e66e7207735036e2ffab5e18f37854384595bf80 100644 (file)
@@ -2299,9 +2299,13 @@ int r600_init_microcode(struct radeon_device *rdev)
        if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
                err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
-               if (err)
-                       goto out;
-               if (rdev->smc_fw->size != smc_req_size) {
+               if (err) {
+                       printk(KERN_ERR
+                              "smc: error loading firmware \"%s\"\n",
+                              fw_name);
+                       release_firmware(rdev->smc_fw);
+                       rdev->smc_fw = NULL;
+               } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "smc: Bogus length %zu in firmware \"%s\"\n",
                               rdev->smc_fw->size, fw_name);
@@ -2697,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
        return 0;
 }
 
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
+void r600_uvd_stop(struct radeon_device *rdev)
 {
        struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 
        /* force RBC into idle state */
        WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+       /* Stall UMC and register bus before resetting VCPU */
+       WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+       mdelay(1);
+
+       /* put VCPU into reset */
+       WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+       mdelay(5);
+
+       /* disable VCPU clock */
+       WREG32(UVD_VCPU_CNTL, 0x0);
+
+       /* Unstall UMC and register bus */
+       WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+       WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
        ring->ready = false;
 }
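The stall and unstall writes above use WREG32_P(reg, val, mask), the driver's read-modify-write helper: bits selected by the mask are kept from the current register value and the remaining bits are taken from val. A standalone demonstration of that behaviour on a plain variable (the register value is made up for the demo):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_reg;       /* stands in for an MMIO register */

    /* same shape as the driver macro: keep (old & mask), merge in (val & ~mask) */
    static void wreg32_p(uint32_t val, uint32_t mask)
    {
            fake_reg = (fake_reg & mask) | (val & ~mask);
    }

    int main(void)
    {
            fake_reg = 0xffff0000;
            wreg32_p(1 << 8, ~(1u << 8));   /* stall: set bit 8, keep everything else */
            printf("0x%08x\n", fake_reg);   /* 0xffff0100 */
            wreg32_p(0, ~(1u << 8));        /* unstall: clear bit 8 again */
            printf("0x%08x\n", fake_reg);   /* 0xffff0000 */
            return 0;
    }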
 
@@ -2722,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev)
        /* disable interrupt */
        WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
 
+       /* Stall UMC and register bus before resetting VCPU */
+       WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+       mdelay(1);
+
        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
               LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
@@ -2751,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev)
        WREG32(UVD_MPC_SET_ALU, 0);
        WREG32(UVD_MPC_SET_MUX, 0x88);
 
-       /* Stall UMC */
-       WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-       WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
        /* take all subblocks out of reset, except VCPU */
        WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
        mdelay(5);
@@ -3312,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev)
        /* enable pcie gen2 link */
        r600_pcie_gen2_enable(rdev);
 
+       r600_mc_program(rdev);
+
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
                r = r600_init_microcode(rdev);
                if (r) {
@@ -3324,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       r600_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                r600_agp_enable(rdev);
        } else {
index f48240bb8c5640badff804e97dce556e31f86bd9..f264df5470f7fd6bd0ba0cabf3d31e7396829c2b 100644 (file)
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        u32 base_rate = 24000;
+       u32 max_ratio = clock / base_rate;
+       u32 dto_phase;
+       u32 dto_modulo = clock;
+       u32 wallclock_ratio;
+       u32 dto_cntl;
 
        if (!dig || !dig->afmt)
                return;
 
+       if (max_ratio >= 8) {
+               dto_phase = 192 * 1000;
+               wallclock_ratio = 3;
+       } else if (max_ratio >= 4) {
+               dto_phase = 96 * 1000;
+               wallclock_ratio = 2;
+       } else if (max_ratio >= 2) {
+               dto_phase = 48 * 1000;
+               wallclock_ratio = 1;
+       } else {
+               dto_phase = 24 * 1000;
+               wallclock_ratio = 0;
+       }
+
        /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
         * doesn't matter which one you use.  Just use the first one.
         */
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
                /* according to the reg specs, this should be DCE3.2 only, but in
                 * practice it seems to cover DCE3.0 as well.
                 */
-               WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
-               WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
-               WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+               if (dig->dig_encoder == 0) {
+                       dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+                       dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+                       WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+                       WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+                       WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+                       WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+               } else {
+                       dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+                       dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+                       WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+                       WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+                       WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+                       WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+               }
        } else {
                /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
                WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
index 8e3fe815edab3224492e126bf7e47d5193be448f..7c780839a7f43858395bdb4ec309bc6a9b1e37fd 100644 (file)
 #define DCCG_AUDIO_DTO0_LOAD              0x051c
 #       define DTO_LOAD                   (1 << 31)
 #define DCCG_AUDIO_DTO0_CNTL              0x0520
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
 
 #define DCCG_AUDIO_DTO1_PHASE             0x0524
 #define DCCG_AUDIO_DTO1_MODULE            0x0528
index 2f08219c39b617d67ab07f83e7aaff7072bd51b7..9f19259667dfa71e254052735be72ce0f14e75d9 100644 (file)
@@ -1468,7 +1468,6 @@ struct radeon_uvd {
        void                    *cpu_addr;
        uint64_t                gpu_addr;
        void                    *saved_bo;
-       unsigned                fw_size;
        atomic_t                handles[RADEON_MAX_UVD_HANDLES];
        struct drm_file         *filp[RADEON_MAX_UVD_HANDLES];
        struct delayed_work     idle_work;
@@ -2066,6 +2065,7 @@ struct radeon_device {
        const struct firmware *mec_fw;  /* CIK MEC firmware */
        const struct firmware *sdma_fw; /* CIK SDMA firmware */
        const struct firmware *smc_fw;  /* SMC firmware */
+       const struct firmware *uvd_fw;  /* UVD firmware */
        struct r600_blit r600_blit;
        struct r600_vram_scratch vram_scratch;
        int msi_enabled; /* msi enabled */
@@ -2095,6 +2095,8 @@ struct radeon_device {
        /* ACPI interface */
        struct radeon_atif              atif;
        struct radeon_atcs              atcs;
+       /* srbm instance registers */
+       struct mutex                    srbm_mutex;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -2161,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
                WREG32(reg, tmp_);                              \
        } while (0)
 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
 #define WREG32_PLL_P(reg, val, mask)                           \
        do {                                                    \
                uint32_t tmp_ = RREG32_PLL(reg);                \
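The WREG32_OR change in this hunk is a macro-hygiene fix: with ~or, an argument such as A | B expands to ~A | B (the ~ binds tighter than |), which corrupts the keep-mask handed to WREG32_P; ~(or) negates the whole argument. A tiny standalone demo of the precedence difference, using generic bit values rather than real register contents:

    #include <stdint.h>
    #include <stdio.h>

    #define KEEP_MASK_OLD(or)       (~or)          /* argument expands unparenthesized */
    #define KEEP_MASK_NEW(or)       (~(or))

    int main(void)
    {
            uint32_t a = 0x1, b = 0x2;

            /* old: ~a | b == 0xfffffffe, bit 0x2 wrongly stays in the keep-mask */
            printf("old: 0x%08x\n", (unsigned)KEEP_MASK_OLD(a | b));
            /* new: ~(a | b) == 0xfffffffc, both bits removed from the keep-mask */
            printf("new: 0x%08x\n", (unsigned)KEEP_MASK_NEW(a | b));
            return 0;
    }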
index 902479fa737ff62c1b2917b4d0bfce0c34176d74..3d61d5aac18ff4e01d3f89934807d9abb261dcdc 100644 (file)
@@ -441,7 +441,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
 /* uvd */
 int r600_uvd_init(struct radeon_device *rdev);
 int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
 int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_uvd_fence_emit(struct radeon_device *rdev,
                         struct radeon_fence *fence);
index 82335e38ec4f268e05f9d4bc1cbd875eae8523a5..63398ae1dbf51fb682fe7e51623de255c58ae0c7 100644 (file)
@@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev,
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
        mutex_init(&rdev->gpu_clock_mutex);
+       mutex_init(&rdev->srbm_mutex);
        init_rwsem(&rdev->pm.mclk_lock);
        init_rwsem(&rdev->exclusive_lock);
        init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+       radeon_pm_suspend(rdev);
        radeon_suspend(rdev);
 
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1566,7 @@ retry:
                }
        }
 
+       radeon_pm_resume(rdev);
        drm_helper_resume_force_mode(rdev->ddev);
 
        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
index 7ddb0efe2408cadc2de230109689a159cdbf0fbd..ddb8f8e04eb549f4fd264b2f704cdd407bcc65e0 100644 (file)
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 
                } else {
                        /* put fence directly behind firmware */
-                       index = ALIGN(rdev->uvd.fw_size, 8);
+                       index = ALIGN(rdev->uvd_fw->size, 8);
                        rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
                        rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
                }
index 6a51d943ccf44d8301758a4b99e3aba35e4c2668..b990b1a2bd50af523e93a7715050a534ba84d987 100644 (file)
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
        if (rdev->gart.robj == NULL) {
                return;
        }
-       radeon_gart_table_vram_unpin(rdev);
        radeon_bo_unref(&rdev->gart.robj);
 }
 
index f374c467aacaf57c9a46c7c655250ef91a44516c..c557850cd3450a5b0f363f24315d3051cb96cf3c 100644 (file)
@@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev)
        case CHIP_VERDE:
        case CHIP_OLAND:
        case CHIP_HAINAN:
-               if (radeon_dpm == 1)
+               /* DPM requires the RLC, RV770+ dGPU requires SMC */
+               if (!rdev->rlc_fw)
+                       rdev->pm.pm_method = PM_METHOD_PROFILE;
+               else if ((rdev->family >= CHIP_RV770) &&
+                        (!(rdev->flags & RADEON_IS_IGP)) &&
+                        (!rdev->smc_fw))
+                       rdev->pm.pm_method = PM_METHOD_PROFILE;
+               else if (radeon_dpm == 1)
                        rdev->pm.pm_method = PM_METHOD_DPM;
                else
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
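Together with the request_firmware() changes in ni.c and r600.c above (and si.c further down), a missing SMC image is no longer fatal: smc_fw is released and left NULL, and radeon_pm_init() then falls back from DPM to the profile method on RV770-and-newer dGPUs, since DPM needs the RLC and, on dGPUs, the SMC firmware. A condensed sketch of that decision, reusing the field names from the hunk (illustrative only):

    static void pick_pm_method(struct radeon_device *rdev, int radeon_dpm)
    {
            if (!rdev->rlc_fw)                              /* DPM requires the RLC */
                    rdev->pm.pm_method = PM_METHOD_PROFILE;
            else if ((rdev->family >= CHIP_RV770) &&
                     !(rdev->flags & RADEON_IS_IGP) &&
                     !rdev->smc_fw)                         /* dGPUs additionally need the SMC */
                    rdev->pm.pm_method = PM_METHOD_PROFILE;
            else if (radeon_dpm == 1)
                    rdev->pm.pm_method = PM_METHOD_DPM;
            else
                    rdev->pm.pm_method = PM_METHOD_PROFILE;
    }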
index 414fd145d20eee458ebf8e0a5c1dfb1940ae2589..b79f4f5cdd626108c8790394cc6e57ddd9b27bce 100644 (file)
@@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
 
 int radeon_uvd_init(struct radeon_device *rdev)
 {
-       const struct firmware *fw;
        unsigned long bo_size;
        const char *fw_name;
        int i, r;
@@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev)
                return -EINVAL;
        }
 
-       r = request_firmware(&fw, fw_name, rdev->dev);
+       r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
        if (r) {
                dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }
 
-       bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
+       bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
                  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
        r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -145,12 +144,6 @@ int radeon_uvd_init(struct radeon_device *rdev)
 
        radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 
-       rdev->uvd.fw_size = fw->size;
-       memset(rdev->uvd.cpu_addr, 0, bo_size);
-       memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
-
-       release_firmware(fw);
-
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                atomic_set(&rdev->uvd.handles[i], 0);
                rdev->uvd.filp[i] = NULL;
@@ -174,33 +167,60 @@ void radeon_uvd_fini(struct radeon_device *rdev)
        }
 
        radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+       release_firmware(rdev->uvd_fw);
 }
 
 int radeon_uvd_suspend(struct radeon_device *rdev)
 {
        unsigned size;
+       void *ptr;
+       int i;
 
        if (rdev->uvd.vcpu_bo == NULL)
                return 0;
 
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+               if (atomic_read(&rdev->uvd.handles[i]))
+                       break;
+
+       if (i == RADEON_MAX_UVD_HANDLES)
+               return 0;
+
        size = radeon_bo_size(rdev->uvd.vcpu_bo);
+       size -= rdev->uvd_fw->size;
+
+       ptr = rdev->uvd.cpu_addr;
+       ptr += rdev->uvd_fw->size;
+
        rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-       memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
+       memcpy(rdev->uvd.saved_bo, ptr, size);
 
        return 0;
 }
 
 int radeon_uvd_resume(struct radeon_device *rdev)
 {
+       unsigned size;
+       void *ptr;
+
        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;
 
+       memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+       size = radeon_bo_size(rdev->uvd.vcpu_bo);
+       size -= rdev->uvd_fw->size;
+
+       ptr = rdev->uvd.cpu_addr;
+       ptr += rdev->uvd_fw->size;
+
        if (rdev->uvd.saved_bo != NULL) {
-               unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
-               memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
+               memcpy(ptr, rdev->uvd.saved_bo, size);
                kfree(rdev->uvd.saved_bo);
                rdev->uvd.saved_bo = NULL;
-       }
+       } else
+               memset(ptr, 0, size);
 
        return 0;
 }
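The UVD firmware image is now kept around in rdev->uvd_fw instead of being copied into the VCPU buffer once and released. Suspend therefore bails out early when no handles are open and otherwise saves only the session data that lives behind the firmware in the BO; resume re-copies the firmware first and then either restores or zeroes that tail. A standalone illustration of the offset arithmetic (buffer and firmware sizes are made up for the demo):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            size_t bo_size = 4096, fw_size = 1024;  /* hypothetical sizes */
            size_t tail = bo_size - fw_size;        /* session data behind the firmware */
            unsigned char *bo = calloc(1, bo_size);
            unsigned char *saved = malloc(tail);

            /* suspend: only the tail needs saving, the firmware can be re-copied later */
            memcpy(saved, bo + fw_size, tail);

            /* resume: the firmware image goes back in first, then the saved tail */
            memset(bo, 0xab, fw_size);              /* stands in for the firmware copy */
            memcpy(bo + fw_size, saved, tail);

            printf("saved %zu of %zu bytes across suspend\n", tail, bo_size);
            free(saved);
            free(bo);
            return 0;
    }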
@@ -215,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
 {
        int i, r;
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-               if (rdev->uvd.filp[i] == filp) {
-                       uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+               uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+               if (handle != 0 && rdev->uvd.filp[i] == filp) {
                        struct radeon_fence *fence;
 
                        r = radeon_uvd_get_destroy_msg(rdev,
@@ -336,9 +356,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                return -EINVAL;
        }
 
+       if (bo->tbo.sync_obj) {
+               r = radeon_fence_wait(bo->tbo.sync_obj, false);
+               if (r) {
+                       DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+                       return r;
+               }
+       }
+
        r = radeon_bo_kmap(bo, &ptr);
-       if (r)
+       if (r) {
+               DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
                return r;
+       }
 
        msg = ptr + offset;
 
@@ -364,8 +394,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                radeon_bo_kunmap(bo);
                return 0;
        } else {
-               /* it's a create msg, no special handling needed */
                radeon_bo_kunmap(bo);
+
+               if (msg_type != 0) {
+                       DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+                       return -EINVAL;
+               }
+
+               /* it's a create msg, no special handling needed */
        }
 
        /* create or decode, validate the handle */
@@ -388,7 +424,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 
 static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                               int data0, int data1,
-                              unsigned buf_sizes[])
+                              unsigned buf_sizes[], bool *has_msg_cmd)
 {
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_reloc *reloc;
@@ -417,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 
        if (cmd < 0x4) {
                if ((end - start) < buf_sizes[cmd]) {
-                       DRM_ERROR("buffer to small (%d / %d)!\n",
+                       DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
                                  (unsigned)(end - start), buf_sizes[cmd]);
                        return -EINVAL;
                }
@@ -442,9 +478,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
        }
 
        if (cmd == 0) {
+               if (*has_msg_cmd) {
+                       DRM_ERROR("More than one message in a UVD-IB!\n");
+                       return -EINVAL;
+               }
+               *has_msg_cmd = true;
                r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
                if (r)
                        return r;
+       } else if (!*has_msg_cmd) {
+               DRM_ERROR("Message needed before other commands are sent!\n");
+               return -EINVAL;
        }
 
        return 0;
@@ -453,7 +497,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
                             struct radeon_cs_packet *pkt,
                             int *data0, int *data1,
-                            unsigned buf_sizes[])
+                            unsigned buf_sizes[],
+                            bool *has_msg_cmd)
 {
        int i, r;
 
@@ -467,7 +512,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
                        *data1 = p->idx;
                        break;
                case UVD_GPCOM_VCPU_CMD:
-                       r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+                       r = radeon_uvd_cs_reloc(p, *data0, *data1,
+                                               buf_sizes, has_msg_cmd);
                        if (r)
                                return r;
                        break;
@@ -488,6 +534,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
        struct radeon_cs_packet pkt;
        int r, data0 = 0, data1 = 0;
 
+       /* does the IB have a msg command */
+       bool has_msg_cmd = false;
+
        /* minimum buffer sizes */
        unsigned buf_sizes[] = {
                [0x00000000]    =       2048,
@@ -514,8 +563,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
                        return r;
                switch (pkt.type) {
                case RADEON_PACKET_TYPE0:
-                       r = radeon_uvd_cs_reg(p, &pkt, &data0,
-                                             &data1, buf_sizes);
+                       r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
+                                             buf_sizes, &has_msg_cmd);
                        if (r)
                                return r;
                        break;
@@ -527,6 +576,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+
+       if (!has_msg_cmd) {
+               DRM_ERROR("UVD-IBs need a msg command!\n");
+               return -EINVAL;
+       }
+
        return 0;
 }
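radeon_uvd_cs_parse() now tracks whether the IB carried a message command: the message has to come before any other UVD command, only one message is allowed per IB, and an IB without one is rejected. A standalone sketch of that ordering rule on a simplified command stream (the command codes are illustrative, not the real UVD encoding):

    #include <stdbool.h>
    #include <stdio.h>

    enum { CMD_MSG = 0, CMD_DATA = 1 };     /* illustrative codes only */

    static int validate(const int *cmds, int n)
    {
            bool has_msg = false;
            int i;

            for (i = 0; i < n; i++) {
                    if (cmds[i] == CMD_MSG) {
                            if (has_msg)
                                    return -1;      /* more than one message */
                            has_msg = true;
                    } else if (!has_msg) {
                            return -1;              /* command before the message */
                    }
            }
            return has_msg ? 0 : -1;                /* an IB needs a message */
    }

    int main(void)
    {
            int good[] = { CMD_MSG, CMD_DATA, CMD_DATA };
            int bad[]  = { CMD_DATA, CMD_MSG };

            printf("good=%d bad=%d\n", validate(good, 3), validate(bad, 2));    /* 0 -1 */
            return 0;
    }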
 
index 363018c60412c1d7374ee60a176abe2f5c49dce7..bdd888b4db2bbc7bf4f6f2cc718512fa999e223f 100644 (file)
@@ -1944,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
 
 int rv6xx_dpm_init(struct radeon_device *rdev)
 {
-       int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-       uint16_t data_offset, size;
-       uint8_t frev, crev;
+       struct radeon_atom_ss ss;
        struct atom_clock_dividers dividers;
        struct rv6xx_power_info *pi;
        int ret;
@@ -1989,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
 
        pi->gfx_clock_gating = true;
 
-       if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset)) {
-               pi->sclk_ss = true;
-               pi->mclk_ss = true;
+       pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                      ASIC_INTERNAL_ENGINE_SS, 0);
+       pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                      ASIC_INTERNAL_MEMORY_SS, 0);
+
+       /* Disable sclk ss, causes hangs on a lot of systems */
+       pi->sclk_ss = false;
+
+       if (pi->sclk_ss || pi->mclk_ss)
                pi->dynamic_ss = true;
-       } else {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
+       else
                pi->dynamic_ss = false;
-       }
 
        pi->dynamic_pcie_gen2 = true;
 
index 30ea14e8854c465add9148cb4099142a3295fbd0..f5e92cfcc140984bd63e1a892fa88277bb7530c3 100644 (file)
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
                                                 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
                radeon_program_register_sequence(rdev,
                                                 rv730_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv730_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv730_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv730_mgcg_init));
                break;
        case CHIP_RV710:
                radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
                                                 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
                radeon_program_register_sequence(rdev,
                                                 rv710_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv710_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv710_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv710_mgcg_init));
                break;
        case CHIP_RV740:
                radeon_program_register_sequence(rdev,
                                                 rv740_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv740_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv740_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv740_mgcg_init));
                break;
        default:
                break;
@@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev)
 
        /* program the VCPU memory controller bits 0-27 */
        addr = rdev->uvd.gpu_addr >> 3;
-       size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
+       size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
        WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
        WREG32(UVD_VCPU_CACHE_SIZE0, size);
 
@@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev)
        /* enable pcie gen2 link */
        rv770_pcie_gen2_enable(rdev);
 
+       rv770_mc_program(rdev);
+
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
                r = r600_init_microcode(rdev);
                if (r) {
@@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rv770_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                rv770_agp_enable(rdev);
        } else {
@@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev)
 int rv770_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_suspend(rdev);
        r700_cp_stop(rdev);
        r600_dma_stop(rdev);
@@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
index 2d347925f77d6d116c95c01cc3045c148ac446b1..094c67a29d0deb7789d6b0d38a1d0a718515b81b 100644 (file)
@@ -2319,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
        return 0;
 }
 
+void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+       struct radeon_atom_ss ss;
+
+       pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                      ASIC_INTERNAL_ENGINE_SS, 0);
+       pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                      ASIC_INTERNAL_MEMORY_SS, 0);
+
+       if (pi->sclk_ss || pi->mclk_ss)
+               pi->dynamic_ss = true;
+       else
+               pi->dynamic_ss = false;
+}
+
 int rv770_dpm_init(struct radeon_device *rdev)
 {
        struct rv7xx_power_info *pi;
-       int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-       uint16_t data_offset, size;
-       uint8_t frev, crev;
        struct atom_clock_dividers dividers;
        int ret;
 
@@ -2369,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
        pi->mvdd_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
 
-       if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset)) {
-               pi->sclk_ss = true;
-               pi->mclk_ss = true;
-               pi->dynamic_ss = true;
-       } else {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
-               pi->dynamic_ss = false;
-       }
+       rv770_get_engine_memory_ss(rdev);
 
        pi->asi = RV770_ASI_DFLT;
        pi->pasi = RV770_HASI_DFLT;
@@ -2393,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
 
        pi->dynamic_pcie_gen2 = true;
 
-       if (pi->gfx_clock_gating &&
-           (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+       if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
                pi->thermal_protection = true;
        else
                pi->thermal_protection = false;
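The removed ASIC_InternalSS_Info check only tested whether the AtomBIOS table exists at all; the new rv770_get_engine_memory_ss() helper (shared with the cypress, ni and si DPM init paths in this diff) asks radeon_atombios_get_asic_ss_info() separately for engine and memory spread spectrum and enables dynamic SS only when at least one of them is actually reported. A small standalone rendering of that decision, with hypothetical query results standing in for the AtomBIOS lookups:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-ins for the two radeon_atombios_get_asic_ss_info() queries */
    static bool engine_ss_reported(void) { return false; }
    static bool memory_ss_reported(void) { return true; }

    int main(void)
    {
            bool sclk_ss = engine_ss_reported();
            bool mclk_ss = memory_ss_reported();
            bool dynamic_ss = sclk_ss || mclk_ss;   /* only when SS is really present */

            printf("sclk_ss=%d mclk_ss=%d dynamic_ss=%d\n", sclk_ss, mclk_ss, dynamic_ss);
            return 0;
    }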
index 96b1b2a62a8a2a0d5950574788d0936aba6ec730..9244effc6b59e2e69c6882b0d4dc35dd2517379c 100644 (file)
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
 void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
                                             struct radeon_ps *new_ps,
                                             struct radeon_ps *old_ps);
+void rv770_get_engine_memory_ss(struct radeon_device *rdev);
 
 /* smc */
 int rv770_read_smc_soft_register(struct radeon_device *rdev,
index 6ca904673a4fa7046cee901d26c2b049546f2234..daa8d2df8ec502ca72a5b9897be75ae59a9a797c 100644 (file)
@@ -1663,9 +1663,13 @@ static int si_init_microcode(struct radeon_device *rdev)
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
        err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
-       if (err)
-               goto out;
-       if (rdev->smc_fw->size != smc_req_size) {
+       if (err) {
+               printk(KERN_ERR
+                      "smc: error loading firmware \"%s\"\n",
+                      fw_name);
+               release_firmware(rdev->smc_fw);
+               rdev->smc_fw = NULL;
+       } else if (rdev->smc_fw->size != smc_req_size) {
                printk(KERN_ERR
                       "si_smc: Bogus length %zu in firmware \"%s\"\n",
                       rdev->smc_fw->size, fw_name);
@@ -6418,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev)
        /* enable aspm */
        si_program_aspm(rdev);
 
+       si_mc_program(rdev);
+
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
            !rdev->rlc_fw || !rdev->mc_fw) {
                r = si_init_microcode(rdev);
@@ -6437,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       si_mc_program(rdev);
        r = si_pcie_gart_enable(rdev);
        if (r)
                return r;
@@ -6621,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev)
        si_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
        if (rdev->has_uvd) {
-               r600_uvd_rbc_stop(rdev);
+               r600_uvd_stop(rdev);
                radeon_uvd_suspend(rdev);
        }
        si_irq_suspend(rdev);
@@ -6763,8 +6768,10 @@ void si_fini(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
-       if (rdev->has_uvd)
+       if (rdev->has_uvd) {
+               r600_uvd_stop(rdev);
                radeon_uvd_fini(rdev);
+       }
        si_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
index 41825575b403bab2b5e16b3fba33a33d3187f4f7..88699e3cd868047555ce426ecb2251c7f9f3850e 100644 (file)
@@ -2903,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 {
        struct ni_ps *ps = ni_get_ps(rps);
        struct radeon_clock_and_voltage_limits *max_limits;
-       bool disable_mclk_switching;
+       bool disable_mclk_switching = false;
+       bool disable_sclk_switching = false;
        u32 mclk, sclk;
        u16 vddc, vddci;
        int i;
@@ -2911,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
            ni_dpm_vblank_too_short(rdev))
                disable_mclk_switching = true;
-       else
-               disable_mclk_switching = false;
+
+       if (rps->vclk || rps->dclk) {
+               disable_mclk_switching = true;
+               disable_sclk_switching = true;
+       }
 
        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2940,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 
        if (disable_mclk_switching) {
                mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
-               sclk = ps->performance_levels[0].sclk;
-               vddc = ps->performance_levels[0].vddc;
                vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
        } else {
-               sclk = ps->performance_levels[0].sclk;
                mclk = ps->performance_levels[0].mclk;
-               vddc = ps->performance_levels[0].vddc;
                vddci = ps->performance_levels[0].vddci;
        }
 
+       if (disable_sclk_switching) {
+               sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+               vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+       } else {
+               sclk = ps->performance_levels[0].sclk;
+               vddc = ps->performance_levels[0].vddc;
+       }
+
        /* adjusted low state */
        ps->performance_levels[0].sclk = sclk;
        ps->performance_levels[0].mclk = mclk;
        ps->performance_levels[0].vddc = vddc;
        ps->performance_levels[0].vddci = vddci;
 
-       for (i = 1; i < ps->performance_level_count; i++) {
-               if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
-                       ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
-               if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
-                       ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+       if (disable_sclk_switching) {
+               sclk = ps->performance_levels[0].sclk;
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (sclk < ps->performance_levels[i].sclk)
+                               sclk = ps->performance_levels[i].sclk;
+               }
+               for (i = 0; i < ps->performance_level_count; i++) {
+                       ps->performance_levels[i].sclk = sclk;
+                       ps->performance_levels[i].vddc = vddc;
+               }
+       } else {
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+                               ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+                       if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+                               ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+               }
        }
 
        if (disable_mclk_switching) {
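When the requested state carries UVD clocks (vclk or dclk non-zero), si_apply_state_adjust_rules() now freezes both memory and engine clock switching: instead of merely clamping each level to be no lower than the previous one, every performance level is pinned to the highest sclk and vddc in the state. A standalone illustration of that pinning pass (the level table is made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned sclk[3] = { 30000, 60000, 80000 };     /* hypothetical levels */
            unsigned max = sclk[0];
            int i;

            for (i = 1; i < 3; i++)
                    if (sclk[i] > max)
                            max = sclk[i];

            for (i = 0; i < 3; i++)
                    sclk[i] = max;          /* no sclk switching: every level runs at max */

            printf("%u %u %u\n", sclk[0], sclk[1], sclk[2]);        /* 80000 80000 80000 */
            return 0;
    }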
@@ -6253,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev)
        struct evergreen_power_info *eg_pi;
        struct ni_power_info *ni_pi;
        struct si_power_info *si_pi;
-       int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-       u16 data_offset, size;
-       u8 frev, crev;
        struct atom_clock_dividers dividers;
        int ret;
        u32 mask;
@@ -6346,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev)
        si_pi->vddc_phase_shed_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
 
-       if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset)) {
-               pi->sclk_ss = true;
-               pi->mclk_ss = true;
-               pi->dynamic_ss = true;
-       } else {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
-               pi->dynamic_ss = true;
-       }
+       rv770_get_engine_memory_ss(rdev);
 
        pi->asi = RV770_ASI_DFLT;
        pi->pasi = CYPRESS_HASI_DFLT;
@@ -6366,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev)
        eg_pi->sclk_deep_sleep = true;
        si_pi->sclk_deep_sleep_above_low = false;
 
-       if (pi->gfx_clock_gating &&
-           (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+       if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
                pi->thermal_protection = true;
        else
                pi->thermal_protection = false;
index 7a5764843bfb646d2b90526a2e9f6619931b48f9..cd33084c78602146d42efca60e358d08f00da219 100644 (file)
@@ -488,8 +488,6 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
        if (djrcv_dev->querying_devices)
                return 0;
 
-       djrcv_dev->querying_devices = true;
-
        dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
        if (!dj_report)
                return -ENOMEM;
index 0f34bca9f5e577c1f1e04d43908bc9a7a91f4bce..6099f50b28aaa80f74e9ac69373f73efe4e852f1 100644 (file)
@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
                                          u16 value)
 {
        return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
-              && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+              || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
 }
 
 static void adt7470_init_client(struct i2c_client *client)
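i2c_smbus_write_byte_data() returns 0 on success and a negative errno on failure, so chaining the two byte writes with && meant that a successful low-byte write (which returns 0) short-circuited the expression and the high byte was never written. With || the high-byte write runs whenever the low-byte write succeeds, and the function returns non-zero when either write fails. A standalone demo of the short-circuit difference (the helper only mimics the 0-on-success convention):

    #include <stdio.h>

    static int calls;

    static int write_byte(void)     /* mimics i2c_smbus_write_byte_data: 0 = success */
    {
            calls++;
            return 0;
    }

    int main(void)
    {
            calls = 0;
            (void)(write_byte() && write_byte());   /* old code path */
            printf("&&: %d write(s) issued\n", calls);      /* 1, high byte skipped */

            calls = 0;
            (void)(write_byte() || write_byte());   /* fixed code path */
            printf("||: %d write(s) issued\n", calls);      /* 2 */
            return 0;
    }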
index ccec916bc3eb58f9b6376ff109965cd1529f5d7c..af8f65fb1c050192e7a21234dd6dc855ca55c923 100644 (file)
@@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c)
                bus_frequency = KEMPLD_I2C_FREQ_MAX;
 
        if (pld->info.spec_major == 1)
-               prescale = pld->pld_clock / bus_frequency * 5 - 1000;
+               prescale = pld->pld_clock / (bus_frequency * 5) - 1000;
        else
-               prescale = pld->pld_clock / bus_frequency * 4 - 3000;
+               prescale = pld->pld_clock / (bus_frequency * 4) - 3000;
 
        if (prescale < 0)
                prescale = 0;
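The prescale fix above is a precedence and integer-division issue: pld_clock / bus_frequency * 5 divides first and then multiplies the quotient, which is not the same as dividing by (bus_frequency * 5). A generic standalone demo of the difference (the numbers are arbitrary and not the PLD's real clock values):

    #include <stdio.h>

    int main(void)
    {
            int clock = 1000, freq = 10;    /* arbitrary demo values */

            printf("clock / freq * 5   = %d\n", clock / freq * 5);      /* 500 */
            printf("clock / (freq * 5) = %d\n", clock / (freq * 5));    /* 20 */
            return 0;
    }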
index df8ff5aea5b5b2b4aea8191f66b18f45408fd6ea..e2e9a0dade9665ec0d89598754c5734d2480c09c 100644 (file)
@@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
         * based on this empirical measurement and a lot of previous frobbing.
         */
        i2c->cmd_err = 0;
-       if (msg->len < 8) {
+       if (0) {        /* disable PIO mode until a proper fix is made */
                ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
                if (ret)
                        mxs_i2c_reset(i2c);
index 0ad208a69c298aef50bb4b0ff6744b60753e9a08..3ceac3e91dde4cd30339bc3c61feb4ad570a03c7 100644 (file)
@@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
 {
        unsigned int stepconfig;
        int i, steps;
-       u32 step_en;
 
        /*
         * There are 16 configurable steps and 8 analog input
@@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
                adc_dev->channel_step[i] = steps;
                steps++;
        }
-       step_en = get_adc_step_mask(adc_dev);
-       am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
 }
 
 static const char * const chan_name_ain[] = {
@@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
                int *val, int *val2, long mask)
 {
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
-       int i;
-       unsigned int fifo1count, read;
+       int i, map_val;
+       unsigned int fifo1count, read, stepid;
        u32 step = UINT_MAX;
        bool found = false;
+       u32 step_en;
+       unsigned long timeout = jiffies + usecs_to_jiffies
+                               (IDLE_TIMEOUT * adc_dev->channels);
+       step_en = get_adc_step_mask(adc_dev);
+       am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
+       /* Wait for ADC sequencer to complete sampling */
+       while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) {
+               if (time_after(jiffies, timeout))
+                       return -EAGAIN;
+       }
+       map_val = chan->channel + TOTAL_CHANNELS;
 
        /*
         * When the sub-system is first enabled,
@@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
        fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
        for (i = 0; i < fifo1count; i++) {
                read = tiadc_readl(adc_dev, REG_FIFO1);
-               if (read >> 16 == step) {
-                       *val = read & 0xfff;
+               stepid = read & FIFOREAD_CHNLID_MASK;
+               stepid = stepid >> 0x10;
+
+               if (stepid == map_val) {
+                       read = read & FIFOREAD_DATA_MASK;
                        found = true;
+                       *val = read;
                }
        }
-       am335x_tsc_se_update(adc_dev->mfd_tscadc);
+
        if (found == false)
                return -EBUSY;
        return IIO_VAL_INT;
index ea8a4146620de5c6ad551644facb278654ab1c1c..0dd9bb8731301e755dc29ef2851371f70deb819d 100644 (file)
@@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
 void iio_trigger_poll(struct iio_trigger *trig, s64 time)
 {
        int i;
-       if (!trig->use_count)
-               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
-                       if (trig->subirqs[i].enabled) {
-                               trig->use_count++;
+
+       if (!atomic_read(&trig->use_count)) {
+               atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+                       if (trig->subirqs[i].enabled)
                                generic_handle_irq(trig->subirq_base + i);
-                       }
+                       else
+                               iio_trigger_notify_done(trig);
+               }
+       }
 }
 EXPORT_SYMBOL(iio_trigger_poll);
 
@@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
 void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
 {
        int i;
-       if (!trig->use_count)
-               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
-                       if (trig->subirqs[i].enabled) {
-                               trig->use_count++;
+
+       if (!atomic_read(&trig->use_count)) {
+               atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+                       if (trig->subirqs[i].enabled)
                                handle_nested_irq(trig->subirq_base + i);
-                       }
+                       else
+                               iio_trigger_notify_done(trig);
+               }
+       }
 }
 EXPORT_SYMBOL(iio_trigger_poll_chained);
 
 void iio_trigger_notify_done(struct iio_trigger *trig)
 {
-       trig->use_count--;
-       if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+       if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+               trig->ops->try_reenable)
                if (trig->ops->try_reenable(trig))
                        /* Missed an interrupt so launch new poll now */
                        iio_trigger_poll(trig, 0);
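use_count becomes an atomic_t that is pre-loaded with CONFIG_IIO_CONSUMERS_PER_TRIGGER before the poll fans out to the per-consumer interrupts; slots whose subirq is disabled are released immediately through iio_trigger_notify_done(), and only the final atomic_dec_and_test() may re-enable the trigger. A minimal kernel-style sketch of that accounting with a fixed consumer count (illustrative only, not compile-tested against the IIO core):

    #include <linux/atomic.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    #define NCONSUMERS 2    /* stands in for CONFIG_IIO_CONSUMERS_PER_TRIGGER */

    static atomic_t use_count = ATOMIC_INIT(0);

    static void notify_done(void)
    {
            if (atomic_dec_and_test(&use_count))
                    pr_debug("last consumer done, trigger may be re-enabled\n");
    }

    static void poll_sketch(const bool enabled[NCONSUMERS])
    {
            int i;

            if (atomic_read(&use_count))
                    return;                         /* previous poll still in flight */

            atomic_set(&use_count, NCONSUMERS);     /* claim every slot up front */
            for (i = 0; i < NCONSUMERS; i++) {
                    if (!enabled[i])
                            notify_done();          /* unused slot released right away */
                    /* else: fire the consumer irq; it calls notify_done() when finished */
            }
    }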
index efdc873e58d122d322d53a3876f88a46f4ddb04f..a9857022f71d424a02baa2aaf84652781db58fa7 100644 (file)
@@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct v4l2_subdev *sd = to_sd(ctrl);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret;
+       int ret = -EINVAL;
 
        switch (ctrl->id) {
        case V4L2_CID_BRIGHTNESS:
@@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
                break;
        }
 
-       return 0;
+       return ret;
 }
 
 static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
index df4ada880e4215519b33cc81ab3cdb5d2c8e6dce..bd9405df1bd67abf3a8051d0cefabe3a7ad0c362 100644 (file)
@@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids);
 
 #ifdef CONFIG_OF
 static const struct of_device_id coda_dt_ids[] = {
-       { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] },
+       { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
        { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
        { /* sentinel */ }
 };
index 553d87e5ceab41b9d2d081c6fc55dbe4b17db1ed..fd6289d60cde2c8a25450f6cb6d24c18923f0e81 100644 (file)
@@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev)
        }
        *vfd = g2d_videodev;
        vfd->lock = &dev->mutex;
+       vfd->v4l2_dev = &dev->v4l2_dev;
        ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
        if (ret) {
                v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
index 5296385153d5d17ce0a3c3d6f6bf0d8513bb10f4..4f6dd42c9adb06755eebbc520ba57124f77a5e35 100644 (file)
@@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
                pix_mp->num_planes = 2;
                /* Set pixelformat to the format in which MFC
                   outputs the decoded frame */
-               pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
+               pix_mp->pixelformat = ctx->dst_fmt->fourcc;
                pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
                pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
                pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
@@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
                        mfc_err("Unsupported format for source.\n");
                        return -EINVAL;
                }
-               if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
-                       mfc_err("Not supported format.\n");
+               if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+                       mfc_err("Unknown codec\n");
                        return -EINVAL;
                }
+               if (!IS_MFCV6(dev)) {
+                       if (fmt->fourcc == V4L2_PIX_FMT_VP8) {
+                               mfc_err("Not supported format.\n");
+                               return -EINVAL;
+                       }
+               }
        } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
                fmt = find_format(f, MFC_FMT_RAW);
                if (!fmt) {
@@ -411,7 +417,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
        struct s5p_mfc_dev *dev = video_drvdata(file);
        struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
        int ret = 0;
-       struct s5p_mfc_fmt *fmt;
        struct v4l2_pix_format_mplane *pix_mp;
 
        mfc_debug_enter();
@@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
                goto out;
        }
        if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-               fmt = find_format(f, MFC_FMT_RAW);
-               if (!fmt) {
-                       mfc_err("Unsupported format for source.\n");
-                       return -EINVAL;
-               }
-               if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
-                       mfc_err("Not supported format.\n");
-                       return -EINVAL;
-               } else if (IS_MFCV6(dev) &&
-                               (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
-                       mfc_err("Not supported format.\n");
-                       return -EINVAL;
-               }
-               ctx->dst_fmt = fmt;
-               mfc_debug_leave();
-               return ret;
-       } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-               mfc_err("Wrong type error for S_FMT : %d", f->type);
-               return -EINVAL;
-       }
-       fmt = find_format(f, MFC_FMT_DEC);
-       if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) {
-               mfc_err("Unknown codec\n");
-               ret = -EINVAL;
+               /* dst_fmt is validated by call to vidioc_try_fmt */
+               ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
+               ret = 0;
                goto out;
-       }
-       if (fmt->type != MFC_FMT_DEC) {
-               mfc_err("Wrong format selected, you should choose "
-                                       "format for decoding\n");
+       } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+               /* src_fmt is validated by call to vidioc_try_fmt */
+               ctx->src_fmt = find_format(f, MFC_FMT_DEC);
+               ctx->codec_mode = ctx->src_fmt->codec_mode;
+               mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+               pix_mp->height = 0;
+               pix_mp->width = 0;
+               if (pix_mp->plane_fmt[0].sizeimage)
+                       ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+               else
+                       pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+                                                               DEF_CPB_SIZE;
+               pix_mp->plane_fmt[0].bytesperline = 0;
+               ctx->state = MFCINST_INIT;
+               ret = 0;
+               goto out;
+       } else {
+               mfc_err("Wrong type error for S_FMT : %d", f->type);
                ret = -EINVAL;
                goto out;
        }
-       if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
-               mfc_err("Not supported format.\n");
-               return -EINVAL;
-       }
-       ctx->src_fmt = fmt;
-       ctx->codec_mode = fmt->codec_mode;
-       mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
-       pix_mp->height = 0;
-       pix_mp->width = 0;
-       if (pix_mp->plane_fmt[0].sizeimage)
-               ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
-       else
-               pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
-                                                               DEF_CPB_SIZE;
-       pix_mp->plane_fmt[0].bytesperline = 0;
-       ctx->state = MFCINST_INIT;
+
 out:
        mfc_debug_leave();
        return ret;
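
The rewritten S_FMT path above relies on the new comments ("dst_fmt is validated by call to vidioc_try_fmt"): all format checking is concentrated in the try handler, so the commit phase only repeats the lookup and can no longer fail. Below is a minimal user-space sketch of that validate-then-commit split; the struct, the helper names and the NV12 fourcc are invented for illustration, not taken from the driver.

#include <stdio.h>

struct fmt { unsigned int fourcc; };

static const struct fmt formats[] = {
	{ 0x3231564e },			/* 'NV12', as an example */
};

static const struct fmt *find_format(unsigned int fourcc)
{
	unsigned int i;

	for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
		if (formats[i].fourcc == fourcc)
			return &formats[i];
	return NULL;
}

static int try_fmt(unsigned int fourcc)
{
	return find_format(fourcc) ? 0 : -1;	/* all validation lives here */
}

static int s_fmt(const struct fmt **active, unsigned int fourcc)
{
	if (try_fmt(fourcc))
		return -1;			/* rejected before any state change */
	*active = find_format(fourcc);		/* commit; cannot return NULL now */
	return 0;
}

int main(void)
{
	const struct fmt *active = NULL;
	int good = s_fmt(&active, 0x3231564e);
	int bad = s_fmt(&active, 0x0);

	printf("good=%d bad=%d active set=%s\n", good, bad, active ? "yes" : "no");
	return 0;
}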
index 2549967b2f8589bb46007b8282520250392287b7..59e56f4c8ce34ded878308537b5de1a57595acf1 100644 (file)
@@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
 
 static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
 {
+       struct s5p_mfc_dev *dev = video_drvdata(file);
        struct s5p_mfc_fmt *fmt;
        struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
 
@@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
                        return -EINVAL;
                }
 
+               if (!IS_MFCV6(dev)) {
+                       if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
+                               mfc_err("Not supported format.\n");
+                               return -EINVAL;
+                       }
+               } else if (IS_MFCV6(dev)) {
+                       if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+                               mfc_err("Not supported format.\n");
+                               return -EINVAL;
+                       }
+               }
+
                if (fmt->num_planes != pix_fmt_mp->num_planes) {
                        mfc_err("failed to try output format\n");
                        return -EINVAL;
@@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
 {
        struct s5p_mfc_dev *dev = video_drvdata(file);
        struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
-       struct s5p_mfc_fmt *fmt;
        struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
        int ret = 0;
 
@@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
                goto out;
        }
        if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-               fmt = find_format(f, MFC_FMT_ENC);
-               if (!fmt) {
-                       mfc_err("failed to set capture format\n");
-                       return -EINVAL;
-               }
+               /* dst_fmt is validated by call to vidioc_try_fmt */
+               ctx->dst_fmt = find_format(f, MFC_FMT_ENC);
                ctx->state = MFCINST_INIT;
-               ctx->dst_fmt = fmt;
                ctx->codec_mode = ctx->dst_fmt->codec_mode;
                ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
                pix_fmt_mp->plane_fmt[0].bytesperline = 0;
@@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
                }
                mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
        } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-               fmt = find_format(f, MFC_FMT_RAW);
-               if (!fmt) {
-                       mfc_err("failed to set output format\n");
-                       return -EINVAL;
-               }
-
-               if (!IS_MFCV6(dev) &&
-                               (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) {
-                       mfc_err("Not supported format.\n");
-                       return -EINVAL;
-               } else if (IS_MFCV6(dev) &&
-                               (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
-                       mfc_err("Not supported format.\n");
-                       return -EINVAL;
-               }
-
-               if (fmt->num_planes != pix_fmt_mp->num_planes) {
-                       mfc_err("failed to set output format\n");
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ctx->src_fmt = fmt;
+               /* src_fmt is validated by call to vidioc_try_fmt */
+               ctx->src_fmt = find_format(f, MFC_FMT_RAW);
                ctx->img_width = pix_fmt_mp->width;
                ctx->img_height = pix_fmt_mp->height;
                mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
index 4851cc2e4a4d3227b59df97c8020298b41796c77..c4ff9739a7ae8cb099b21637dc23723e5e170d1b 100644 (file)
@@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 
        *eedata = data;
        *eedata_len = len;
-       dev_config = (void *)eedata;
+       dev_config = (void *)*eedata;
 
        switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
        case 0:
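
The em28xx one-liner above moves the cast from the address of the caller-provided pointer variable to the buffer it points at. A stand-alone sketch of the difference; the struct name and its field are invented:

#include <stdio.h>

struct chip_conf { unsigned short word; };

static void parse(unsigned char **eedata)
{
	/* Wrong: this aliases the pointer variable itself. */
	struct chip_conf *bad = (void *)eedata;
	/* Right: this aliases the EEPROM buffer the caller handed back. */
	struct chip_conf *good = (void *)*eedata;

	printf("pointer variable at %p, buffer at %p, good cast gives %p\n",
	       (void *)eedata, (void *)*eedata, (void *)good);
	(void)bad;
}

int main(void)
{
	static unsigned char eeprom[16] = { 0x12, 0x34 };
	unsigned char *data = eeprom;

	parse(&data);
	return 0;
}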
index cb694055ba7d103ad75bfd73b2f41a0b0cb1e315..6e5070774dc2ca480fe3970c75bb79c0a893e47b 100644 (file)
@@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface,
 
        dev->workqueue = 0;
 
+       /* init video transfer queues first of all */
+       /* to prevent oops in hdpvr_delete() on error paths */
+       INIT_LIST_HEAD(&dev->free_buff_list);
+       INIT_LIST_HEAD(&dev->rec_buff_list);
+
        /* register v4l2_device early so it can be used for printks */
        if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
                dev_err(&interface->dev, "v4l2_device_register failed\n");
@@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface,
        if (!dev->workqueue)
                goto error;
 
-       /* init video transfer queues */
-       INIT_LIST_HEAD(&dev->free_buff_list);
-       INIT_LIST_HEAD(&dev->rec_buff_list);
-
        dev->options = hdpvr_default_options;
 
        if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface,
                                    video_nr[atomic_inc_return(&dev_nr)]);
        if (retval < 0) {
                v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
-               goto error;
+               goto reg_fail;
        }
 
        /* let the user know what node this device is now attached to */
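
The hdpvr hunks above move the list-head initialisation in front of the first failure that can reach hdpvr_delete(), and give the late failure its own label. A rough user-space model of the underlying rule, that anything the shared cleanup path touches must already be valid before the first goto; every name here is made up:

#include <stdio.h>
#include <stdlib.h>

struct fake_dev {
	int  *buff_list;	/* stands in for the buffer lists */
	void *workqueue;	/* acquired later during probe    */
};

static void fake_delete(struct fake_dev *d)
{
	/* The cleanup path walks buff_list unconditionally, so it must
	 * already be in a safe state whenever we get here. */
	free(d->buff_list);
	free(d->workqueue);
}

static int fake_probe(struct fake_dev *d, int fail_early)
{
	d->buff_list = NULL;		/* "first of all" ... */

	if (fail_early)
		goto error;		/* ... so this jump is already safe */

	d->workqueue = malloc(64);
	if (!d->workqueue)
		goto error;

	d->buff_list = calloc(8, sizeof(int));
	if (!d->buff_list)
		goto error;
	return 0;

error:
	fake_delete(d);
	return -1;
}

int main(void)
{
	struct fake_dev d = { 0 };

	return fake_probe(&d, 1) ? 1 : 0;
}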
index 8864436464bf2d9e6aaf3199fb58a5d9e10294ab..7c5b86006ee627c43a52003fb816c7fceb10b6f4 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_USBTV
         tristate "USBTV007 video capture support"
-        depends on VIDEO_DEV
+        depends on VIDEO_V4L2
         select VIDEOBUF2_VMALLOC
 
         ---help---
index bf43f874685e2e5ba6acf97e891b8a09a7676432..91650173941ad5629a56cae9bacff0a4072002f1 100644 (file)
@@ -57,7 +57,7 @@
 #define USBTV_CHUNK_SIZE       256
 #define USBTV_CHUNK            240
 #define USBTV_CHUNKS           (USBTV_WIDTH * USBTV_HEIGHT \
-                                       / 2 / USBTV_CHUNK)
+                                       / 4 / USBTV_CHUNK)
 
 /* Chunk header. */
 #define USBTV_MAGIC_OK(chunk)  ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -89,6 +89,7 @@ struct usbtv {
        /* Number of currently processed frame, useful to find
         * out when a new one begins. */
        u32 frame_id;
+       int chunks_done;
 
        int iso_size;
        unsigned int sequence;
@@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
        return 0;
 }
 
+/* Copy data from chunk into a frame buffer, deinterlacing the data
+ * into every second line. Unfortunately, they don't align nicely into
+ * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
+ * Therefore, we break down the chunk into two halves before copying,

+ * so that we can interleave a line if needed. */
+static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
+{
+       int half;
+
+       for (half = 0; half < 2; half++) {
+               int part_no = chunk_no * 2 + half;
+               int line = part_no / 3;
+               int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
+
+               u32 *dst = &frame[part_index * USBTV_CHUNK/2];
+               memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
+               src += USBTV_CHUNK/2;
+       }
+}
+
 /* Called for each 256-byte image chunk.
  * First word identifies the chunk, followed by 240 words of image
  * data and padding. */
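
The comment on usbtv_chunk_to_vbuf() above explains the geometry: a 240-word chunk covers 480 pixels while a line is 720 pixels, so each chunk is split into two 120-word halves (three halves per line) and the halves are scattered into alternating lines of the interlaced frame. A throwaway user-space program that prints where the first few halves land, using the same index arithmetic as the driver function:

#include <stdio.h>

#define USBTV_CHUNK 240

int main(void)
{
	int chunk_no, half, odd = 1;	/* one field of the interlaced frame */

	for (chunk_no = 0; chunk_no < 4; chunk_no++) {
		for (half = 0; half < 2; half++) {
			int part_no = chunk_no * 2 + half;
			int line = part_no / 3;	/* 3 half-chunks per 720-pixel line */
			int part_index = (line * 2 + !odd) * 3 + (part_no % 3);

			printf("chunk %d, half %d -> frame word offset %d\n",
			       chunk_no, half, part_index * USBTV_CHUNK / 2);
		}
	}
	return 0;
}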
@@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
        frame_id = USBTV_FRAME_ID(chunk);
        odd = USBTV_ODD(chunk);
        chunk_no = USBTV_CHUNK_NO(chunk);
-
-       /* Deinterlace. TODO: Use interlaced frame format. */
-       chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3;
-       chunk_no += !odd * 3;
-
        if (chunk_no >= USBTV_CHUNKS)
                return;
 
        /* Beginning of a frame. */
-       if (chunk_no == 0)
+       if (chunk_no == 0) {
                usbtv->frame_id = frame_id;
+               usbtv->chunks_done = 0;
+       }
+
+       if (usbtv->frame_id != frame_id)
+               return;
 
        spin_lock_irqsave(&usbtv->buflock, flags);
        if (list_empty(&usbtv->bufs)) {
@@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
        buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
        frame = vb2_plane_vaddr(&buf->vb, 0);
 
-       /* Copy the chunk. */
-       memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1],
-                       USBTV_CHUNK * sizeof(chunk[1]));
+       /* Copy the chunk data. */
+       usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
+       usbtv->chunks_done++;
 
        /* Last chunk in a frame, signalling an end */
-       if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) {
+       if (odd && chunk_no == USBTV_CHUNKS-1) {
                int size = vb2_plane_size(&buf->vb, 0);
+               enum vb2_buffer_state state = usbtv->chunks_done ==
+                                               USBTV_CHUNKS ?
+                                               VB2_BUF_STATE_DONE :
+                                               VB2_BUF_STATE_ERROR;
 
                buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
                buf->vb.v4l2_buf.sequence = usbtv->sequence++;
                v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
                vb2_set_plane_payload(&buf->vb, 0, size);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+               vb2_buffer_done(&buf->vb, state);
                list_del(&buf->list);
        }
 
@@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
        if (*nbuffers < 2)
                *nbuffers = 2;
        *nplanes = 1;
-       sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32);
+       sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
 
        return 0;
 }
index 07f257d44a1e00a67b84a58685c5a2e490ce5ca8..e48cb339c0c6e5b71eb4ec24bef6d4f8238ea665 100644 (file)
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n)
  * The bonding ndo_neigh_setup is called at init time before any
  * slave exists. So we must declare proxy setup function which will
  * be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
  */
 static int bond_neigh_setup(struct net_device *dev,
                            struct neigh_parms *parms)
 {
-       parms->neigh_setup   = bond_neigh_init;
+       /* modify only our neigh_parms */
+       if (parms->dev == dev)
+               parms->neigh_setup = bond_neigh_init;
 
        return 0;
 }
index 25723d8ee20130b19ad95b885a99e5c1ba019517..925ab8ec9329b5bbfadb6e94faa12de0b7f91ceb 100644 (file)
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
                if ((mc->ptr + rec_len) > mc->end)
                        goto decode_failed;
 
-               memcpy(cf->data, mc->ptr, rec_len);
+               memcpy(cf->data, mc->ptr, cf->can_dlc);
                mc->ptr += rec_len;
        }
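
The one-line pcan_usb change above clamps the copy length to the frame's DLC instead of the raw record length. A self-contained sketch of the overrun that copying rec_len into an 8-byte CAN payload could cause when the record carries extra bytes; the struct only loosely mirrors struct can_frame:

#include <stdio.h>
#include <string.h>

struct fake_can_frame {
	unsigned char can_dlc;		/* 0..8 */
	unsigned char data[8];
};

int main(void)
{
	unsigned char record[12] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
	struct fake_can_frame cf = { .can_dlc = 8 };
	size_t rec_len = sizeof(record);

	/* memcpy(cf.data, record, rec_len) would write 12 bytes into an
	 * 8-byte field; clamping to can_dlc keeps the copy in bounds. */
	memcpy(cf.data, record, cf.can_dlc);
	printf("copied %u of %zu record bytes\n", cf.can_dlc, rec_len);
	return 0;
}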
 
index f1b121ee5525bfa974010663da5807e9b4d038e5..55d79cb53a79637bb09297d49f1cf1d1ed533147 100644 (file)
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
        struct arc_emac_priv *priv = netdev_priv(ndev);
        unsigned int work_done;
 
-       for (work_done = 0; work_done <= budget; work_done++) {
+       for (work_done = 0; work_done < budget; work_done++) {
                unsigned int *last_rx_bd = &priv->last_rx_bd;
                struct net_device_stats *stats = &priv->stats;
                struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
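
The arc_emac fix above is a plain off-by-one: a poll handler may process at most 'budget' packets, but looping with <= runs budget + 1 iterations. Demonstrated in a few lines of stand-alone C:

#include <stdio.h>

static int count(int budget, int inclusive)
{
	int work_done, processed = 0;

	for (work_done = 0;
	     inclusive ? work_done <= budget : work_done < budget;
	     work_done++)
		processed++;
	return processed;
}

int main(void)
{
	int budget = 4;

	printf("with <= : %d packets for budget %d\n", count(budget, 1), budget);
	printf("with <  : %d packets for budget %d\n", count(budget, 0), budget);
	return 0;
}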
index d80e34b8285ff4457b6660e1e2fe6fe338f2e63e..ce9b387b5a1962949582354f7b0dcfecb621a08d 100644 (file)
@@ -1502,6 +1502,7 @@ struct bnx2x {
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF   (1 << 21)
 #define IS_VF_FLAG                     (1 << 22)
 #define INTERRUPTS_ENABLED_FLAG                (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD          (1 << 24)
 
 #define BP_NOMCP(bp)                   ((bp)->flags & NO_MCP_FLAG)
 
@@ -1830,6 +1831,8 @@ struct bnx2x {
 
        int fp_array_size;
        u32 dump_preset_idx;
+       bool                                    stats_started;
+       struct semaphore                        stats_sema;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed {
        BNX2X_PCI_LINK_SPEED_5000 = 5000,
        BNX2X_PCI_LINK_SPEED_8000 = 8000
 };
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
 #endif /* bnx2x.h */
index 0c94df47e0e8ee46d273488413be3d56d9c29ef6..f9122f2d6b657d0e674c7b036635b60e97586609 100644 (file)
@@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                bnx2x_pfc_set_pfc(bp);
 
                bnx2x_dcbx_update_ets_params(bp);
+
+               /* ets may affect cmng configuration: reinit it in hw */
+               bnx2x_set_local_cmng(bp);
+
                bnx2x_dcbx_resume_hw_tx(bp);
 
                return;
index 5018e52ae2ad8fac5a194b0f137b66d5b8aaf0b4..32767f6aa33f473a126259e4877fa608ed51b3bf 100644 (file)
@@ -1300,6 +1300,9 @@ struct drv_func_mb {
 
        #define DRV_MSG_CODE_EEE_RESULTS_ACK            0xda000000
 
+       #define DRV_MSG_CODE_RMMOD                      0xdb000000
+       #define REQ_BC_VER_4_RMMOD_CMD                  0x0007080f
+
        #define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
        #define REQ_BC_VER_4_SET_MF_BW                  0x00060202
        #define DRV_MSG_CODE_SET_MF_BW_ACK              0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {
 
        #define FW_MSG_CODE_EEE_RESULS_ACK              0xda100000
 
+       #define FW_MSG_CODE_RMMOD_ACK                   0xdb100000
+
        #define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
        #define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
 
index e06186c305d8f88d584a184175abcfd829300d4d..955d6cfd9cb7c48179b587a7bf5239a572b7e9e8 100644 (file)
@@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
        input.port_rate = bp->link_vars.line_speed;
 
-       if (cmng_type == CMNG_FNS_MINMAX) {
+       if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
                int vn;
 
                /* read mf conf from shmem */
@@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
        }
 }
 
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+       int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+       if (cmng_fns != CMNG_FNS_NONE) {
+               bnx2x_cmng_fns_init(bp, false, cmng_fns);
+               storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+       } else {
+               /* rate shaping and fairness are disabled */
+               DP(NETIF_MSG_IFUP,
+                  "single function mode without fairness\n");
+       }
+}
+
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
        }
 
-       if (bp->link_vars.link_up && bp->link_vars.line_speed) {
-               int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
-               if (cmng_fns != CMNG_FNS_NONE) {
-                       bnx2x_cmng_fns_init(bp, false, cmng_fns);
-                       storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
-               } else
-                       /* rate shaping and fairness are disabled */
-                       DP(NETIF_MSG_IFUP,
-                          "single function mode without fairness\n");
-       }
+       if (bp->link_vars.link_up && bp->link_vars.line_speed)
+               bnx2x_set_local_cmng(bp);
 
        __bnx2x_link_report(bp);
 
@@ -10362,6 +10368,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
 
        bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
                        BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+       bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+                       BC_SUPPORTS_RMMOD_CMD : 0;
+
        boot_mode = SHMEM_RD(bp,
                        dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
                        PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11524,6 +11534,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        spin_lock_init(&bp->stats_lock);
+       sema_init(&bp->stats_sema, 1);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12817,13 +12828,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
        bnx2x_dcbnl_update_applist(bp, true);
 #endif
 
+       if (IS_PF(bp) &&
+           !BP_NOMCP(bp) &&
+           (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+               bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
        /* Close the interface - either directly or implicitly */
        if (remove_netdev) {
                unregister_netdev(dev);
        } else {
                rtnl_lock();
-               if (netif_running(dev))
-                       bnx2x_close(dev);
+               dev_close(dev);
                rtnl_unlock();
        }
 
index 95861efb505187f07bd1a176640e4f44af14a5de..44104fb27947fba144575a33a0be3cf1343fe39d 100644 (file)
@@ -3463,7 +3463,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 alloc_mem_err:
        BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
                       sizeof(struct bnx2x_vf_mbx_msg));
-       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
                       sizeof(union pf_vf_bulletin));
        return -ENOMEM;
 }
index 98366abd02bda520cf1876189cced5926175288f..d63d1327b051e4a80c76027b03e723c332d0a317 100644 (file)
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
  * Statistics service functions
  */
 
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
        struct dmae_command *dmae;
        u32 opcode;
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
        *stats_comp = 0;
 }
 
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
 {
        /* vfs travel through here as part of the statistics FSM, but no action
         * is required
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp)
 
        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
+
+       bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
+       __bnx2x_stats_start(bp);
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
        bnx2x_stats_comp(bp);
-       bnx2x_stats_pmf_update(bp);
-       bnx2x_stats_start(bp);
+       __bnx2x_stats_pmf_update(bp);
+       __bnx2x_stats_start(bp);
+       up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
+       __bnx2x_stats_pmf_update(bp);
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
         */
        if (IS_VF(bp))
                return;
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
        bnx2x_stats_comp(bp);
-       bnx2x_stats_start(bp);
+       __bnx2x_stats_start(bp);
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
        /* Make sure we use the value of the counter
         * used for sending the last stats ramrod.
         */
-       spin_lock_bh(&bp->stats_lock);
        cur_stats_counter = bp->stats_counter - 1;
-       spin_unlock_bh(&bp->stats_lock);
 
        /* are storm stats valid? */
        if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-       if (bnx2x_edebug_stats_stopped(bp))
+       /* we run update from timer context, so give up
+        * if somebody is in the middle of transition
+        */
+       if (down_trylock(&bp->stats_sema))
                return;
 
+       if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+               goto out;
+
        if (IS_PF(bp)) {
                if (*stats_comp != DMAE_COMP_VAL)
-                       return;
+                       goto out;
 
                if (bp->port.pmf)
                        bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
                                BNX2X_ERR("storm stats were not updated for 3 times\n");
                                bnx2x_panic();
                        }
-                       return;
+                       goto out;
                }
        } else {
                /* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        /* vf is done */
        if (IS_VF(bp))
-               return;
+               goto out;
 
        if (netif_msg_timer(bp)) {
                struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
+
+out:
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 {
        int update = 0;
 
+       if (down_timeout(&bp->stats_sema, HZ/10))
+               BNX2X_ERR("Unable to acquire stats lock\n");
+
+       bp->stats_started = false;
+
        bnx2x_stats_comp(bp);
 
        if (bp->port.pmf)
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
                bnx2x_hw_stats_post(bp);
                bnx2x_stats_comp(bp);
        }
+
+       up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@ static const struct {
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
        enum bnx2x_stats_state state;
+       void (*action)(struct bnx2x *bp);
        if (unlikely(bp->panic))
                return;
 
        spin_lock_bh(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+       action = bnx2x_stats_stm[state][event].action;
        spin_unlock_bh(&bp->stats_lock);
 
-       bnx2x_stats_stm[state][event].action(bp);
+       action(bp);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
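
The final bnx2x hunk snapshots the action pointer for the current state while stats_lock is held and only calls it after dropping the lock, presumably so the action can take the new stats_sema (which may sleep) without being invoked under a spinlock. A compact user-space sketch of that snapshot-then-call pattern, with a pthread mutex standing in for the spinlock; the state machine itself is invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

static void action_idle(void)  { puts("idle action");  }
static void action_start(void) { puts("start action"); }

static void (*const actions[2])(void) = { action_idle, action_start };

static void handle_event(int next_state)
{
	void (*action)(void);

	pthread_mutex_lock(&lock);
	action = actions[state];	/* snapshot under the lock */
	state = next_state;		/* transition */
	pthread_mutex_unlock(&lock);

	action();			/* may block; the lock is already dropped */
}

int main(void)
{
	handle_event(1);
	handle_event(0);
	return 0;
}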
index ddebc7a5dda0d14f32e8df6b7066b2d75c752a72..0da2214ef1b9e895903612809b21ed070cf32fa3 100644 (file)
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
 done:
        if (state == pci_channel_io_perm_failure) {
-               tg3_napi_enable(tp);
-               dev_close(netdev);
+               if (netdev) {
+                       tg3_napi_enable(tp);
+                       dev_close(netdev);
+               }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
        rtnl_lock();
 
        if (pci_enable_device(pdev)) {
-               netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+               dev_err(&pdev->dev,
+                       "Cannot re-enable PCI device after reset.\n");
                goto done;
        }
 
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
        pci_restore_state(pdev);
        pci_save_state(pdev);
 
-       if (!netif_running(netdev)) {
+       if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
        rc = PCI_ERS_RESULT_RECOVERED;
 
 done:
-       if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+       if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
index 687ec4a8bb48de001cfa9d39ef233ee7c69a5d96..9c89dc8fe1057dc1c410728bd252f135f9a80879 100644 (file)
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
                q->pg_chunk.offset = 0;
                mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
                                       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
-                       __free_pages(q->pg_chunk.page, order);
-                       q->pg_chunk.page = NULL;
-                       return -EIO;
-               }
                q->pg_chunk.mapping = mapping;
        }
        sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
        return flits_to_desc(flits);
 }
 
-
-/*     map_skb - map a packet main body and its page fragments
- *     @pdev: the PCI device
- *     @skb: the packet
- *     @addr: placeholder to save the mapped addresses
- *
- *     map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
-                  dma_addr_t *addr)
-{
-       const skb_frag_t *fp, *end;
-       const struct skb_shared_info *si;
-
-       *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-                              PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(pdev, *addr))
-               goto out_err;
-
-       si = skb_shinfo(skb);
-       end = &si->frags[si->nr_frags];
-
-       for (fp = si->frags; fp < end; fp++) {
-               *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
-                                          DMA_TO_DEVICE);
-               if (pci_dma_mapping_error(pdev, *addr))
-                       goto unwind;
-       }
-       return 0;
-
-unwind:
-       while (fp-- > si->frags)
-               dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
-                              DMA_TO_DEVICE);
-
-       pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
-       return -ENOMEM;
-}
-
 /**
- *     write_sgl - populate a scatter/gather list for a packet
+ *     make_sgl - populate a scatter/gather list for a packet
  *     @skb: the packet
  *     @sgp: the SGL to populate
  *     @start: start address of skb main body data to include in the SGL
  *     @len: length of skb main body data to include in the SGL
- *     @addr: the list of the mapped addresses
+ *     @pdev: the PCI device
  *
- *     Copies the scatter/gather list for the buffers that make up a packet
+ *     Generates a scatter/gather list for the buffers that make up a packet
  *     and returns the SGL size in 8-byte words.  The caller must size the SGL
  *     appropriately.
  */
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
                                    struct sg_ent *sgp, unsigned char *start,
-                                   unsigned int len, const dma_addr_t *addr)
+                                   unsigned int len, struct pci_dev *pdev)
 {
-       unsigned int i, j = 0, k = 0, nfrags;
+       dma_addr_t mapping;
+       unsigned int i, j = 0, nfrags;
 
        if (len) {
+               mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
                sgp->len[0] = cpu_to_be32(len);
-               sgp->addr[j++] = cpu_to_be64(addr[k++]);
+               sgp->addr[0] = cpu_to_be64(mapping);
+               j = 1;
        }
 
        nfrags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nfrags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+               mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+                                          DMA_TO_DEVICE);
                sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-               sgp->addr[j] = cpu_to_be64(addr[k++]);
+               sgp->addr[j] = cpu_to_be64(mapping);
                j ^= 1;
                if (j == 0)
                        ++sgp;
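
make_sgl() above keeps the cxgb3 convention of packing two (length, address) pairs into each SGL entry: j flips between 0 and 1, and sgp advances every second buffer. A tiny demo of that packing with a simplified entry layout and made-up values:

#include <stdio.h>

struct sg_ent {
	unsigned int       len[2];
	unsigned long long addr[2];
};

int main(void)
{
	struct sg_ent sgl[4];
	struct sg_ent *sgp = sgl;
	unsigned int i, j = 0;

	for (i = 0; i < 5; i++) {	/* head plus four fragments, say */
		sgp->len[j] = 100 + i;
		sgp->addr[j] = 0x1000ull * (i + 1);
		j ^= 1;
		if (j == 0)
			++sgp;		/* entry full, move to the next */
	}

	printf("filled %td full entries, %u slot(s) used in the last\n",
	       sgp - sgl, j);
	return 0;
}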
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                            const struct port_info *pi,
                            unsigned int pidx, unsigned int gen,
                            struct sge_txq *q, unsigned int ndesc,
-                           unsigned int compl, const dma_addr_t *addr)
+                           unsigned int compl)
 {
        unsigned int flits, sgl_flits, cntrl, tso_info;
        struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
        }
 
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+       sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
 
        write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
                         htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        struct netdev_queue *txq;
        struct sge_qset *qs;
        struct sge_txq *q;
-       dma_addr_t addr[MAX_SKB_FRAGS + 1];
 
        /*
         * The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
-               dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
        q->in_use += ndesc;
        if (unlikely(credits - ndesc < q->stop_thres)) {
                t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (likely(!skb_shared(skb)))
                skb_orphan(skb);
 
-       write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+       write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
        check_ring_tx_db(adap, q);
        return NETDEV_TX_OK;
 }
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
  */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
                          struct sge_txq *q, unsigned int pidx,
-                         unsigned int gen, unsigned int ndesc,
-                         const dma_addr_t *addr)
+                         unsigned int gen, unsigned int ndesc)
 {
        unsigned int sgl_flits, flits;
        struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 
        flits = skb_transport_offset(skb) / 8;
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
-                            skb_tail_pointer(skb) -
-                            skb_transport_header(skb), addr);
+       sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+                            skb->tail - skb->transport_header,
+                            adap->pdev);
        if (need_skb_unmap()) {
                setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
                skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again:   reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                goto again;
        }
 
-       if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
-               spin_unlock(&q->lock);
-               return NET_XMIT_SUCCESS;
-       }
-
        gen = q->gen;
        q->in_use += ndesc;
        pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
        }
        spin_unlock(&q->lock);
 
-       write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+       write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
        check_ring_tx_db(adap, q);
        return NET_XMIT_SUCCESS;
 }
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
        struct sge_txq *q = &qs->txq[TXQ_OFLD];
        const struct port_info *pi = netdev_priv(qs->netdev);
        struct adapter *adap = pi->adapter;
-       unsigned int written = 0;
 
        spin_lock(&q->lock);
 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                        break;
                }
 
-               if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
-                       break;
-
                gen = q->gen;
                q->in_use += ndesc;
                pidx = q->pidx;
                q->pidx += ndesc;
-               written += ndesc;
                if (q->pidx >= q->size) {
                        q->pidx -= q->size;
                        q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                __skb_unlink(skb, &q->sendq);
                spin_unlock(&q->lock);
 
-               write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
-                            (dma_addr_t *)skb->head);
+               write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
                spin_lock(&q->lock);
        }
        spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
        set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
        wmb();
-       if (likely(written))
-               t3_write_reg(adap, A_SG_KDOORBELL,
-                            F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+       t3_write_reg(adap, A_SG_KDOORBELL,
+                    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
 /**
index 6e6e0a117ee2fe16061ae8e0b77a51aaacd3336b..8ec5d74ad44d75e4fb9d7c871b0ddd95654c74d1 100644 (file)
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
 
                adapter->max_event_queues = le16_to_cpu(desc->eq_count);
                adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+               /* Clear flags that driver is not interested in */
+               adapter->if_cap_flags &=  BE_IF_CAP_FLAGS_WANT;
        }
 err:
        mutex_unlock(&adapter->mbox_lock);
index 5228d88c5a024e8e20be18e4f9fec69de7865e42..1b3b9e886412ddd689a0abbee536e6ba0d4ad50c 100644 (file)
@@ -563,6 +563,12 @@ enum be_if_flags {
        BE_IF_FLAGS_MULTICAST = 0x1000
 };
 
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+                        BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+                        BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+                        BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+                        BE_IF_FLAGS_UNTAGGED)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
index c896079728e1312893fdf03f0b0e111f2a6676cc..ef94a591f9e550ed31d654c2cc1fa2ba7bc1bef9 100644 (file)
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-                         struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+                        struct sk_buff *skb, unsigned int bufsize)
 {
        struct skge_rx_desc *rd = e->desc;
-       u64 map;
+       dma_addr_t map;
 
        map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
                             PCI_DMA_FROMDEVICE);
 
-       rd->dma_lo = map;
-       rd->dma_hi = map >> 32;
+       if (pci_dma_mapping_error(skge->hw->pdev, map))
+               return -1;
+
+       rd->dma_lo = lower_32_bits(map);
+       rd->dma_hi = upper_32_bits(map);
        e->skb = skb;
        rd->csum1_start = ETH_HLEN;
        rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
        rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
        dma_unmap_addr_set(e, mapaddr, map);
        dma_unmap_len_set(e, maplen, bufsize);
+       return 0;
 }
 
 /* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
                        return -ENOMEM;
 
                skb_reserve(skb, NET_IP_ALIGN);
-               skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+               if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+                       dev_kfree_skb(skb);
+                       return -EIO;
+               }
        } while ((e = e->next) != ring->start);
 
        ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
 
        BUG_ON(skge->dma & 7);
 
-       if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+       if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
                dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
                err = -EINVAL;
                goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        struct skge_tx_desc *td;
        int i;
        u32 control, len;
-       u64 map;
+       dma_addr_t map;
 
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        e->skb = skb;
        len = skb_headlen(skb);
        map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(hw->pdev, map))
+               goto mapping_error;
+
        dma_unmap_addr_set(e, mapaddr, map);
        dma_unmap_len_set(e, maplen, len);
 
-       td->dma_lo = map;
-       td->dma_hi = map >> 32;
+       td->dma_lo = lower_32_bits(map);
+       td->dma_hi = upper_32_bits(map);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
                        map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
                                               skb_frag_size(frag), DMA_TO_DEVICE);
+                       if (dma_mapping_error(&hw->pdev->dev, map))
+                               goto mapping_unwind;
 
                        e = e->next;
                        e->skb = skb;
                        tf = e->desc;
                        BUG_ON(tf->control & BMU_OWN);
 
-                       tf->dma_lo = map;
-                       tf->dma_hi = (u64) map >> 32;
+                       tf->dma_lo = lower_32_bits(map);
+                       tf->dma_hi = upper_32_bits(map);
                        dma_unmap_addr_set(e, mapaddr, map);
                        dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        }
 
        return NETDEV_TX_OK;
+
+mapping_unwind:
+       e = skge->tx_ring.to_use;
+       pci_unmap_single(hw->pdev,
+                        dma_unmap_addr(e, mapaddr),
+                        dma_unmap_len(e, maplen),
+                        PCI_DMA_TODEVICE);
+       while (i-- > 0) {
+               e = e->next;
+               pci_unmap_page(hw->pdev,
+                              dma_unmap_addr(e, mapaddr),
+                              dma_unmap_len(e, maplen),
+                              PCI_DMA_TODEVICE);
+       }
+
+mapping_error:
+       if (net_ratelimit())
+               dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 
                pci_dma_sync_single_for_cpu(skge->hw->pdev,
                                            dma_unmap_addr(e, mapaddr),
-                                           len, PCI_DMA_FROMDEVICE);
+                                           dma_unmap_len(e, maplen),
+                                           PCI_DMA_FROMDEVICE);
                skb_copy_from_linear_data(e->skb, skb->data, len);
                pci_dma_sync_single_for_device(skge->hw->pdev,
                                               dma_unmap_addr(e, mapaddr),
-                                              len, PCI_DMA_FROMDEVICE);
+                                              dma_unmap_len(e, maplen),
+                                              PCI_DMA_FROMDEVICE);
                skge_rx_reuse(e, skge->rx_buf_size);
        } else {
                struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                if (!nskb)
                        goto resubmit;
 
+               if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+                       dev_kfree_skb(nskb);
+                       goto resubmit;
+               }
+
                pci_unmap_single(skge->hw->pdev,
                                 dma_unmap_addr(e, mapaddr),
                                 dma_unmap_len(e, maplen),
                                 PCI_DMA_FROMDEVICE);
                skb = e->skb;
                prefetch(skb->data);
-               skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
        }
 
        skb_put(skb, len);
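
The skge hunks add pci_dma_mapping_error()/dma_mapping_error() checks on the rx refill and tx paths; on tx, a failed fragment mapping unwinds everything mapped so far before the packet is dropped. A rough stand-alone model of that unwind, with indices standing in for the head and fragments and the failing mapping hard-coded for the demo:

#include <stdio.h>

#define NFRAGS 5

static int map_one(int i)
{
	return i == 3 ? -1 : 0;		/* pretend fragment 3 fails to map */
}

static void unmap_one(int i)
{
	printf("unmap %d\n", i);
}

int main(void)
{
	int i;

	if (map_one(0))			/* head mapping */
		goto mapping_error;

	for (i = 1; i < NFRAGS; i++)
		if (map_one(i))
			goto mapping_unwind;

	puts("all mapped");
	return 0;

mapping_unwind:
	while (i-- > 0)			/* release 2, 1 and the head, in reverse */
		unmap_one(i);
mapping_error:
	puts("packet dropped");
	return 0;
}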
index c571de85d0f995bb2c302210cfbd979b9e1bec9e..5472cbd34028d9038539824c4167a0c3010a3a01 100644 (file)
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-       CMD_IF_REV = 4,
+       CMD_IF_REV = 5,
 };
 
 enum {
index c02cbcfd0fb83a4b10d198888c88f67fc4670cc9..443cc4d7b024c02d2cc77861868e1c1b17ee2524 100644 (file)
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-                               s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+                               s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
index 72a5222447f558b3e4a118ccaac70de358af8c64..f012658b6a927baf1fca6a2d39d806346c61a1a6 100644 (file)
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
        caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
        caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
        caps->log_max_mcg = out->hca_cap.log_max_mcg;
-       caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+       caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
        caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
        caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
        caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
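
The mlx5 hunk above widens max_qp_mcg from a 16-bit read to a 32-bit big-endian read masked to 24 bits, which implies the firmware keeps the value in the low three bytes of the word and something else in the top byte. A few lines of user-space arithmetic showing what the mask extracts; the 0xab byte is an arbitrary stand-in for whatever shares the word:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

int main(void)
{
	uint32_t wire = htonl(0xab012345);	/* top byte 0xab, 24-bit value 0x012345 */
	uint32_t max_qp_mcg = ntohl(wire) & 0xffffff;

	printf("24-bit field = 0x%06x, discarded top byte = 0x%02x\n",
	       (unsigned)max_qp_mcg, (unsigned)(ntohl(wire) >> 24));
	return 0;
}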
index 748f10a155c42e4923850f718df8b2142eeff01b..3e6670c4a7cd215998bf2302f2573acb8540ffb7 100644 (file)
@@ -55,33 +55,9 @@ enum {
 };
 
 static DEFINE_SPINLOCK(health_lock);
-
 static LIST_HEAD(health_list);
 static struct work_struct health_work;
 
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
-       spin_lock_irq(&health_lock);
-       if (reg_handler) {
-               spin_unlock_irq(&health_lock);
-               return -EEXIST;
-       }
-       reg_handler = handler;
-       spin_unlock_irq(&health_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
-       spin_lock_irq(&health_lock);
-       reg_handler = NULL;
-       spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
 static void health_care(struct work_struct *work)
 {
        struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
                priv = container_of(health, struct mlx5_priv, health);
                dev = container_of(priv, struct mlx5_core_dev, priv);
                mlx5_core_warn(dev, "handling bad device here\n");
+               /* nothing yet */
                spin_lock_irq(&health_lock);
-               if (reg_handler)
-                       reg_handler(dev->pdev, health->health,
-                                   sizeof(health->health));
-
                list_del_init(&health->list);
                spin_unlock_irq(&health_lock);
        }
index 4a3e137931a38b898468061ae8334016e4e2f23e..3a2408d448203623754d0aa87e35c16e809da43f 100644 (file)
@@ -43,10 +43,16 @@ enum {
        MLX5_PAGES_TAKE         = 2
 };
 
+enum {
+       MLX5_BOOT_PAGES         = 1,
+       MLX5_INIT_PAGES         = 2,
+       MLX5_POST_INIT_PAGES    = 3
+};
+
 struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u32     func_id;
-       s16     npages;
+       s32     npages;
        struct work_struct work;
 };
 
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
-       __be16                  num_boot_pages;
+       __be16                  rsvd;
        __be16                  func_id;
-       __be16                  init_pages;
-       __be16                  num_pages;
+       __be32                  num_pages;
 };
 
 struct mlx5_manage_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
-       __be16                  rsvd0;
+       __be16                  rsvd;
        __be16                  func_id;
-       __be16                  rsvd1;
-       __be16                  num_entries;
-       u8                      rsvd2[16];
+       __be32                  num_entries;
        __be64                  pas[0];
 };
 
 struct mlx5_manage_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
-       u8                      rsvd0[2];
-       __be16                  num_entries;
-       u8                      rsvd1[20];
+       __be32                  num_entries;
+       u8                      rsvd[4];
        __be64                  pas[0];
 };
 
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-                               s16 *pages, s16 *init_pages, u16 *boot_pages)
+                               s32 *npages, int boot)
 {
        struct mlx5_query_pages_inbox   in;
        struct mlx5_query_pages_outbox  out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+       in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);
 
-       if (pages)
-               *pages = be16_to_cpu(out.num_pages);
-
-       if (init_pages)
-               *init_pages = be16_to_cpu(out.init_pages);
-
-       if (boot_pages)
-               *boot_pages = be16_to_cpu(out.num_boot_pages);
-
+       *npages = be32_to_cpu(out.num_pages);
        *func_id = be16_to_cpu(out.func_id);
 
        return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
        in->func_id = cpu_to_be16(func_id);
-       in->num_entries = cpu_to_be16(npages);
+       in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        mlx5_core_dbg(dev, "err %d\n", err);
        if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
        in.func_id = cpu_to_be16(func_id);
-       in.num_entries = cpu_to_be16(npages);
+       in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                goto out_free;
        }
 
-       num_claimed = be16_to_cpu(out->num_entries);
+       num_claimed = be32_to_cpu(out->num_entries);
        if (nclaimed)
                *nclaimed = num_claimed;
 
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
 }
 
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                s16 npages)
+                                s32 npages)
 {
        struct mlx5_pages_req *req;
 
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-       u16 uninitialized_var(boot_pages);
-       s16 uninitialized_var(init_pages);
        u16 uninitialized_var(func_id);
+       s32 uninitialized_var(npages);
        int err;
 
-       err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
-                                  &boot_pages);
+       err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;
 
+       mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+                     npages, boot ? "boot" : "init", func_id);
 
-       mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
-                     init_pages, boot_pages, func_id);
-       return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+       return give_pages(dev, func_id, npages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
index 92da9980a0a0a2b86ed2189313c7105ad693354a..9d4bb7f839041967eaed1ef6fd0fd51b40f6ec6d 100644 (file)
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
        u8 val;
        int ret, max_sds_rings = adapter->max_sds_rings;
 
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+               netdev_info(netdev, "Device is resetting\n");
+               return -EBUSY;
+       }
+
        if (qlcnic_get_diag_lock(adapter)) {
                netdev_info(netdev, "Device in diagnostics mode\n");
                return -EBUSY;
index 9f4b8d5f08657d04fff6f0996b46313a1eea75dd..345d987aede491b8d5daf0e1e268b0ed8a9f2a51 100644 (file)
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
                return -EIO;
        }
 
-       qlcnic_set_drv_version(adapter);
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
        qlcnic_83xx_idc_attach_driver(adapter);
 
        return 0;
index ee013fcc33220f4d7bfdf8e60600accaefab77cd..bc05d016c85943834b687e0b2221495c88ea7190 100644 (file)
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_out_disable_mbx_intr;
 
-       qlcnic_set_drv_version(adapter);
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
 
        pci_set_drvdata(pdev, adapter);
 
@@ -3085,7 +3086,8 @@ done:
        adapter->fw_fail_cnt = 0;
        adapter->flags &= ~QLCNIC_FW_HANG;
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
-       qlcnic_set_drv_version(adapter);
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
 
        if (!qlcnic_clr_drv_state(adapter))
                qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
index 10ed82b3baca0970f65ce9797a18fc35cf76b1e3..660c3f5b22377a956c0caed68e59e6761d282239 100644 (file)
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
 
        if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
                err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
-               if (!err) {
-                       dev_info(&adapter->pdev->dev,
-                                "Failed to get current beacon state\n");
+               if (err) {
+                       netdev_err(adapter->netdev,
+                                  "Failed to get current beacon state\n");
                } else {
                        if (h_beacon_state == QLCNIC_BEACON_DISABLE)
                                ahw->beacon_state = 0;
index 6f35f8404d68adeffa84e9adbdbe4927176db74e..d2e591955bdde3fd03b40dac1c9c22e3d013c23d 100644 (file)
@@ -524,6 +524,7 @@ rx_status_loop:
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
                        dev->stats.rx_dropped++;
+                       kfree_skb(new_skb);
                        goto rx_next;
                }
 
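The one-line 8139cp change above plugs a leak: the replacement skb had already been allocated when dma_map_single() failed, and jumping to rx_next dropped the only reference to it. A minimal sketch of the pattern, with names assumed rather than taken from the driver:

    /* sketch: release a replacement buffer that never made it onto the ring */
    new_skb = netdev_alloc_skb_ip_align(dev, buflen);
    if (!new_skb)
            goto rx_next;                           /* nothing allocated yet */

    mapping = dma_map_single(&pdev->dev, new_skb->data, buflen, DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, mapping)) {
            dev->stats.rx_dropped++;
            kfree_skb(new_skb);                     /* skb is ours alone, free it */
            goto rx_next;
    }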
index c9d942a5c335d022a3eb8bd59ebe5345d655d557..1ef9d8a555aa33100d2103fd0d8e3a48f403a735 100644 (file)
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
        struct stmmac_priv *priv = (struct stmmac_priv *)p;
        unsigned int txsize = priv->dma_tx_size;
        unsigned int entry = priv->cur_tx % txsize;
-       struct dma_desc *desc = priv->dma_tx + entry;
+       struct dma_desc *desc;
        unsigned int nopaged_len = skb_headlen(skb);
        unsigned int bmax, len;
 
+       if (priv->extend_desc)
+               desc = (struct dma_desc *)(priv->dma_etx + entry);
+       else
+               desc = priv->dma_tx + entry;
+
        if (priv->plat->enh_desc)
                bmax = BUF_SIZE_8KiB;
        else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                                                STMMAC_RING_MODE);
                wmb();
                entry = (++priv->cur_tx) % txsize;
-               desc = priv->dma_tx + entry;
+
+               if (priv->extend_desc)
+                       desc = (struct dma_desc *)(priv->dma_etx + entry);
+               else
+                       desc = priv->dma_tx + entry;
 
                desc->des2 = dma_map_single(priv->device, skb->data + bmax,
                                            len, DMA_TO_DEVICE);
index f2ccb36e868590917a4cea295eb93dc20cb9ea8c..0a9bb9d30c3f0b78e28f9233e1775392d260aff4 100644 (file)
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 
        skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
                                 GFP_KERNEL);
-       if (unlikely(skb == NULL)) {
+       if (!skb) {
                pr_err("%s: Rx init fails; skb is NULL\n", __func__);
-               return 1;
+               return -ENOMEM;
        }
        skb_reserve(skb, NET_IP_ALIGN);
        priv->rx_skbuff[i] = skb;
        priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
+       if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+               pr_err("%s: DMA mapping error\n", __func__);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
 
        p->des2 = priv->rx_skbuff_dma[i];
 
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
        return 0;
 }
 
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+       if (priv->rx_skbuff[i]) {
+               dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+                                priv->dma_buf_sz, DMA_FROM_DEVICE);
+               dev_kfree_skb_any(priv->rx_skbuff[i]);
+       }
+       priv->rx_skbuff[i] = NULL;
+}
+
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
 {
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
        unsigned int txsize = priv->dma_tx_size;
        unsigned int rxsize = priv->dma_rx_size;
        unsigned int bfsize = 0;
+       int ret = -ENOMEM;
 
        /* Set the max buffer size according to the DESC mode
         * and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
                                                          dma_extended_desc),
                                                   &priv->dma_rx_phy,
                                                   GFP_KERNEL);
+               if (!priv->dma_erx)
+                       goto err_dma;
+
                priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
                                                   sizeof(struct
                                                          dma_extended_desc),
                                                   &priv->dma_tx_phy,
                                                   GFP_KERNEL);
-               if ((!priv->dma_erx) || (!priv->dma_etx))
-                       return;
+               if (!priv->dma_etx) {
+                       dma_free_coherent(priv->device, priv->dma_rx_size *
+                                       sizeof(struct dma_extended_desc),
+                                       priv->dma_erx, priv->dma_rx_phy);
+                       goto err_dma;
+               }
        } else {
                priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
                                                  sizeof(struct dma_desc),
                                                  &priv->dma_rx_phy,
                                                  GFP_KERNEL);
+               if (!priv->dma_rx)
+                       goto err_dma;
+
                priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
                                                  sizeof(struct dma_desc),
                                                  &priv->dma_tx_phy,
                                                  GFP_KERNEL);
-               if ((!priv->dma_rx) || (!priv->dma_tx))
-                       return;
+               if (!priv->dma_tx) {
+                       dma_free_coherent(priv->device, priv->dma_rx_size *
+                                       sizeof(struct dma_desc),
+                                       priv->dma_rx, priv->dma_rx_phy);
+                       goto err_dma;
+               }
        }
 
        priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
                                            GFP_KERNEL);
+       if (!priv->rx_skbuff_dma)
+               goto err_rx_skbuff_dma;
+
        priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
+       if (!priv->rx_skbuff)
+               goto err_rx_skbuff;
+
        priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
                                            GFP_KERNEL);
+       if (!priv->tx_skbuff_dma)
+               goto err_tx_skbuff_dma;
+
        priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
+       if (!priv->tx_skbuff)
+               goto err_tx_skbuff;
+
        if (netif_msg_probe(priv)) {
                pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
                         (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
                else
                        p = priv->dma_rx + i;
 
-               if (stmmac_init_rx_buffers(priv, p, i))
-                       break;
+               ret = stmmac_init_rx_buffers(priv, p, i);
+               if (ret)
+                       goto err_init_rx_buffers;
 
                if (netif_msg_probe(priv))
                        pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
 
        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);
+
+       return 0;
+err_init_rx_buffers:
+       while (--i >= 0)
+               stmmac_free_rx_buffers(priv, i);
+       kfree(priv->tx_skbuff);
+err_tx_skbuff:
+       kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+       kfree(priv->rx_skbuff);
+err_rx_skbuff:
+       kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+       if (priv->extend_desc) {
+               dma_free_coherent(priv->device, priv->dma_tx_size *
+                                 sizeof(struct dma_extended_desc),
+                                 priv->dma_etx, priv->dma_tx_phy);
+               dma_free_coherent(priv->device, priv->dma_rx_size *
+                                 sizeof(struct dma_extended_desc),
+                                 priv->dma_erx, priv->dma_rx_phy);
+       } else {
+               dma_free_coherent(priv->device,
+                               priv->dma_tx_size * sizeof(struct dma_desc),
+                               priv->dma_tx, priv->dma_tx_phy);
+               dma_free_coherent(priv->device,
+                               priv->dma_rx_size * sizeof(struct dma_desc),
+                               priv->dma_rx, priv->dma_rx_phy);
+       }
+err_dma:
+       return ret;
 }
 
 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
 {
        int i;
 
-       for (i = 0; i < priv->dma_rx_size; i++) {
-               if (priv->rx_skbuff[i]) {
-                       dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
-                                        priv->dma_buf_sz, DMA_FROM_DEVICE);
-                       dev_kfree_skb_any(priv->rx_skbuff[i]);
-               }
-               priv->rx_skbuff[i] = NULL;
-       }
+       for (i = 0; i < priv->dma_rx_size; i++)
+               stmmac_free_rx_buffers(priv, i);
 }
 
 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
        priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
        priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
-       init_dma_desc_rings(dev);
+
+       ret = init_dma_desc_rings(dev);
+       if (ret < 0) {
+               pr_err("%s: DMA descriptors initialization failed\n", __func__);
+               goto dma_desc_error;
+       }
 
        /* DMA initialization and SW reset */
        ret = stmmac_init_dma_engine(priv);
        if (ret < 0) {
-               pr_err("%s: DMA initialization failed\n", __func__);
+               pr_err("%s: DMA engine initialization failed\n", __func__);
                goto init_error;
        }
 
@@ -1672,6 +1744,7 @@ wolirq_error:
 
 init_error:
        free_dma_desc_resources(priv);
+dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
 phy_error:
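The stmmac hunks above turn init_dma_desc_rings() into a function that reports failure, and stmmac_open() now bails out through a new dma_desc_error label instead of carrying on with missing rings. Each allocation gets its own unwind target, so a mid-sequence failure frees exactly what was set up before it. A condensed sketch of that goto-unwind idiom (function and label names here are illustrative, not the driver's):

    static int setup_rings(struct device *dev, size_t sz,
                           void **rx, dma_addr_t *rx_phy,
                           void **tx, dma_addr_t *tx_phy)
    {
            *rx = dma_alloc_coherent(dev, sz, rx_phy, GFP_KERNEL);
            if (!*rx)
                    return -ENOMEM;

            *tx = dma_alloc_coherent(dev, sz, tx_phy, GFP_KERNEL);
            if (!*tx)
                    goto err_tx;

            return 0;

    err_tx:
            dma_free_coherent(dev, sz, *rx, *rx_phy);  /* undo only what succeeded */
            return -ENOMEM;
    }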
index 1d6dc41f755dba00edfbc83d07c1d898c05020cf..d01cacf8a7c279ee892b703715e84707214c186e 100644 (file)
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }
-       netif_rx(skb);
+       netif_receive_skb(skb);
 
        stats->rx_bytes += pkt_len;
        stats->rx_packets++;
@@ -2884,6 +2884,7 @@ out:
        return ret;
 
 err_iounmap:
+       netif_napi_del(&vptr->napi);
        iounmap(regs);
 err_free_dev:
        free_netdev(netdev);
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev)
        struct velocity_info *vptr = netdev_priv(netdev);
 
        unregister_netdev(netdev);
+       netif_napi_del(&vptr->napi);
        iounmap(vptr->mac_regs);
        free_netdev(netdev);
        velocity_nics--;
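Two separate things happen in the velocity hunks: netif_rx() becomes netif_receive_skb(), which is the appropriate call when the frame is already being handled inside the NAPI poll (softirq) path and does not need to be requeued to the backlog, and netif_napi_del() is added to both the probe error path and velocity_remove() so the NAPI context registered at probe time is always torn down. A sketch of the pairing, with generic names (example_poll and the weight of 64 are assumptions):

    /* sketch: every netif_napi_add() wants a matching netif_napi_del() */
    netif_napi_add(netdev, &priv->napi, example_poll, 64);  /* at probe time */

    /* later, on the probe error path and in the remove handler: */
    netif_napi_del(&priv->napi);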
index d0f9c2fd1d4fdaa06110d394411f48bb5f8874bb..16b43bf544b74dd3ee72599f56f429ca2dea8851 100644 (file)
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
                        return -EADDRNOTAVAIL;
        }
 
+       if (data && data[IFLA_MACVLAN_FLAGS] &&
+           nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+               return -EINVAL;
+
        if (data && data[IFLA_MACVLAN_MODE]) {
                switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
                case MACVLAN_MODE_PRIVATE:
index a98fb0ed6aef7b9be00c4ed21ecc1b79793f9459..b51db2abfe442cd95fdbcb97e17aec702ec0d2c5 100644 (file)
@@ -818,10 +818,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
-       if (vlan)
+       if (vlan) {
+               local_bh_disable();
                macvlan_start_xmit(skb, vlan->dev);
-       else
+               local_bh_enable();
+       } else {
                kfree_skb(skb);
+       }
        rcu_read_unlock();
 
        return total_len;
@@ -912,8 +915,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 done:
        rcu_read_lock();
        vlan = rcu_dereference(q->vlan);
-       if (vlan)
+       if (vlan) {
+               preempt_disable();
                macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+               preempt_enable();
+       }
        rcu_read_unlock();
 
        return ret ? ret : copied;
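macvlan_start_xmit() and macvlan_count_rx() update per-CPU statistics that are otherwise only touched from softirq context; calling them from the macvtap file read/write paths therefore wraps them in local_bh_disable()/local_bh_enable() and preempt_disable()/preempt_enable(), so the per-CPU counters are not torn by a concurrent softirq or by migration to another CPU. The guard, reduced to its shape (surrounding names assumed):

    rcu_read_lock();
    vlan = rcu_dereference(q->vlan);
    if (vlan) {
            local_bh_disable();             /* match the softirq-only callers */
            macvlan_start_xmit(skb, vlan->dev);
            local_bh_enable();
    } else {
            kfree_skb(skb);
    }
    rcu_read_unlock();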
index db690a372260786b395186dffb38ea24e58d45ee..71af122edf2d639c0a87099d83998a0706bfd433 100644 (file)
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        u32 rxhash;
 
        if (!(tun->flags & TUN_NO_PI)) {
-               if ((len -= sizeof(pi)) > total_len)
+               if (len < sizeof(pi))
                        return -EINVAL;
+               len -= sizeof(pi);
 
                if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
                        return -EFAULT;
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (tun->flags & TUN_VNET_HDR) {
-               if ((len -= tun->vnet_hdr_sz) > total_len)
+               if (len < tun->vnet_hdr_sz)
                        return -EINVAL;
+               len -= tun->vnet_hdr_sz;
 
                if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
                        return -EFAULT;
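Both tun_get_user() hunks replace a subtract-then-compare with a compare-then-subtract. len is unsigned, so the old form only rejected short buffers because the subtraction wrapped around to a huge value; the rewritten test states the intent directly and no longer depends on wraparound behaviour. A standalone sketch of the difference (sizes chosen only to show the wrap):

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            size_t len = 2, hdr = 4;        /* buffer shorter than the header */

            size_t after = len - hdr;       /* unsigned subtraction wraps */
            printf("after the wrap: %zu\n", after);

            if (len < hdr)                  /* explicit pre-check, no wrap needed */
                    printf("too short, rejected before subtracting\n");
            return 0;
    }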
index f4c6db419ddb3b56a9ecede0b62988c33e1d7c29..767f7af3bd40385ae18823ab579fff1709f79d7b 100644 (file)
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev)
                return -ENOTCONN;
 
        if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
-           vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+           vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
                vxlan_sock_hold(vs);
                dev_hold(dev);
                queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
 
-       flush_workqueue(vxlan_wq);
-
        spin_lock(&vn->sock_lock);
        hlist_del_rcu(&vxlan->hlist);
        spin_unlock(&vn->sock_lock);
index 7365674366f4426fb79b74cadb4be87bbd07b7a1..010b252be584237c7f6b4e71f5d375332176f586 100644 (file)
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
        if (!priv->join_status)
                goto done;
 
-       if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
-               wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
-                         priv->join_status);
-               BUG_ON(1);
-       }
+       if (priv->join_status == CW1200_JOIN_STATUS_AP)
+               goto done;
 
        cancel_work_sync(&priv->update_filtering_work);
        cancel_work_sync(&priv->set_beacon_wakeup_period_work);
index b9b2bb51e60590ab7dfb91d4e284a6a0c6c259a7..f2ed62e373408d3882a3e463af81b88f3a83b7b1 100644 (file)
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il)
                 * is killed. Hence update the killswitch state here. The
                 * rfkill handler will care about restarting if needed.
                 */
-               if (!test_bit(S_ALIVE, &il->status)) {
-                       if (hw_rf_kill)
-                               set_bit(S_RFKILL, &il->status);
-                       else
-                               clear_bit(S_RFKILL, &il->status);
+               if (hw_rf_kill) {
+                       set_bit(S_RFKILL, &il->status);
+               } else {
+                       clear_bit(S_RFKILL, &il->status);
                        wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+                       il_force_reset(il, true);
                }
 
                handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il)
 
        il->active_rate = RATES_MASK;
 
+       il_power_update_mode(il, true);
+       D_INFO("Updated power mode\n");
+
        if (il_is_associated(il)) {
                struct il_rxon_cmd *active_rxon =
                    (struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il)
        D_INFO("ALIVE processing complete.\n");
        wake_up(&il->wait_command_queue);
 
-       il_power_update_mode(il, true);
-       D_INFO("Updated power mode\n");
-
        return;
 
 restart:
index 3195aad440ddf6317b1b9b34d2e2f57e773755c3..b03e22ef5462d929ea20ffad72a4a01ff067c4a4 100644 (file)
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
 
        return 0;
 }
+EXPORT_SYMBOL(il_force_reset);
 
 int
 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
index dbdc5f7e2b294fad4adfe3ae676e258c5af6abbe..01e264fb50e0ebf09a1c4b9b49e1e5e0da3bc7ff 100644 (file)
@@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
 /* ACPI bus type */
 static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
 {
-       struct pci_dev * pci_dev;
-       u64     addr;
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       bool is_bridge;
+       u64 addr;
 
-       pci_dev = to_pci_dev(dev);
+       /*
+        * pci_is_bridge() is not suitable here, because pci_dev->subordinate
+        * is set only after acpi_pci_find_device() has been called for the
+        * given device.
+        */
+       is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+                       || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
        /* Please ref to ACPI spec for the syntax of _ADR */
        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
-       *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
+       *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
        if (!*handle)
                return -ENODEV;
        return 0;
index 767fee2ab340efb2d5ce647637053028c38b4aec..26019531db15a2f481dc87da6e6c76e085e1a06c 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
 #include <linux/of_device.h>
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
 }
 #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
 
-static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
+static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
 {
+       int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */
        /*
-        * The datasheet doesn't say which way round the
-        * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
-        * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
+        * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
+        * states:
+        * | The order in which registers are updated is
+        * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
+        * | (This list is in bitfield order, from LSB to MSB, as they would
+        * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
+        * | register. For example, the Seconds register corresponds to
+        * | STALE_REGS or NEW_REGS containing 0x80.)
         */
-       while (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
-                       (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))
-               cpu_relax();
+       do {
+               if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+                               (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
+                       return 0;
+               udelay(1);
+       } while (--timeout > 0);
+       return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+               (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
 }
 
 /* Time read/write */
 static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
 {
+       int ret;
        struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
 
-       stmp3xxx_wait_time(rtc_data);
+       ret = stmp3xxx_wait_time(rtc_data);
+       if (ret)
+               return ret;
+
        rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
        return 0;
 }
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
        struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
 
        writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
-       stmp3xxx_wait_time(rtc_data);
-       return 0;
+       return stmp3xxx_wait_time(rtc_data);
 }
 
 /* interrupt(s) handler */
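stmp3xxx_wait_time() used to spin forever on the STALE_REGS bit; above it becomes a bounded poll of about 5000 one-microsecond steps (the in-code comment cites roughly 3 ms from the i.MX28 reference manual, so the budget presumably leaves headroom) and returns -ETIME if the shadow copy never settles, which gettime and set_mmss now propagate. A generic sketch of the bounded-poll idiom, with the register and bit names assumed:

    /* sketch: poll a hardware "stale" flag with an upper bound, not forever */
    static int wait_not_stale(void __iomem *stat_reg, u32 stale_bit)
    {
            int timeout = 5000;                     /* ~5 ms at 1 us per step */

            do {
                    if (!(readl(stat_reg) & stale_bit))
                            return 0;               /* shadow registers caught up */
                    udelay(1);
            } while (--timeout > 0);

            return -ETIME;                          /* hardware never settled */
    }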
index 17150a77898433c27e58b2a8ac9ccb81bd228ddb..451bf99582ff09948c04e6634ca2d381c0d6a45a 100644 (file)
@@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
                rc = cqr->intrc;
        else
                rc = -EIO;
+
+       /* kick tasklets */
+       dasd_schedule_device_bh(device);
+       if (device->block)
+               dasd_schedule_block_bh(device->block);
+
        return rc;
 }
 
index b6d1f92ed33cc17e67ff4cbe1b3c424f5f75bd0e..c18c68150e9f17c8965b80159ecb2c2ddf1b0060 100644 (file)
@@ -38,7 +38,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.22"
+#define DRV_VERSION            "1.5.0.23"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index 5f09d1814d2685e86b06d9c6d697659d06cade07..42e15ee6e1bb73fceac4bd8aaf8ded5ad06ecf1e 100644 (file)
@@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
                INIT_WORK(&fnic->event_work, fnic_handle_event);
                skb_queue_head_init(&fnic->fip_frame_queue);
-               spin_lock_irqsave(&fnic_list_lock, flags);
-               if (!fnic_fip_queue) {
-                       fnic_fip_queue =
-                               create_singlethread_workqueue("fnic_fip_q");
-                       if (!fnic_fip_queue) {
-                               spin_unlock_irqrestore(&fnic_list_lock, flags);
-                               printk(KERN_ERR PFX "fnic FIP work queue "
-                                                "create failed\n");
-                               err = -ENOMEM;
-                               goto err_out_free_max_pool;
-                       }
-               }
-               spin_unlock_irqrestore(&fnic_list_lock, flags);
                INIT_LIST_HEAD(&fnic->evlist);
                INIT_LIST_HEAD(&fnic->vlans);
        } else {
@@ -960,6 +947,13 @@ static int __init fnic_init_module(void)
        spin_lock_init(&fnic_list_lock);
        INIT_LIST_HEAD(&fnic_list);
 
+       fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+       if (!fnic_fip_queue) {
+               printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
+               err = -ENOMEM;
+               goto err_create_fip_workq;
+       }
+
        fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
        if (!fnic_fc_transport) {
                printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -978,6 +972,8 @@ static int __init fnic_init_module(void)
 err_pci_register:
        fc_release_transport(fnic_fc_transport);
 err_fc_transport:
+       destroy_workqueue(fnic_fip_queue);
+err_create_fip_workq:
        destroy_workqueue(fnic_event_queue);
 err_create_fnic_workq:
        kmem_cache_destroy(fnic_io_req_cache);
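The fnic hunks move creation of the FIP work queue out of fnic_probe(), where it sat inside spin_lock_irqsave(&fnic_list_lock, ...), and into fnic_init_module() with its own unwind label. create_singlethread_workqueue() allocates with GFP_KERNEL and may sleep, which is not allowed while holding a spinlock with interrupts disabled, so creating it once at module init is both simpler and correct. A minimal sketch of the module-init placement (names are illustrative):

    static struct workqueue_struct *example_fip_wq;

    static int __init example_init(void)
    {
            /* may sleep: fine here, never under a spinlock */
            example_fip_wq = create_singlethread_workqueue("example_fip_q");
            if (!example_fip_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_fip_wq);
    }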
index 0177295599e0cd976e14982b5991fef567cbe52a..1f0ca68409d43a09ae24bd98a1703d34ec2ba2cc 100644 (file)
@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
                break;
        }
 
-       /*
-        * We expect the FW state to be READY
-        */
-       if (megasas_transition_to_ready(instance, 0))
-               goto fail_ready_state;
+       if (megasas_transition_to_ready(instance, 0)) {
+               atomic_set(&instance->fw_reset_no_pci_access, 1);
+               instance->instancet->adp_reset
+                       (instance, instance->reg_set);
+               atomic_set(&instance->fw_reset_no_pci_access, 0);
+               dev_info(&instance->pdev->dev,
+                       "megasas: FW restarted successfully from %s!\n",
+                       __func__);
+
+               /* waiting for about 30 seconds before retry */
+               ssleep(30);
+
+               if (megasas_transition_to_ready(instance, 0))
+                       goto fail_ready_state;
+       }
 
        /*
         * MSI-X host index 0 is common for all adapter.
index 3b1ea34e1f5a8ea5812a7feecd6ad7c40cd9fe04..eaa808e6ba91c78f13748a6ab3977c9c174c20f9 100644 (file)
@@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 {
        int i, result;
 
+       if (sdev->skip_vpd_pages)
+               goto fail;
+
        /* Ask for all the pages supported by this device */
        result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
        if (result)
index 2168258fb2c3a2cf5374a3592a302062d75bb305..74b88efde6ad408123de6a9b96ec37460195fe82 100644 (file)
@@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 
                vscsi->affinity_hint_set = true;
        } else {
-               for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+               for (i = 0; i < vscsi->num_queues; i++)
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
 
                vscsi->affinity_hint_set = false;
index 222d3e37fc283b95dc5932570c1dec874cec78e2..707966bd56103181d31887b53cca01c1d9bd9776 100644 (file)
@@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
                else
                        buf = (void *)t->tx_buf;
                t->tx_dma = dma_map_single(&spi->dev, buf,
-                               t->len, DMA_FROM_DEVICE);
+                               t->len, DMA_TO_DEVICE);
                if (!t->tx_dma) {
                        ret = -EFAULT;
                        goto err_tx_map;
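The davinci_spi change is a single constant but not cosmetic: a transmit buffer flows from memory to the device, so it must be mapped with DMA_TO_DEVICE; mapping it DMA_FROM_DEVICE skips the cache writeback and, on non-cache-coherent systems, lets the controller read stale data. A two-line sketch of matching the direction to the data flow (buffer names assumed):

    /* sketch: the mapping direction says who writes and who reads the buffer */
    tx_dma = dma_map_single(&spi->dev, tx_buf, len, DMA_TO_DEVICE);   /* CPU wrote it */
    rx_dma = dma_map_single(&spi->dev, rx_buf, len, DMA_FROM_DEVICE); /* device writes it */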
index dcceed29d31ae415818d1fceecc6518cbc017845..81972fa47bebab17ad635a4d855d5cd691f5daad 100644 (file)
@@ -1811,10 +1811,12 @@ static int zcache_comp_init(void)
 #else
        if (*zcache_comp_name != '\0') {
                ret = crypto_has_comp(zcache_comp_name, 0, 0);
-               if (!ret)
+               if (!ret) {
                        pr_info("zcache: %s not supported\n",
                                        zcache_comp_name);
-               goto out;
+                       ret = 1;
+                       goto out;
+               }
        }
        if (!ret)
                strcpy(zcache_comp_name, "lzo");
index 609dbc2f7151d52a08cf2f7b62d229637491ce55..83b4ef4dfcf80c71248555c5378c082182235bcf 100644 (file)
@@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf,
        /* Determine if it is a Rigol or not */
        data->rigol_quirk = 0;
        dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
-               data->usb_dev->descriptor.idVendor,
-               data->usb_dev->descriptor.idProduct);
+               le16_to_cpu(data->usb_dev->descriptor.idVendor),
+               le16_to_cpu(data->usb_dev->descriptor.idProduct));
        for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
-               if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) &&
-                   (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) {
+               if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
+                   (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
                        dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
                        data->rigol_quirk = 1;
                        break;
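idVendor and idProduct are stored little-endian in the USB device descriptor, so they have to pass through le16_to_cpu() before being printed or compared on big-endian hosts; the same conversion is applied in the adutux, mos7840 and ti_usb_3410_5052 hunks further down. A minimal sketch, assuming a struct usb_device pointer named udev:

    /* sketch: descriptor fields are __le16, convert before use */
    u16 vid = le16_to_cpu(udev->descriptor.idVendor);
    u16 pid = le16_to_cpu(udev->descriptor.idProduct);

    dev_dbg(&udev->dev, "device %04x:%04x\n", vid, pid);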
index 4a8a1d68002c0fa1695078c6566c616bf764fa35..558313de49111025e03c1b6b6d015a2688800498 100644 (file)
@@ -4798,7 +4798,8 @@ static void hub_events(void)
                                        hub->ports[i - 1]->child;
 
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
-                               if (!udev) {
+                               if (!udev || !(portstatus &
+                                               USB_PORT_STAT_CONNECTION)) {
                                        status = hub_port_reset(hub, i,
                                                        NULL, HUB_BH_RESET_TIME,
                                                        true);
@@ -4808,8 +4809,8 @@ static void hub_events(void)
                                        usb_lock_device(udev);
                                        status = usb_reset_device(udev);
                                        usb_unlock_device(udev);
+                                       connect_change = 0;
                                }
-                               connect_change = 0;
                        }
 
                        if (connect_change)
index a63598895077ff8c984c0ed473d5ee90afa52086..5b44cd47da5b28b30046bead355aa54eda3eec0f 100644 (file)
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04d8, 0x000c), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* CarrolTouch 4000U */
+       { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* CarrolTouch 4500U */
+       { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Samsung Android phone modem - ID conflict with SPH-I500 */
        { USB_DEVICE(0x04e8, 0x6601), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
index f80d0330d548d36564e9332d60392c9671360263..8e3c878f38cf57e64cb24c25a992b59e184ce322 100644 (file)
@@ -1391,21 +1391,20 @@ iso_stream_schedule (
 
                /* Behind the scheduling threshold? */
                if (unlikely(start < next)) {
+                       unsigned now2 = (now - base) & (mod - 1);
 
                        /* USB_ISO_ASAP: Round up to the first available slot */
                        if (urb->transfer_flags & URB_ISO_ASAP)
                                start += (next - start + period - 1) & -period;
 
                        /*
-                        * Not ASAP: Use the next slot in the stream.  If
-                        * the entire URB falls before the threshold, fail.
+                        * Not ASAP: Use the next slot in the stream,
+                        * no matter what.
                         */
-                       else if (start + span - period < next) {
-                               ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
+                       else if (start + span - period < now2) {
+                               ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
                                                urb, start + base,
-                                               span - period, next + base);
-                               status = -EXDEV;
-                               goto fail;
+                                               span - period, now2 + base);
                        }
                }
 
index df6978abd7e6f756c6d81ebeef324473e7f36a87..6f8c2fd47675dc2432902f5fbf8b4cfec8a306de 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
 
 #include "xhci.h"
 
index 41eb4fc33453525422b95218b6d51b34aaa617b5..9478caa2f71fb12912ddea1bfa06f37580e7c3ce 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/dmi.h>
+#include <linux/dma-mapping.h>
 
 #include "xhci.h"
 
index eb3c8c142fa988de1aaaee07d036e93554bb48a3..eeb27208c0d153de863f16ba12e62d0222e8c5d3 100644 (file)
@@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface,
 
        /* let the user know what node this device is now attached to */
        dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
-                udev->descriptor.idProduct, dev->serial_number,
+                le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
                 (dev->minor - ADU_MINOR_BASE));
 exit:
        dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
index 5a979729f8ec4695a1fa4e4395b86a430184e1c9..58c17fdc85eb77ea40718e7791475d341d34d0d8 100644 (file)
@@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial)
        if (d_details == NULL) {
                dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
                    __func__, le16_to_cpu(serial->dev->descriptor.idProduct));
-               return 1;
+               return -ENODEV;
        }
 
        /* Setup private data for serial driver */
index 51da424327b0f7c1f64ac90fe482df9e063548ea..b01300164fc03cd26cbc81d62afd5a647c04d8be 100644 (file)
@@ -90,6 +90,7 @@ struct urbtracker {
        struct list_head        urblist_entry;
        struct kref             ref_count;
        struct urb              *urb;
+       struct usb_ctrlrequest  *setup;
 };
 
 enum mos7715_pp_modes {
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref)
        struct mos7715_parport *mos_parport = urbtrack->mos_parport;
 
        usb_free_urb(urbtrack->urb);
+       kfree(urbtrack->setup);
        kfree(urbtrack);
        kref_put(&mos_parport->ref_count, destroy_mos_parport);
 }
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
        struct urbtracker *urbtrack;
        int ret_val;
        unsigned long flags;
-       struct usb_ctrlrequest setup;
        struct usb_serial *serial = mos_parport->serial;
        struct usb_device *usbdev = serial->dev;
 
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
                kfree(urbtrack);
                return -ENOMEM;
        }
-       setup.bRequestType = (__u8)0x40;
-       setup.bRequest = (__u8)0x0e;
-       setup.wValue = get_reg_value(reg, dummy);
-       setup.wIndex = get_reg_index(reg);
-       setup.wLength = 0;
+       urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
+       if (!urbtrack->setup) {
+               usb_free_urb(urbtrack->urb);
+               kfree(urbtrack);
+               return -ENOMEM;
+       }
+       urbtrack->setup->bRequestType = (__u8)0x40;
+       urbtrack->setup->bRequest = (__u8)0x0e;
+       urbtrack->setup->wValue = get_reg_value(reg, dummy);
+       urbtrack->setup->wIndex = get_reg_index(reg);
+       urbtrack->setup->wLength = 0;
        usb_fill_control_urb(urbtrack->urb, usbdev,
                             usb_sndctrlpipe(usbdev, 0),
-                            (unsigned char *)&setup,
+                            (unsigned char *)urbtrack->setup,
                             NULL, 0, async_complete, urbtrack);
        kref_init(&urbtrack->ref_count);
        INIT_LIST_HEAD(&urbtrack->urblist_entry);
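write_parport_reg_nonblock() submits its control URB asynchronously, so the usb_ctrlrequest it points at must outlive the function; the old stack-local setup packet could be gone by the time the host controller fetched it. The patch moves the packet into the urbtracker and frees it in destroy_urbtracker() alongside the URB. A sketch of the ownership rule, with hypothetical names (value, index, complete_fn, tracker are assumptions):

    /* sketch: heap-allocate the setup packet and tie its lifetime to the URB */
    struct usb_ctrlrequest *setup = kmalloc(sizeof(*setup), GFP_KERNEL);
    if (!setup)
            return -ENOMEM;

    setup->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR;    /* 0x40 */
    setup->bRequest     = 0x0e;
    setup->wValue       = cpu_to_le16(value);
    setup->wIndex       = cpu_to_le16(index);
    setup->wLength      = 0;

    usb_fill_control_urb(urb, usbdev, usb_sndctrlpipe(usbdev, 0),
                         (unsigned char *)setup, NULL, 0, complete_fn, tracker);
    /* free 'setup' only after the completion handler has run */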
index d953d674f22295a0aac144a7f9958c8dbb4df6a6..3bac4693c03829c9d5ca23d280e8c9b580548e66 100644 (file)
@@ -2193,7 +2193,7 @@ static int mos7810_check(struct usb_serial *serial)
 static int mos7840_probe(struct usb_serial *serial,
                                const struct usb_device_id *id)
 {
-       u16 product = serial->dev->descriptor.idProduct;
+       u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
        u8 *buf;
        int device_type;
 
index 375b5a400b6fb8a63b0aaa83272299a15aef6e83..5c9f9b1d7736d0b7fade9780920d8916477c81fa 100644 (file)
@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev)
        char buf[32];
 
        /* try ID specific firmware first, then try generic firmware */
-       sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
-           dev->descriptor.idProduct);
+       sprintf(buf, "ti_usb-v%04x-p%04x.fw",
+                       le16_to_cpu(dev->descriptor.idVendor),
+                       le16_to_cpu(dev->descriptor.idProduct));
        status = request_firmware(&fw_p, buf, &dev->dev);
 
        if (status != 0) {
                buf[0] = '\0';
-               if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
-                       switch (dev->descriptor.idProduct) {
+               if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
+                       switch (le16_to_cpu(dev->descriptor.idProduct)) {
                        case MTS_CDMA_PRODUCT_ID:
                                strcpy(buf, "mts_cdma.fw");
                                break;
index 8257d30c40720f348de427ef4d04ea3d1835d03a..85365784040b28a2c8c92a23ff4f4d5fe758834e 100644 (file)
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb)
                        tty_flip_buffer_push(&port->port);
                } else
                        dev_dbg(dev, "%s: empty read urb received\n", __func__);
-
-               /* Resubmit urb so we continue receiving */
-               err = usb_submit_urb(urb, GFP_ATOMIC);
-               if (err) {
-                       if (err != -EPERM) {
-                               dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err);
-                               /* busy also in error unless we are killed */
-                               usb_mark_last_busy(port->serial->dev);
-                       }
-               } else {
+       }
+       /* Resubmit urb so we continue receiving */
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err) {
+               if (err != -EPERM) {
+                       dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
+                               __func__, err);
+                       /* busy also in error unless we are killed */
                        usb_mark_last_busy(port->serial->dev);
                }
+       } else {
+               usb_mark_last_busy(port->serial->dev);
        }
 }
 
index 16968c899493db24a40481e151b0da591405c1bf..d3493ca0525d4c727ed8d0fb86223fde6cb001e7 100644 (file)
@@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
        }
        spin_lock_irqsave(&xfer->lock, flags);
        rpipe = xfer->ep->hcpriv;
+       if (rpipe == NULL) {
+               pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s",
+                       __func__, wa_xfer_id(xfer),
+                       "Probably already aborted.\n" );
+               goto out_unlock;
+       }
        /* Check the delayed list -> if there, release and complete */
        spin_lock_irqsave(&wa->xfer_list_lock, flags2);
        if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb)
                        break;
                }
                usb_status = xfer_result->bTransferStatus & 0x3f;
-               if (usb_status == WA_XFER_STATUS_ABORTED
-                   || usb_status == WA_XFER_STATUS_NOT_FOUND)
+               if (usb_status == WA_XFER_STATUS_NOT_FOUND)
                        /* taken care of already */
                        break;
                xfer_id = xfer_result->dwTransferID;
index 3ba37713b1f97795c875ded477e5dfe37b2259b7..dc09ebe4aba53d078881b28e123bc072142f423c 100644 (file)
@@ -239,24 +239,6 @@ static const struct fb_bitfield def_rgb565[] = {
        }
 };
 
-static const struct fb_bitfield def_rgb666[] = {
-       [RED] = {
-               .offset = 16,
-               .length = 6,
-       },
-       [GREEN] = {
-               .offset = 8,
-               .length = 6,
-       },
-       [BLUE] = {
-               .offset = 0,
-               .length = 6,
-       },
-       [TRANSP] = {    /* no support for transparency */
-               .length = 0,
-       }
-};
-
 static const struct fb_bitfield def_rgb888[] = {
        [RED] = {
                .offset = 16,
@@ -309,9 +291,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
                        break;
                case STMLCDIF_16BIT:
                case STMLCDIF_18BIT:
-                       /* 24 bit to 18 bit mapping */
-                       rgb = def_rgb666;
-                       break;
                case STMLCDIF_24BIT:
                        /* real 24 bit */
                        rgb = def_rgb888;
@@ -453,11 +432,6 @@ static int mxsfb_set_par(struct fb_info *fb_info)
                        return -EINVAL;
                case STMLCDIF_16BIT:
                case STMLCDIF_18BIT:
-                       /* 24 bit to 18 bit mapping */
-                       ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
-                                           *  each colour component
-                                           */
-                       break;
                case STMLCDIF_24BIT:
                        /* real 24 bit */
                        break;
index 5338f362293b27a9d6050e856fd878f5c88b4684..1b60698f141ed983e2661743f2a5c79f471a02a6 100644 (file)
@@ -28,6 +28,20 @@ struct panel_drv_data {
        bool invert_polarity;
 };
 
+static const struct omap_video_timings tvc_pal_timings = {
+       .x_res          = 720,
+       .y_res          = 574,
+       .pixel_clock    = 13500,
+       .hsw            = 64,
+       .hfp            = 12,
+       .hbp            = 68,
+       .vsw            = 5,
+       .vfp            = 5,
+       .vbp            = 41,
+
+       .interlace      = true,
+};
+
 #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
 
 static int tvc_connect(struct omap_dss_device *dssdev)
@@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       ddata->timings = omap_dss_pal_timings;
+       ddata->timings = tvc_pal_timings;
 
        dssdev = &ddata->dssdev;
        dssdev->driver = &tvc_driver;
        dssdev->dev = &pdev->dev;
        dssdev->type = OMAP_DISPLAY_TYPE_VENC;
        dssdev->owner = THIS_MODULE;
-       dssdev->panel.timings = omap_dss_pal_timings;
+       dssdev->panel.timings = tvc_pal_timings;
 
        r = omapdss_register_display(dssdev);
        if (r) {
index eaf133384a8f97497098ab82cc94a67862116705..8bc5e8ccb091852dd53f262eb556fe5578a419a3 100644 (file)
@@ -36,16 +36,23 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
                                u64 extent_item_pos,
                                struct extent_inode_elem **eie)
 {
-       u64 data_offset;
-       u64 data_len;
+       u64 offset = 0;
        struct extent_inode_elem *e;
 
-       data_offset = btrfs_file_extent_offset(eb, fi);
-       data_len = btrfs_file_extent_num_bytes(eb, fi);
+       if (!btrfs_file_extent_compression(eb, fi) &&
+           !btrfs_file_extent_encryption(eb, fi) &&
+           !btrfs_file_extent_other_encoding(eb, fi)) {
+               u64 data_offset;
+               u64 data_len;
 
-       if (extent_item_pos < data_offset ||
-           extent_item_pos >= data_offset + data_len)
-               return 1;
+               data_offset = btrfs_file_extent_offset(eb, fi);
+               data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+               if (extent_item_pos < data_offset ||
+                   extent_item_pos >= data_offset + data_len)
+                       return 1;
+               offset = extent_item_pos - data_offset;
+       }
 
        e = kmalloc(sizeof(*e), GFP_NOFS);
        if (!e)
@@ -53,7 +60,7 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
 
        e->next = *eie;
        e->inum = key->objectid;
-       e->offset = key->offset + (extent_item_pos - data_offset);
+       e->offset = key->offset + offset;
        *eie = e;
 
        return 0;
@@ -189,7 +196,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
        struct extent_buffer *eb;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
-       struct extent_inode_elem *eie = NULL;
+       struct extent_inode_elem *eie = NULL, *old = NULL;
        u64 disk_byte;
 
        if (level != 0) {
@@ -223,6 +230,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 
                if (disk_byte == wanted_disk_byte) {
                        eie = NULL;
+                       old = NULL;
                        if (extent_item_pos) {
                                ret = check_extent_in_eb(&key, eb, fi,
                                                *extent_item_pos,
@@ -230,18 +238,20 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
                                if (ret < 0)
                                        break;
                        }
-                       if (!ret) {
-                               ret = ulist_add(parents, eb->start,
-                                               (uintptr_t)eie, GFP_NOFS);
-                               if (ret < 0)
-                                       break;
-                               if (!extent_item_pos) {
-                                       ret = btrfs_next_old_leaf(root, path,
-                                                       time_seq);
-                                       continue;
-                               }
+                       if (ret > 0)
+                               goto next;
+                       ret = ulist_add_merge(parents, eb->start,
+                                             (uintptr_t)eie,
+                                             (u64 *)&old, GFP_NOFS);
+                       if (ret < 0)
+                               break;
+                       if (!ret && extent_item_pos) {
+                               while (old->next)
+                                       old = old->next;
+                               old->next = eie;
                        }
                }
+next:
                ret = btrfs_next_old_item(root, path, time_seq);
        }
 
index 5bf4c39e2ad625f2e90bfbbc765fd4e13db47a2b..ed504607d8ecc9e7e492b9c0f2d88ce02ae1d32b 100644 (file)
@@ -1271,7 +1271,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
                BUG_ON(!eb_rewin);
        }
 
-       extent_buffer_get(eb_rewin);
        btrfs_tree_read_unlock(eb);
        free_extent_buffer(eb);
 
index 583d98bd065ed83ca979a2786b59ae4342380c47..fe443fece85111d69b833f9cac4e2bbb85b6ea80 100644 (file)
@@ -4048,7 +4048,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        }
 
        while (!end) {
-               u64 offset_in_extent;
+               u64 offset_in_extent = 0;
 
                /* break if the extent we found is outside the range */
                if (em->start >= max || extent_map_end(em) < off)
@@ -4064,9 +4064,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
                /*
                 * record the offset from the start of the extent
-                * for adjusting the disk offset below
+                * for adjusting the disk offset below.  Only do this if the
+                * extent isn't compressed since our in ram offset may be past
+                * what we have actually allocated on disk.
                 */
-               offset_in_extent = em_start - em->start;
+               if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+                       offset_in_extent = em_start - em->start;
                em_end = extent_map_end(em);
                em_len = em_end - em_start;
                emflags = em->flags;
index a005fe2c072ad0751254adba0fa4e04db10cc996..8e686a427ce2e8e5e824419321f1f8f2dfe19acb 100644 (file)
@@ -596,20 +596,29 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                if (no_splits)
                        goto next;
 
-               if (em->block_start < EXTENT_MAP_LAST_BYTE &&
-                   em->start < start) {
+               if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
-                       split->orig_start = em->orig_start;
-                       split->block_start = em->block_start;
 
-                       if (compressed)
-                               split->block_len = em->block_len;
-                       else
-                               split->block_len = split->len;
-                       split->ram_bytes = em->ram_bytes;
-                       split->orig_block_len = max(split->block_len,
-                                                   em->orig_block_len);
+                       if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+                               split->orig_start = em->orig_start;
+                               split->block_start = em->block_start;
+
+                               if (compressed)
+                                       split->block_len = em->block_len;
+                               else
+                                       split->block_len = split->len;
+                               split->orig_block_len = max(split->block_len,
+                                               em->orig_block_len);
+                               split->ram_bytes = em->ram_bytes;
+                       } else {
+                               split->orig_start = split->start;
+                               split->block_len = 0;
+                               split->block_start = em->block_start;
+                               split->orig_block_len = 0;
+                               split->ram_bytes = split->len;
+                       }
+
                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
@@ -620,8 +629,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                        split = split2;
                        split2 = NULL;
                }
-               if (em->block_start < EXTENT_MAP_LAST_BYTE &&
-                   testend && em->start + em->len > start + len) {
+               if (testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;
 
                        split->start = start + len;
@@ -630,18 +638,28 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;
-                       split->orig_block_len = max(em->block_len,
+
+                       if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+                               split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);
-                       split->ram_bytes = em->ram_bytes;
 
-                       if (compressed) {
-                               split->block_len = em->block_len;
-                               split->block_start = em->block_start;
-                               split->orig_start = em->orig_start;
+                               split->ram_bytes = em->ram_bytes;
+                               if (compressed) {
+                                       split->block_len = em->block_len;
+                                       split->block_start = em->block_start;
+                                       split->orig_start = em->orig_start;
+                               } else {
+                                       split->block_len = split->len;
+                                       split->block_start = em->block_start
+                                               + diff;
+                                       split->orig_start = em->orig_start;
+                               }
                        } else {
-                               split->block_len = split->len;
-                               split->block_start = em->block_start + diff;
-                               split->orig_start = em->orig_start;
+                               split->ram_bytes = split->len;
+                               split->orig_start = split->start;
+                               split->block_len = 0;
+                               split->block_start = em->block_start;
+                               split->orig_block_len = 0;
                        }
 
                        ret = add_extent_mapping(em_tree, split, modified);
index 6d1b93c8aafb8a4d7b832cab8585ebf1ac1ced42..021694c081814828272ff03a8b9e1f64732a46cc 100644 (file)
@@ -2166,16 +2166,23 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
                if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
                        continue;
 
-               extent_offset = btrfs_file_extent_offset(leaf, extent);
-               if (key.offset - extent_offset != offset)
+               /*
+                * 'offset' refers to the exact key.offset,
+                * NOT the 'offset' field in btrfs_extent_data_ref, ie.
+                * (key.offset - extent_offset).
+                */
+               if (key.offset != offset)
                        continue;
 
+               extent_offset = btrfs_file_extent_offset(leaf, extent);
                num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+
                if (extent_offset >= old->extent_offset + old->offset +
                    old->len || extent_offset + num_bytes <=
                    old->extent_offset + old->offset)
                        continue;
 
+               ret = 0;
                break;
        }
 
@@ -2187,7 +2194,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
 
        backref->root_id = root_id;
        backref->inum = inum;
-       backref->file_pos = offset + extent_offset;
+       backref->file_pos = offset;
        backref->num_bytes = num_bytes;
        backref->extent_offset = extent_offset;
        backref->generation = btrfs_file_extent_generation(leaf, extent);
@@ -2210,7 +2217,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
        new->path = path;
 
        list_for_each_entry_safe(old, tmp, &new->head, list) {
-               ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+               ret = iterate_inodes_from_logical(old->bytenr +
+                                                 old->extent_offset, fs_info,
                                                  path, record_one_backref,
                                                  old);
                BUG_ON(ret < 0 && ret != -ENOENT);
@@ -4391,9 +4399,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
        int mask = attr->ia_valid;
        int ret;
 
-       if (newsize == oldsize)
-               return 0;
-
        /*
         * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
         * special case where we need to update the times despite not having
@@ -5165,14 +5170,31 @@ next:
        }
 
        /* Reached end of directory/root. Bump pos past the last item. */
-       if (key_type == BTRFS_DIR_INDEX_KEY)
-               /*
-                * 32-bit glibc will use getdents64, but then strtol -
-                * so the last number we can serve is this.
-                */
-               ctx->pos = 0x7fffffff;
-       else
-               ctx->pos++;
+       ctx->pos++;
+
+       /*
+        * Stop new entries from being returned after we return the last
+        * entry.
+        *
+        * New directory entries are assigned a strictly increasing
+        * offset.  This means that new entries created during readdir
+        * are *guaranteed* to be seen in the future by that readdir.
+        * This has broken buggy programs which operate on names as
+        * they're returned by readdir.  Until we re-use freed offsets
+        * we have this hack to stop new entries from being returned
+        * under the assumption that they'll never reach this huge
+        * offset.
+        *
+        * This is being careful not to overflow 32bit loff_t unless the
+        * last entry requires it because doing so has broken 32bit apps
+        * in the past.
+        */
+       if (key_type == BTRFS_DIR_INDEX_KEY) {
+               if (ctx->pos >= INT_MAX)
+                       ctx->pos = LLONG_MAX;
+               else
+                       ctx->pos = INT_MAX;
+       }
 nopos:
        ret = 0;
 err:
index d58cce77fc6c581625c3dd0449c83b6836bf4894..af1931a5960d9d602688fe8bb89242d752b71c5f 100644 (file)
@@ -983,12 +983,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  * a dirty root struct and adds it into the list of dead roots that need to
  * be deleted
  */
-int btrfs_add_dead_root(struct btrfs_root *root)
+void btrfs_add_dead_root(struct btrfs_root *root)
 {
        spin_lock(&root->fs_info->trans_lock);
-       list_add_tail(&root->root_list, &root->fs_info->dead_roots);
+       if (list_empty(&root->root_list))
+               list_add_tail(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
-       return 0;
 }
 
 /*
@@ -1925,7 +1925,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
        }
        root = list_first_entry(&fs_info->dead_roots,
                        struct btrfs_root, root_list);
-       list_del(&root->root_list);
+       list_del_init(&root->root_list);
        spin_unlock(&fs_info->trans_lock);
 
        pr_debug("btrfs: cleaner removing %llu\n",
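
The two hunks above make btrfs_add_dead_root() safe to call more than once: a root is only queued while its root_list node is empty, and the cleaner removes it with list_del_init() so the node becomes re-addable afterwards. A tiny stand-alone model of why that pairing works (toy list, not the kernel's list_head):

#include <stdbool.h>
#include <stdio.h>

/* Minimal circular list: after del_init() a node points at itself, so
 * empty(node) is true again and a later add is allowed, while a node
 * still on a list is silently skipped by the empty() check. */
struct node { struct node *prev, *next; };

static void init(struct node *n)        { n->prev = n->next = n; }
static bool empty(const struct node *n) { return n->next == n; }

static void del_init(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        init(n);
}

static void add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

int main(void)
{
        struct node head, root;
        init(&head);
        init(&root);

        if (empty(&root)) add_tail(&root, &head);   /* first add succeeds   */
        if (empty(&root)) add_tail(&root, &head);   /* duplicate is skipped */
        printf("queued exactly once: %d\n", head.next == &root && root.next == &head);

        del_init(&root);                            /* removal re-initialises */
        printf("re-addable afterwards: %d\n", empty(&root));
        return 0;
}
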
index 005b0375d18cfc6131a4480a9e67e0cbe44ed46a..defbc426989787e405e69f6e360fa82b3dc26556 100644 (file)
@@ -143,7 +143,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root);
 
-int btrfs_add_dead_root(struct btrfs_root *root);
+void btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
index 2c6791493637250423ac2c9bc21d9ca3f752ed11..ff60d8978ae264d95709d83342e1c309113521f0 100644 (file)
@@ -3746,8 +3746,9 @@ next_slot:
        }
 
 log_extents:
+       btrfs_release_path(path);
+       btrfs_release_path(dst_path);
        if (fast_search) {
-               btrfs_release_path(dst_path);
                ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
                if (ret) {
                        err = ret;
@@ -3764,8 +3765,6 @@ log_extents:
        }
 
        if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
-               btrfs_release_path(path);
-               btrfs_release_path(dst_path);
                ret = log_directory_changes(trans, root, inode, path, dst_path);
                if (ret) {
                        err = ret;
index 45e57cc38200448da67b1ce2c896efff555f0e73..fc6f4f3a1a9d6c4c2f8b0c7afa1eb20d8029febf 100644 (file)
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
        server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(server->secmech.md5)) {
                cifs_dbg(VFS, "could not allocate crypto md5\n");
-               return PTR_ERR(server->secmech.md5);
+               rc = PTR_ERR(server->secmech.md5);
+               server->secmech.md5 = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
                        crypto_shash_descsize(server->secmech.md5);
        server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
        if (!server->secmech.sdescmd5) {
-               rc = -ENOMEM;
                crypto_free_shash(server->secmech.md5);
                server->secmech.md5 = NULL;
-               return rc;
+               return -ENOMEM;
        }
        server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
        server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
                if (blobptr + attrsize > blobend)
                        break;
                if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
-                       if (!attrsize)
+                       if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
                                break;
                        if (!ses->domainName) {
                                ses->domainName =
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
 
 static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
 {
+       int rc;
        unsigned int size;
 
        /* check if already allocated */
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
        server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
        if (IS_ERR(server->secmech.hmacmd5)) {
                cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
-               return PTR_ERR(server->secmech.hmacmd5);
+               rc = PTR_ERR(server->secmech.hmacmd5);
+               server->secmech.hmacmd5 = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
index 4bdd547dbf6fb3d10a2b2b962b710b722d0b9685..85ea98d139fc5643b0606b67959bb1f320037d80 100644 (file)
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb)
                goto out_no_root;
        }
 
+       if (cifs_sb_master_tcon(cifs_sb)->nocase)
+               sb->s_d_op = &cifs_ci_dentry_ops;
+       else
+               sb->s_d_op = &cifs_dentry_ops;
+
        sb->s_root = d_make_root(inode);
        if (!sb->s_root) {
                rc = -ENOMEM;
                goto out_no_root;
        }
 
-       /* do that *after* d_make_root() - we want NULL ->d_op for root here */
-       if (cifs_sb_master_tcon(cifs_sb)->nocase)
-               sb->s_d_op = &cifs_ci_dentry_ops;
-       else
-               sb->s_d_op = &cifs_dentry_ops;
-
 #ifdef CONFIG_CIFS_NFSD_EXPORT
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cifs_dbg(FYI, "export ops supported\n");
index 1fdc370410576e1a3f5a36a133245468160ae4b4..52ca861ed35e4fe3fbf78ec387ff401fb0e8fb94 100644 (file)
@@ -44,6 +44,7 @@
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
 #define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
 #define MAX_USERNAME_SIZE 256  /* reasonable maximum for current servers */
 #define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
 
@@ -369,6 +370,9 @@ struct smb_version_operations {
        void (*generate_signingkey)(struct TCP_Server_Info *server);
        int (*calc_signature)(struct smb_rqst *rqst,
                                   struct TCP_Server_Info *server);
+       int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
+                       unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                       unsigned int xid);
 };
 
 struct smb_version_values {
index f7e584d047e202e0185f4936a71ef189e1a2781a..b29a012bed33a24b6ba45b17f95c70303e3e3014 100644 (file)
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work);
 struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
                                                work_func_t complete);
 void cifs_writedata_release(struct kref *refcount);
-
+int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+                       unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                       unsigned int xid);
 #endif                 /* _CIFSPROTO_H */
index fa68813396b5acb7e7a2e5a65e1202d740688cbb..d67c550c49806254da76ca6f7dd32d29144c3c16 100644 (file)
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        if (string == NULL)
                                goto out_nomem;
 
-                       if (strnlen(string, 256) == 256) {
+                       if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+                                       == CIFS_MAX_DOMAINNAME_LEN) {
                                printk(KERN_WARNING "CIFS: domain name too"
                                                    " long\n");
                                goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
 
 #ifdef CONFIG_KEYS
 
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
 
 /* Populate username and pw fields from keyring if possible */
 static int
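
These CIFS hunks replace the scattered hard-coded 256 with CIFS_MAX_DOMAINNAME_LEN and reject over-long domain names up front. A small stand-alone sketch of the strnlen()-based bound check used by the mount-option hunk (function name and test strings are illustrative):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

#define CIFS_MAX_DOMAINNAME_LEN 256

/* strnlen() never scans past the limit; reaching the limit exactly means
 * no terminating NUL was seen within the allowed length, i.e. too long. */
static int domain_name_ok(const char *name)
{
        return strnlen(name, CIFS_MAX_DOMAINNAME_LEN) < CIFS_MAX_DOMAINNAME_LEN;
}

int main(void)
{
        char too_long[512];

        memset(too_long, 'a', sizeof(too_long) - 1);
        too_long[sizeof(too_long) - 1] = '\0';

        printf("%d %d\n", domain_name_ok("EXAMPLE"), domain_name_ok(too_long)); /* 1 0 */
        return 0;
}
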
index 1e57f36ea1b2f84ac43c486cf6aa9cff4b2280a0..7e36ae34e9479b2614d61c919d393291593ef28f 100644 (file)
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
+                       oparms.reconnect = true;
                        goto reopen_success;
                }
                /*
index b83c3f5646bde6e21e80806541a00987198a5ce1..562044f700e56bf27997bf7ef7490bddf6bc1c91 100644 (file)
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
 }
 
 int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-                  const unsigned char *path,
-                  struct cifs_sb_info *cifs_sb, unsigned int xid)
+open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+                       unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                       unsigned int xid)
 {
        int rc;
        int oplock = 0;
        __u16 netfid = 0;
        struct tcon_link *tlink;
-       struct cifs_tcon *pTcon;
+       struct cifs_tcon *ptcon;
        struct cifs_io_parms io_parms;
-       u8 *buf;
-       char *pbuf;
-       unsigned int bytes_read = 0;
        int buf_type = CIFS_NO_BUFFER;
-       unsigned int link_len = 0;
        FILE_ALL_INFO file_info;
 
-       if (!CIFSCouldBeMFSymlink(fattr))
-               /* it's not a symlink */
-               return 0;
-
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink))
                return PTR_ERR(tlink);
-       pTcon = tlink_tcon(tlink);
+       ptcon = tlink_tcon(tlink);
 
-       rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+       rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
                         CREATE_NOT_DIR, &netfid, &oplock, &file_info,
                         cifs_sb->local_nls,
                         cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
-       if (rc != 0)
-               goto out;
+       if (rc != 0) {
+               cifs_put_tlink(tlink);
+               return rc;
+       }
 
        if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
-               CIFSSMBClose(xid, pTcon, netfid);
+               CIFSSMBClose(xid, ptcon, netfid);
+               cifs_put_tlink(tlink);
                /* it's not a symlink */
-               goto out;
+               return rc;
        }
 
-       buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-       if (!buf) {
-               rc = -ENOMEM;
-               goto out;
-       }
-       pbuf = buf;
        io_parms.netfid = netfid;
        io_parms.pid = current->tgid;
-       io_parms.tcon = pTcon;
+       io_parms.tcon = ptcon;
        io_parms.offset = 0;
        io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
 
-       rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
-       CIFSSMBClose(xid, pTcon, netfid);
-       if (rc != 0) {
-               kfree(buf);
+       rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+       CIFSSMBClose(xid, ptcon, netfid);
+       cifs_put_tlink(tlink);
+       return rc;
+}
+
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+                  const unsigned char *path,
+                  struct cifs_sb_info *cifs_sb, unsigned int xid)
+{
+       int rc = 0;
+       u8 *buf = NULL;
+       unsigned int link_len = 0;
+       unsigned int bytes_read = 0;
+       struct cifs_tcon *ptcon;
+
+       if (!CIFSCouldBeMFSymlink(fattr))
+               /* it's not a symlink */
+               return 0;
+
+       buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+       if (!buf) {
+               rc = -ENOMEM;
                goto out;
        }
 
+       ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+       if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+               rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+                                                &bytes_read, cifs_sb, xid);
+       else
+               goto out;
+
+       if (rc != 0)
+               goto out;
+
+       if (bytes_read == 0) /* not a symlink */
+               goto out;
+
        rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
-       kfree(buf);
        if (rc == -EINVAL) {
                /* it's not a symlink */
                rc = 0;
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
        fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
        fattr->cf_dtype = DT_LNK;
 out:
-       cifs_put_tlink(tlink);
+       kfree(buf);
        return rc;
 }
 
index ab877846939499c18cb454c83ed26c43dc60040c..69d2c826a23badc552bb686b518beaea297b473c 100644 (file)
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
                        return;
        }
 
+       /*
+        * If we know that the inode will need to be revalidated immediately,
+        * then don't create a new dentry for it. We'll end up doing an on
+        * the wire call either way and this spares us an invalidation.
+        */
+       if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+               return;
+
        dentry = d_alloc(parent, name);
        if (!dentry)
                return;
index 79358e341fd2ea63cff42e6d608c58f65b5f9804..08dd37bb23aac8ea04fe979743f8c963990a80f7 100644 (file)
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
                bytes_ret = 0;
        } else
                bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
-                                           256, nls_cp);
+                                           CIFS_MAX_DOMAINNAME_LEN, nls_cp);
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2;  /* account for null terminator */
 
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
 
        /* copy domain */
        if (ses->domainName != NULL) {
-               strncpy(bcc_ptr, ses->domainName, 256);
-               bcc_ptr += strnlen(ses->domainName, 256);
+               strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+               bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
        } /* else we will send a null domain name
             so the server will default to its own domain */
        *bcc_ptr = 0;
index 6457690731a220b58fa2be3f55bd6bf430f27b8b..60943978aec35bb06360adb7e060802d5775bee1 100644 (file)
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = {
        .mand_lock = cifs_mand_lock,
        .mand_unlock_range = cifs_unlock_range,
        .push_mand_locks = cifs_push_mandatory_locks,
+       .query_mf_symlink = open_query_close_cifs_symlink,
 };
 
 struct smb_version_values smb1_values = {
index 301b191270b9bcf09edbea5ad47100267f32e859..4f2300d020c7e517a6fa89be5d955fa4b12753da 100644 (file)
@@ -42,6 +42,7 @@
 static int
 smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
 {
+       int rc;
        unsigned int size;
 
        if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
        server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(server->secmech.hmacsha256)) {
                cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
-               return PTR_ERR(server->secmech.hmacsha256);
+               rc = PTR_ERR(server->secmech.hmacsha256);
+               server->secmech.hmacsha256 = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
                server->secmech.sdeschmacsha256 = NULL;
                crypto_free_shash(server->secmech.hmacsha256);
                server->secmech.hmacsha256 = NULL;
-               return PTR_ERR(server->secmech.cmacaes);
+               rc = PTR_ERR(server->secmech.cmacaes);
+               server->secmech.cmacaes = NULL;
+               return rc;
        }
 
        size = sizeof(struct shash_desc) +
index 4888cb3fdef76037b7af5f9db898bdcb82ce48c2..c7c83ff0f752ce6a86d6e42a420e34ed996ae8ca 100644 (file)
@@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
  */
 void debugfs_remove_recursive(struct dentry *dentry)
 {
-       struct dentry *child;
-       struct dentry *parent;
+       struct dentry *child, *next, *parent;
 
        if (IS_ERR_OR_NULL(dentry))
                return;
@@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
                return;
 
        parent = dentry;
+ down:
        mutex_lock(&parent->d_inode->i_mutex);
+       list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+               if (!debugfs_positive(child))
+                       continue;
 
-       while (1) {
-               /*
-                * When all dentries under "parent" has been removed,
-                * walk up the tree until we reach our starting point.
-                */
-               if (list_empty(&parent->d_subdirs)) {
-                       mutex_unlock(&parent->d_inode->i_mutex);
-                       if (parent == dentry)
-                               break;
-                       parent = parent->d_parent;
-                       mutex_lock(&parent->d_inode->i_mutex);
-               }
-               child = list_entry(parent->d_subdirs.next, struct dentry,
-                               d_u.d_child);
- next_sibling:
-
-               /*
-                * If "child" isn't empty, walk down the tree and
-                * remove all its descendants first.
-                */
+               /* perhaps simple_empty(child) makes more sense */
                if (!list_empty(&child->d_subdirs)) {
                        mutex_unlock(&parent->d_inode->i_mutex);
                        parent = child;
-                       mutex_lock(&parent->d_inode->i_mutex);
-                       continue;
+                       goto down;
                }
-               __debugfs_remove(child, parent);
-               if (parent->d_subdirs.next == &child->d_u.d_child) {
-                       /*
-                        * Try the next sibling.
-                        */
-                       if (child->d_u.d_child.next != &parent->d_subdirs) {
-                               child = list_entry(child->d_u.d_child.next,
-                                                  struct dentry,
-                                                  d_u.d_child);
-                               goto next_sibling;
-                       }
-
-                       /*
-                        * Avoid infinite loop if we fail to remove
-                        * one dentry.
-                        */
-                       mutex_unlock(&parent->d_inode->i_mutex);
-                       break;
-               }
-               simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ up:
+               if (!__debugfs_remove(child, parent))
+                       simple_release_fs(&debugfs_mount, &debugfs_mount_count);
        }
 
-       parent = dentry->d_parent;
+       mutex_unlock(&parent->d_inode->i_mutex);
+       child = parent;
+       parent = parent->d_parent;
        mutex_lock(&parent->d_inode->i_mutex);
-       __debugfs_remove(dentry, parent);
+
+       if (child != dentry) {
+               next = list_entry(child->d_u.d_child.next, struct dentry,
+                                       d_u.d_child);
+               goto up;
+       }
+
+       if (!__debugfs_remove(child, parent))
+               simple_release_fs(&debugfs_mount, &debugfs_mount_count);
        mutex_unlock(&parent->d_inode->i_mutex);
-       simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 }
 EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
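
The rewritten debugfs_remove_recursive() walks the dentry tree iteratively, descending at "down:" and unwinding through siblings at "up:", but the ordering it enforces is ordinary post-order: children are removed before their parent. A userspace analogue of that ordering using nftw() with FTW_DEPTH (not the debugfs code; the target directory is whatever the caller supplies):

#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

/* Remove a directory tree depth-first: FTW_DEPTH makes nftw() visit the
 * contents of each directory before the directory itself, so remove()
 * always sees files first and then already-empty directories. */
static int rm_cb(const char *path, const struct stat *sb,
                 int typeflag, struct FTW *ftwbuf)
{
        (void)sb; (void)typeflag; (void)ftwbuf;
        return remove(path);
}

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <dir>\n", argv[0]);
                return EXIT_FAILURE;
        }
        if (nftw(argv[1], rm_cb, 16, FTW_DEPTH | FTW_PHYS) != 0) {
                perror("nftw");
                return EXIT_FAILURE;
        }
        return EXIT_SUCCESS;
}
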
 
index 911649a47dd5ae5b000bfabb2b73b0c71a95ec52..812149119fa3c36291508af6fc33a7646c84155f 100644 (file)
@@ -686,7 +686,6 @@ static int device_close(struct inode *inode, struct file *file)
           device_remove_lockspace() */
 
        sigprocmask(SIG_SETMASK, &tmpsig, NULL);
-       recalc_sigpending();
 
        return 0;
 }
index 9c73def87642fc8f36624b7fa525504541a59a04..fd774c7cb4831be8817799ed6cab355a368fa19f 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                return -ENOMEM;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, old_start, old_end);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
-       tlb_finish_mmu(&tlb, new_end, old_end);
+       tlb_finish_mmu(&tlb, old_start, old_end);
 
        /*
         * Shrink the vma to just the new range.  Always succeeds.
index b577e45425b0ac921ae1f0904b6ef84b7ace8fb3..0ab26fbf33808c565ae9ef4449d6836e4cb01fae 100644 (file)
@@ -2086,6 +2086,7 @@ extern int  ext4_sync_inode(handle_t *, struct inode *);
 extern void ext4_dirty_inode(struct inode *, int);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_inode_attach_jinode(struct inode *inode);
 extern int ext4_can_truncate(struct inode *inode);
 extern void ext4_truncate(struct inode *);
 extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
index 72a3600aedbdffe48b5f2756bb0ad863f120a7b8..17ac112ab1012bd88ff32d145f27a29f025efdde 100644 (file)
@@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
        set_buffer_prio(bh);
        if (ext4_handle_valid(handle)) {
                err = jbd2_journal_dirty_metadata(handle, bh);
-               if (err) {
-                       /* Errors can only happen if there is a bug */
-                       handle->h_err = err;
-                       __ext4_journal_stop(where, line, handle);
+               /* Errors can only happen if there is a bug */
+               if (WARN_ON_ONCE(err)) {
+                       ext4_journal_abort_handle(where, line, __func__, bh,
+                                                 handle, err);
                }
        } else {
                if (inode)
index a61873808f7659aafefa103f96f1f51f564ece04..72ba4705d4fa40e12edd1097f0815381688df7e6 100644 (file)
@@ -4412,7 +4412,7 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
 retry:
        err = ext4_es_remove_extent(inode, last_block,
                                    EXT_MAX_BLOCKS - last_block);
-       if (err == ENOMEM) {
+       if (err == -ENOMEM) {
                cond_resched();
                congestion_wait(BLK_RW_ASYNC, HZ/50);
                goto retry;
index 6f4cc567c382b7289e2b35a7038756a581b4cd95..319c9d26279a94fa7039dc2f8730747fe2404697 100644 (file)
@@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 {
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       struct ext4_inode_info *ei = EXT4_I(inode);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;
@@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
-       if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
-               struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
-
-               spin_lock(&inode->i_lock);
-               if (!ei->jinode) {
-                       if (!jinode) {
-                               spin_unlock(&inode->i_lock);
-                               return -ENOMEM;
-                       }
-                       ei->jinode = jinode;
-                       jbd2_journal_init_jbd_inode(ei->jinode, inode);
-                       jinode = NULL;
-               }
-               spin_unlock(&inode->i_lock);
-               if (unlikely(jinode != NULL))
-                       jbd2_free_inode(jinode);
+       if (filp->f_mode & FMODE_WRITE) {
+               int ret = ext4_inode_attach_jinode(inode);
+               if (ret < 0)
+                       return ret;
        }
        return dquot_file_open(inode, filp);
 }
index f03598c6ffd3a48282193a13ff30ab6e412e471b..8bf5999875ee454c67899dad4c49ed2ec9445ffc 100644 (file)
@@ -734,11 +734,8 @@ repeat_in_this_group:
                ino = ext4_find_next_zero_bit((unsigned long *)
                                              inode_bitmap_bh->b_data,
                                              EXT4_INODES_PER_GROUP(sb), ino);
-               if (ino >= EXT4_INODES_PER_GROUP(sb)) {
-                       if (++group == ngroups)
-                               group = 0;
-                       continue;
-               }
+               if (ino >= EXT4_INODES_PER_GROUP(sb))
+                       goto next_group;
                if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
                        ext4_error(sb, "reserved inode found cleared - "
                                   "inode=%lu", ino + 1);
@@ -769,6 +766,9 @@ repeat_in_this_group:
                        goto got; /* we grabbed the inode! */
                if (ino < EXT4_INODES_PER_GROUP(sb))
                        goto repeat_in_this_group;
+next_group:
+               if (++group == ngroups)
+                       group = 0;
        }
        err = -ENOSPC;
        goto out;
index ba33c67d6e48a8b035cc9113ce05b916c535350c..c2ca04e67a4fce6a40316550215e2b2fbe107d01 100644 (file)
@@ -555,14 +555,13 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                int ret;
                unsigned long long status;
 
-#ifdef ES_AGGRESSIVE_TEST
-               if (retval != map->m_len) {
-                       printk("ES len assertion failed for inode: %lu "
-                              "retval %d != map->m_len %d "
-                              "in %s (lookup)\n", inode->i_ino, retval,
-                              map->m_len, __func__);
+               if (unlikely(retval != map->m_len)) {
+                       ext4_warning(inode->i_sb,
+                                    "ES len assertion failed for inode "
+                                    "%lu: retval %d != map->m_len %d",
+                                    inode->i_ino, retval, map->m_len);
+                       WARN_ON(1);
                }
-#endif
 
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -656,14 +655,13 @@ found:
                int ret;
                unsigned long long status;
 
-#ifdef ES_AGGRESSIVE_TEST
-               if (retval != map->m_len) {
-                       printk("ES len assertion failed for inode: %lu "
-                              "retval %d != map->m_len %d "
-                              "in %s (allocation)\n", inode->i_ino, retval,
-                              map->m_len, __func__);
+               if (unlikely(retval != map->m_len)) {
+                       ext4_warning(inode->i_sb,
+                                    "ES len assertion failed for inode "
+                                    "%lu: retval %d != map->m_len %d",
+                                    inode->i_ino, retval, map->m_len);
+                       WARN_ON(1);
                }
-#endif
 
                /*
                 * If the extent has been zeroed out, we don't need to update
@@ -1637,14 +1635,13 @@ add_delayed:
                int ret;
                unsigned long long status;
 
-#ifdef ES_AGGRESSIVE_TEST
-               if (retval != map->m_len) {
-                       printk("ES len assertion failed for inode: %lu "
-                              "retval %d != map->m_len %d "
-                              "in %s (lookup)\n", inode->i_ino, retval,
-                              map->m_len, __func__);
+               if (unlikely(retval != map->m_len)) {
+                       ext4_warning(inode->i_sb,
+                                    "ES len assertion failed for inode "
+                                    "%lu: retval %d != map->m_len %d",
+                                    inode->i_ino, retval, map->m_len);
+                       WARN_ON(1);
                }
-#endif
 
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -3536,6 +3533,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
                   offset;
        }
 
+       if (offset & (sb->s_blocksize - 1) ||
+           (offset + length) & (sb->s_blocksize - 1)) {
+               /*
+                * Attach jinode to inode for jbd2 if we do any zeroing of
+                * partial block
+                */
+               ret = ext4_inode_attach_jinode(inode);
+               if (ret < 0)
+                       goto out_mutex;
+
+       }
+
        first_block_offset = round_up(offset, sb->s_blocksize);
        last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
 
@@ -3604,6 +3613,31 @@ out_mutex:
        return ret;
 }
 
+int ext4_inode_attach_jinode(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct jbd2_inode *jinode;
+
+       if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
+               return 0;
+
+       jinode = jbd2_alloc_inode(GFP_KERNEL);
+       spin_lock(&inode->i_lock);
+       if (!ei->jinode) {
+               if (!jinode) {
+                       spin_unlock(&inode->i_lock);
+                       return -ENOMEM;
+               }
+               ei->jinode = jinode;
+               jbd2_journal_init_jbd_inode(ei->jinode, inode);
+               jinode = NULL;
+       }
+       spin_unlock(&inode->i_lock);
+       if (unlikely(jinode != NULL))
+               jbd2_free_inode(jinode);
+       return 0;
+}
+
 /*
  * ext4_truncate()
  *
@@ -3664,6 +3698,12 @@ void ext4_truncate(struct inode *inode)
                        return;
        }
 
+       /* If we zero-out tail of the page, we have to create jinode for jbd2 */
+       if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+               if (ext4_inode_attach_jinode(inode) < 0)
+                       return;
+       }
+
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                credits = ext4_writepage_trans_blocks(inode);
        else
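
The new ext4_inode_attach_jinode() helper shown above follows the usual "allocate outside the lock, install under the lock, free the loser" pattern, so i_lock is never held across jbd2_alloc_inode(). A stand-alone sketch of the same pattern with a pthread mutex (illustrative only, not the ext4 code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate before taking the lock, install only if nobody beat us to it,
 * and free the spare copy if we lost the race. */
struct holder {
        pthread_mutex_t lock;
        void *obj;
};

static int attach_obj(struct holder *h)
{
        void *new = malloc(64);         /* may be NULL, like jbd2_alloc_inode() */

        pthread_mutex_lock(&h->lock);
        if (!h->obj) {
                if (!new) {
                        pthread_mutex_unlock(&h->lock);
                        return -1;      /* -ENOMEM in the kernel version */
                }
                h->obj = new;
                new = NULL;             /* ownership transferred */
        }
        pthread_mutex_unlock(&h->lock);

        free(new);                      /* only frees a losing allocation */
        return 0;
}

int main(void)
{
        struct holder h = { PTHREAD_MUTEX_INITIALIZER, NULL };

        printf("%d %d\n", attach_obj(&h), attach_obj(&h));   /* 0 0, one object kept */
        free(h.obj);
        return 0;
}
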
index 9491ac0590f746b9abe56e484e7646022a0bd1c3..c0427e2f66481a13f3018b4630901688fdd6646e 100644 (file)
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
        memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
        memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
        memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
-       memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
-       memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
+       ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
+       ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
+       ext4_es_lru_del(inode1);
+       ext4_es_lru_del(inode2);
 
        isize = i_size_read(inode1);
        i_size_write(inode1, i_size_read(inode2));
index bca26f34edf4ca55dadc3ac9c5447d916dcbe4fa..b59373b625e9bcacaf3a63e409b21b1ef6212dfa 100644 (file)
@@ -1359,7 +1359,7 @@ static const struct mount_opts {
        {Opt_delalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
-        MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+        MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_SET},
        {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                }
                if (test_opt(sb, DIOREAD_NOLOCK)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
-                                "both data=journal and delalloc");
+                                "both data=journal and dioread_nolock");
                        goto failed_mount;
                }
                if (test_opt(sb, DELALLOC))
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
+       if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+               if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+                       ext4_msg(sb, KERN_ERR, "can't mount with "
+                                "both data=journal and delalloc");
+                       err = -EINVAL;
+                       goto restore_opts;
+               }
+               if (test_opt(sb, DIOREAD_NOLOCK)) {
+                       ext4_msg(sb, KERN_ERR, "can't mount with "
+                                "both data=journal and dioread_nolock");
+                       err = -EINVAL;
+                       goto restore_opts;
+               }
+       }
+
        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, "Abort forced by user");
 
@@ -5481,6 +5496,7 @@ static void __exit ext4_exit_fs(void)
        kset_unregister(ext4_kset);
        ext4_exit_system_zone();
        ext4_exit_pageio();
+       ext4_exit_es();
 }
 
 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
index 6599222536eb20e9deb22884bdc305efcf2cf1ae..65343c3741ff86876418deb9d18e3f72f5872898 100644 (file)
@@ -730,14 +730,14 @@ static int __init fcntl_init(void)
         * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
         * is defined as O_NONBLOCK on some platforms and not on others.
         */
-       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
                O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
-               __FMODE_EXEC    | O_PATH
+               __FMODE_EXEC    | O_PATH        | __O_TMPFILE
                ));
 
        fasync_cache = kmem_cache_create("fasync_cache",
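
The fcntl hunk bumps the expected bit count from 19 to 20 because __O_TMPFILE contributes one more single-bit flag to the ORed mask. A small stand-alone illustration of that HWEIGHT-style sanity check, using made-up flag values:

#include <assert.h>
#include <stdio.h>

/* OR all known single-bit flags together and require the population count
 * to match; adding a new flag then forces the expected count to be bumped. */
enum {
        FLAG_A = 1u << 0,
        FLAG_B = 1u << 1,
        FLAG_C = 1u << 4,
};

int main(void)
{
        unsigned all = FLAG_A | FLAG_B | FLAG_C;

        assert(__builtin_popcount(all) == 3);   /* bump to 4 when adding FLAG_D */
        printf("flag mask 0x%x has %d bits set\n", all, __builtin_popcount(all));
        return 0;
}
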
index 9435384562a271cacd5219651269b36d5d2923c4..544a809819c3ee5c16ddaa42a8ce0ce26d2194f4 100644 (file)
@@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void)
 
        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
-       if (IS_ERR(glock_workqueue))
-               return PTR_ERR(glock_workqueue);
+       if (!glock_workqueue)
+               return -ENOMEM;
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
-       if (IS_ERR(gfs2_delete_workqueue)) {
+       if (!gfs2_delete_workqueue) {
                destroy_workqueue(glock_workqueue);
-               return PTR_ERR(gfs2_delete_workqueue);
+               return -ENOMEM;
        }
 
        register_shrinker(&glock_shrinker);
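
The gfs2_glock_init() fix matters because alloc_workqueue() reports failure with NULL rather than an ERR_PTR-encoded errno, so the old IS_ERR() checks could never fire. A toy reimplementation of the ERR_PTR helpers showing the difference between the two failure conventions (constants simplified):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *null_failure   = NULL;              /* convention used by alloc_workqueue() */
        void *errptr_failure = ERR_PTR(-ENOMEM);  /* convention used by e.g. crypto_alloc_shash() */

        printf("IS_ERR(NULL)         = %d\n", IS_ERR(null_failure));   /* 0: the check misses */
        printf("IS_ERR(ERR_PTR(-12)) = %d -> %ld\n",
               IS_ERR(errptr_failure), PTR_ERR(errptr_failure));
        return 0;
}
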
index 5f2e5224c51c9ae79e34a1eb0f405a7b304cd0be..e2e0a90396e7823da9aa47c44a727d5e75751cc6 100644 (file)
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
  * None of the buffers should be dirty, locked, or pinned.
  */
 
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+                            unsigned int nr_revokes)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
-       list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+       list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+               if (nr_revokes == 0)
+                       break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
+               nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;
 
-       __gfs2_ail_flush(gl, 0);
+       __gfs2_ail_flush(gl, 0, tr.tr_revokes);
 
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
+       unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;
 
        if (!revokes)
                return;
 
-       ret = gfs2_trans_begin(sdp, 0, revokes);
+       while (revokes > max_revokes)
+               max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+       ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
-       __gfs2_ail_flush(gl, fsync);
+       __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
 }
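
The gfs2_ail_flush() hunk sizes the transaction by whole log blocks: it starts from the number of revokes that fit in the first descriptor block and grows in continuation-block steps until the pending count is covered, then hands that budget down as nr_revokes. A stand-alone model of the rounding, with made-up per-block capacities:

#include <stdio.h>

/* Grow the reservation in whole-block steps until it covers the pending
 * revokes, so the transaction is budgeted per block rather than per revoke. */
static unsigned int revoke_budget(unsigned int pending,
                                  unsigned int first_block,
                                  unsigned int per_block)
{
        unsigned int budget = first_block;

        while (pending > budget)
                budget += per_block;
        return budget;
}

int main(void)
{
        /* e.g. 503 revokes fit in the first block, 511 in each later one */
        printf("%u\n", revoke_budget(1200, 503, 511));   /* 1525 */
        return 0;
}
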
index bbb2715171cd0c983770cf86b4b57ed69e04c98d..64915eeae5a7112f59256185a00a1cc9f3b2a193 100644 (file)
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                }
                gfs2_glock_dq_uninit(ghs);
                if (IS_ERR(d))
-                       return PTR_RET(d);
+                       return PTR_ERR(d);
                return error;
        } else if (error != -ENOENT) {
                goto fail_gunlock;
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
        struct gfs2_holder gh;
        int ret;
 
+       /* For selinux during lookup */
+       if (gfs2_glock_is_locked_by_me(ip->i_gl))
+               return generic_getxattr(dentry, name, data, size);
+
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret == 0) {
index e04d0e09ee7b59d5959d69e5df872a5871e9ae64..7b0f5043cf24c253612451787588d638da5483ce 100644 (file)
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void)
                goto fail_wq;
 
        gfs2_control_wq = alloc_workqueue("gfs2_control",
-                              WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+                                         WQ_UNBOUND | WQ_FREEZABLE, 0);
        if (!gfs2_control_wq)
                goto fail_recovery;
 
index a3f868ae3fd48f043f91a059ede274b20b431c3b..34423978b17083d73b038d360d72b47dc08c6693 100644 (file)
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
        return inode;
 }
 
+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
 static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
+               lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+                               &hugetlbfs_i_mmap_mutex_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
index 01bfe76627516d836241b249bdeac7b0a02b0f97..41e491b8e5d7e40164e0e51d90534c9d8660a94e 100644 (file)
@@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
                                   nlm_init->protocol, nlm_version,
                                   nlm_init->hostname, nlm_init->noresvport,
                                   nlm_init->net);
-       if (host == NULL) {
-               lockd_down(nlm_init->net);
-               return ERR_PTR(-ENOLCK);
-       }
+       if (host == NULL)
+               goto out_nohost;
+       if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
+               goto out_nobind;
 
        return host;
+out_nobind:
+       nlmclnt_release_host(host);
+out_nohost:
+       lockd_down(nlm_init->net);
+       return ERR_PTR(-ENOLCK);
 }
 EXPORT_SYMBOL_GPL(nlmclnt_init);
 
index 9760ecb9b60f6ae39471ea09fd8f286ca0285da7..acd3947163497b802544a632aa02512c37b88d9d 100644 (file)
@@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
 {
        struct nlm_args *argp = &req->a_args;
        struct nlm_lock *lock = &argp->lock;
+       char *nodename = req->a_host->h_rpcclnt->cl_nodename;
 
        nlmclnt_next_cookie(&argp->cookie);
        memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
-       lock->caller  = utsname()->nodename;
+       lock->caller  = nodename;
        lock->oh.data = req->a_owner;
        lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
                                (unsigned int)fl->fl_u.nfs_fl.owner->pid,
-                               utsname()->nodename);
+                               nodename);
        lock->svid = fl->fl_u.nfs_fl.owner->pid;
        lock->fl.fl_start = fl->fl_start;
        lock->fl.fl_end = fl->fl_end;
index 8b61d103a8a7a15b0c5e8eab070e92f940a64b88..89a612e392ebb28e9a00bfa1c2c8562d8a3fb7c6 100644 (file)
@@ -3671,15 +3671,11 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
        if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
                return -EINVAL;
        /*
-        * To use null names we require CAP_DAC_READ_SEARCH
-        * This ensures that not everyone will be able to create
-        * handlink using the passed filedescriptor.
+        * Using empty names is equivalent to using AT_SYMLINK_FOLLOW
+        * on /proc/self/fd/<fd>.
         */
-       if (flags & AT_EMPTY_PATH) {
-               if (!capable(CAP_DAC_READ_SEARCH))
-                       return -ENOENT;
+       if (flags & AT_EMPTY_PATH)
                how = LOOKUP_EMPTY;
-       }
 
        if (flags & AT_SYMLINK_FOLLOW)
                how |= LOOKUP_FOLLOW;
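
The new comment in the linkat hunk states that an empty name is equivalent to following /proc/self/fd/<fd>. A userspace illustration of both routes (paths are hypothetical, and some kernels still demand CAP_DAC_READ_SEARCH for the empty-path form):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/linkat-src", O_RDWR | O_CREAT, 0600);
        char proc_path[64];

        if (fd < 0) {
                perror("open");
                return 1;
        }
        snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);

        /* Route 1: follow the magic symlink in /proc. */
        if (linkat(AT_FDCWD, proc_path, AT_FDCWD, "/tmp/linkat-via-proc",
                   AT_SYMLINK_FOLLOW) < 0)
                perror("linkat via /proc");

        /* Route 2: empty name on the fd itself, the case relaxed above
         * (may still fail with EPERM on kernels that kept the capability check). */
        if (linkat(fd, "", AT_FDCWD, "/tmp/linkat-via-empty",
                   AT_EMPTY_PATH) < 0)
                perror("linkat via AT_EMPTY_PATH");

        close(fd);
        return 0;
}
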
index af6e806044d7cee5d8047a56de6693388fc8d34f..941246f2b43d266827666dee7fec5051c2289586 100644 (file)
@@ -463,7 +463,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                unlock_new_inode(inode);
        } else
                nfs_refresh_inode(inode, fattr);
-               nfs_setsecurity(inode, fattr, label);
        dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n",
                inode->i_sb->s_id,
                (long long)NFS_FILEID(inode),
@@ -963,9 +962,15 @@ EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
 static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
-       
+       int ret;
+
        if (mapping->nrpages != 0) {
-               int ret = invalidate_inode_pages2(mapping);
+               if (S_ISREG(inode->i_mode)) {
+                       ret = nfs_sync_mapping(mapping);
+                       if (ret < 0)
+                               return ret;
+               }
+               ret = invalidate_inode_pages2(mapping);
                if (ret < 0)
                        return ret;
        }
index cf11799297c42d50dc33aca126e6da11f033ce4b..108a774095f7ef6a53fc04366aec3caa3bfea717 100644 (file)
@@ -3071,15 +3071,13 @@ struct rpc_clnt *
 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
                            struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
+       struct rpc_clnt *client = NFS_CLIENT(dir);
        int status;
-       struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
 
        status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
-       if (status < 0) {
-               rpc_shutdown_client(client);
+       if (status < 0)
                return ERR_PTR(status);
-       }
-       return client;
+       return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
 }
 
 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
index 71fdc0dfa0d28d9e293cbb58729d63395af7cc99..f6db66d8f647069a4cffde4e609bc7f23e3a2da0 100644 (file)
@@ -2478,6 +2478,10 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
        if (server->flags & NFS_MOUNT_NOAC)
                sb_mntdata.mntflags |= MS_SYNCHRONOUS;
 
+       if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
+               if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
+                       sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
        if (IS_ERR(s)) {
index 0d4c410e45893c2ae00064c5a8ae3384e00848b4..419572f33b72dafaf04a770fb53e29e250e5f42b 100644 (file)
@@ -1524,7 +1524,7 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
        return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
-               1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\
+               1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
                2 + /*eir_server_owner.so_minor_id */\
                /* eir_server_owner.so_major_id<> */\
                XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
index 280acef6f0dc45e29365b197a8242f7d26bb22f7..43f42290e5df096fe7eced0266759b45264a61a1 100644 (file)
@@ -1264,6 +1264,8 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
        struct svc_cred *cr = &rqstp->rq_cred;
        u32 service;
 
+       if (!cr->cr_gss_mech)
+               return false;
        service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
        return service == RPC_GSS_SVC_INTEGRITY ||
               service == RPC_GSS_SVC_PRIVACY;
index 0c0f3ea90de59f9c9e0c21aebd46e8b07b2f45f9..c2a4701d7286478cca66cda986565a7b3105504c 100644 (file)
@@ -3360,7 +3360,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
                8 /* eir_clientid */ +
                4 /* eir_sequenceid */ +
                4 /* eir_flags */ +
-               4 /* spr_how (SP4_NONE) */ +
+               4 /* spr_how */ +
+               8 /* spo_must_enforce, spo_must_allow */ +
                8 /* so_minor_id */ +
                4 /* so_major_id.len */ +
                (XDR_QUADLEN(major_id_sz) * 4) +
@@ -3372,8 +3373,6 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
        WRITE32(exid->seqid);
        WRITE32(exid->flags);
 
-       /* state_protect4_r. Currently only support SP4_NONE */
-       BUG_ON(exid->spa_how != SP4_NONE);
        WRITE32(exid->spa_how);
        switch (exid->spa_how) {
        case SP4_NONE:
index 79736a28d84f911dd66acca86c844912284ad9fa..2abf97b2a592b7dafaad4e2cfc0f9a6f7442f475 100644 (file)
@@ -1757,7 +1757,7 @@ try_again:
                goto out;
        } else if (ret == 1) {
                clusters_need = wc->w_clen;
-               ret = ocfs2_refcount_cow(inode, filp, di_bh,
+               ret = ocfs2_refcount_cow(inode, di_bh,
                                         wc->w_cpos, wc->w_clen, UINT_MAX);
                if (ret) {
                        mlog_errno(ret);
index eb760d8acd500c96bd45195ce2a2c79fb0942ed5..30544ce8e9f78cc66cc0eb61387f84b21cef4309 100644 (file)
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode)
 {
        int ret;
        struct ocfs2_empty_dir_priv priv = {
-               .ctx.actor = ocfs2_empty_dir_filldir
+               .ctx.actor = ocfs2_empty_dir_filldir,
        };
 
-       memset(&priv, 0, sizeof(priv));
-
        if (ocfs2_dir_indexed(inode)) {
                ret = ocfs2_empty_dir_dx(inode, &priv);
                if (ret)
index 41000f223ca42bb855a57f1cc27f08a28ecccba9..3261d71319eeb27d3a569aeccf4f3f6e27a95496 100644 (file)
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
        if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
                goto out;
 
-       return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
+       return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 
 out:
        return status;
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
                zero_clusters = last_cpos - zero_cpos;
 
        if (needs_cow) {
-               rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
+               rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
                                        zero_clusters, UINT_MAX);
                if (rc) {
                        mlog_errno(rc);
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
 
        *meta_level = 1;
 
-       ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
+       ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
        if (ret)
                mlog_errno(ret);
 out:
index 96f9ac237e86dbc301339c07b882bbff508f9143..0a992737dcaf8e76d0b57b18dcb21b57d5ed6479 100644 (file)
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb,
        extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
 
        return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
-              ocfs2_quota_trans_credits(sb) + bits_wanted;
+              ocfs2_quota_trans_credits(sb);
 }
 
 static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
index f1fc172175b6c47454cfbb4f10bfd239102e92a9..452068b45749a7545002543f0b0b450b52612dec 100644 (file)
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
        u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
        u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
 
-       ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+       ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
                                               p_cpos, new_p_cpos, len);
        if (ret) {
                mlog_errno(ret);
index 9f6b96a09615bcc243b81ebc5cf3c9139c2deec3..a70d604593b61c0ffbce761a92b1207148f3b346 100644 (file)
@@ -49,7 +49,6 @@
 
 struct ocfs2_cow_context {
        struct inode *inode;
-       struct file *file;
        u32 cow_start;
        u32 cow_len;
        struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
                            u32 *num_clusters,
                            unsigned int *extent_flags);
        int (*cow_duplicate_clusters)(handle_t *handle,
-                                     struct file *file,
+                                     struct inode *inode,
                                      u32 cpos, u32 old_cluster,
                                      u32 new_cluster, u32 new_len);
 };
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
 }
 
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-                                    struct file *file,
+                                    struct inode *inode,
                                     u32 cpos, u32 old_cluster,
                                     u32 new_cluster, u32 new_len)
 {
        int ret = 0, partial;
-       struct inode *inode = file_inode(file);
-       struct ocfs2_caching_info *ci = INODE_CACHE(inode);
-       struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+       struct super_block *sb = inode->i_sb;
        u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
        struct page *page;
        pgoff_t page_index;
@@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
                        BUG_ON(PageDirty(page));
 
-               if (PageReadahead(page)) {
-                       page_cache_async_readahead(mapping,
-                                                  &file->f_ra, file,
-                                                  page, page_index,
-                                                  readahead_pages);
-               }
-
                if (!PageUptodate(page)) {
                        ret = block_read_full_page(page, ocfs2_get_block);
                        if (ret) {
@@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                        }
                }
 
-               ocfs2_map_and_dirty_page(inode, handle, from, to,
+               ocfs2_map_and_dirty_page(inode,
+                                        handle, from, to,
                                         page, 0, &new_block);
                mark_page_accessed(page);
 unlock:
@@ -3020,12 +3011,11 @@ unlock:
 }
 
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-                                   struct file *file,
+                                   struct inode *inode,
                                    u32 cpos, u32 old_cluster,
                                    u32 new_cluster, u32 new_len)
 {
        int ret = 0;
-       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct ocfs2_caching_info *ci = INODE_CACHE(inode);
        int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
 
        /* If the old clusters are unwritten, there is no need to duplicate. */
        if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
-               ret = context->cow_duplicate_clusters(handle, context->file,
+               ret = context->cow_duplicate_clusters(handle, context->inode,
                                                      cpos, old, new, len);
                if (ret) {
                        mlog_errno(ret);
@@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
        return ret;
 }
 
-static void ocfs2_readahead_for_cow(struct inode *inode,
-                                   struct file *file,
-                                   u32 start, u32 len)
-{
-       struct address_space *mapping;
-       pgoff_t index;
-       unsigned long num_pages;
-       int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-
-       if (!file)
-               return;
-
-       mapping = file->f_mapping;
-       num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
-       if (!num_pages)
-               num_pages = 1;
-
-       index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
-       page_cache_sync_readahead(mapping, &file->f_ra, file,
-                                 index, num_pages);
-}
-
 /*
  * Starting at cpos, try to CoW write_len clusters.  Don't CoW
  * past max_cpos.  This will stop when it runs into a hole or an
  * unrefcounted extent.
  */
 static int ocfs2_refcount_cow_hunk(struct inode *inode,
-                                  struct file *file,
                                   struct buffer_head *di_bh,
                                   u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
 
        BUG_ON(cow_len == 0);
 
-       ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
-
        context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
        if (!context) {
                ret = -ENOMEM;
@@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
        context->ref_root_bh = ref_root_bh;
        context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
        context->get_clusters = ocfs2_di_get_clusters;
-       context->file = file;
 
        ocfs2_init_dinode_extent_tree(&context->data_et,
                                      INODE_CACHE(inode), di_bh);
@@ -3537,7 +3501,6 @@ out:
  * clusters between cpos and cpos+write_len are safe to modify.
  */
 int ocfs2_refcount_cow(struct inode *inode,
-                      struct file *file,
                       struct buffer_head *di_bh,
                       u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode,
                        num_clusters = write_len;
 
                if (ext_flags & OCFS2_EXT_REFCOUNTED) {
-                       ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
+                       ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
                                                      num_clusters, max_cpos);
                        if (ret) {
                                mlog_errno(ret);
index 7754608c83a47b1b44425c8f9c5e13a2adc65675..6422bbcdb52506b1631ce495ddf4524a4ae71c92 100644 (file)
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
                                          int *credits,
                                          int *ref_blocks);
 int ocfs2_refcount_cow(struct inode *inode,
-                      struct file *filep, struct buffer_head *di_bh,
+                      struct buffer_head *di_bh,
                       u32 cpos, u32 write_len, u32 max_cpos);
 
 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
                             u32 cpos, u32 write_len,
                             struct ocfs2_post_refcount *post);
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-                                    struct file *file,
+                                    struct inode *inode,
                                     u32 cpos, u32 old_cluster,
                                     u32 new_cluster, u32 new_len);
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-                                   struct file *file,
+                                   struct inode *inode,
                                    u32 cpos, u32 old_cluster,
                                    u32 new_cluster, u32 new_len);
 int ocfs2_cow_sync_writeback(struct super_block *sb,
index d53e29895082306747de0998c730159b73d08528..7931f76acc2beede0298747df1d505b30cd225b4 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -823,7 +823,7 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
        int lookup_flags = 0;
        int acc_mode;
 
-       if (flags & O_CREAT)
+       if (flags & (O_CREAT | __O_TMPFILE))
                op->mode = (mode & S_IALLUGO) | S_IFREG;
        else
                op->mode = 0;
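
Editor's note: the fs/open.c hunk above makes build_open_flags() honour the mode argument for __O_TMPFILE as well as O_CREAT; without it an O_TMPFILE file would be created with mode 0. A minimal userspace sketch (hypothetical demo, not part of this patch; needs Linux 3.11+ and a libc that defines O_TMPFILE):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* 0600 only takes effect once the kernel applies the mode for
             * __O_TMPFILE, which is what the fix above restores. */
            int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

            if (fd < 0) {
                    perror("open(O_TMPFILE)");
                    return 1;
            }
            printf("unnamed temporary file, fd=%d\n", fd);
            close(fd);
            return 0;
    }
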
index 94441a407337bb02fb77e89fe93dfbbed169f827..737e15615b0490c40d002a315217033f5463d5b3 100644 (file)
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
                de = next;
        } while (de);
        spin_unlock(&proc_subdir_lock);
-       return 0;
+       return 1;
 }
 
 int proc_readdir(struct file *file, struct dir_context *ctx)
index 229e366598daecd4e905e8f51f13efaf0a44e773..e0a790da726d0f710a7585b0a8b6663e9902a0ac 100644 (file)
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
 static int proc_root_readdir(struct file *file, struct dir_context *ctx)
 {
        if (ctx->pos < FIRST_PROCESS_ENTRY) {
-               proc_readdir(file, ctx);
+               int error = proc_readdir(file, ctx);
+               if (unlikely(error <= 0))
+                       return error;
                ctx->pos = FIRST_PROCESS_ENTRY;
        }
 
index dbf61f6174f0f473d3d9a19311a09170f8dab9f3..107d026f5d6e0cbebc068fce65f201f167c425c5 100644 (file)
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
         * of how soft-dirty works.
         */
        pte_t ptent = *pte;
-       ptent = pte_wrprotect(ptent);
-       ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+       if (pte_present(ptent)) {
+               ptent = pte_wrprotect(ptent);
+               ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+       } else if (is_swap_pte(ptent)) {
+               ptent = pte_swp_clear_soft_dirty(ptent);
+       } else if (pte_file(ptent)) {
+               ptent = pte_file_clear_soft_dirty(ptent);
+       }
+
        set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
-               if (!pte_present(ptent))
-                       continue;
 
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty(vma, addr, pte);
                        continue;
                }
 
+               if (!pte_present(ptent))
+                       continue;
+
                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;
@@ -859,7 +868,7 @@ typedef struct {
 } pagemap_entry_t;
 
 struct pagemapread {
-       int pos, len;
+       int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
        pagemap_entry_t *buffer;
        bool v2;
 };
@@ -867,7 +876,7 @@ struct pagemapread {
 #define PAGEMAP_WALK_SIZE      (PMD_SIZE)
 #define PAGEMAP_WALK_MASK      (PMD_MASK)
 
-#define PM_ENTRY_BYTES      sizeof(u64)
+#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
 #define PM_STATUS_BITS      3
 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
                flags = PM_PRESENT;
                page = vm_normal_page(vma, addr, pte);
        } else if (is_swap_pte(pte)) {
-               swp_entry_t entry = pte_to_swp_entry(pte);
-
+               swp_entry_t entry;
+               if (pte_swp_soft_dirty(pte))
+                       flags2 |= __PM_SOFT_DIRTY;
+               entry = pte_to_swp_entry(pte);
                frame = swp_type(entry) |
                        (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                flags = PM_SWAP;
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                goto out_task;
 
        pm.v2 = soft_dirty_cleared;
-       pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-       pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+       pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+       pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
        ret = -ENOMEM;
        if (!pm.buffer)
                goto out_task;
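
Editor's note: for context on the PM_ENTRY_BYTES change above, each pagemap entry exported to userspace is one 64-bit word per page, so pm.len now counts entries while the kmalloc() size multiplies by PM_ENTRY_BYTES. A small userspace sketch (illustrative only) that reads one entry from /proc/self/pagemap:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            uintptr_t addr = (uintptr_t)&page;      /* any mapped address */
            uint64_t entry = 0;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* one 8-byte entry per page: seek to (addr / page_size) entries */
            if (pread(fd, &entry, sizeof(entry),
                      (off_t)(addr / page) * sizeof(entry)) == sizeof(entry))
                    printf("entry %#llx  present=%llu\n",
                           (unsigned long long)entry,
                           (unsigned long long)(entry >> 63));
            close(fd);
            return 0;
    }
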
index 33532f79b4f714923ec951fa5fb75d1758fbfda3..a958444a75fc310627d56de6a37e28a15a99273a 100644 (file)
 /*
  * LOCKING:
  *
- * We rely on new Alexander Viro's super-block locking.
+ * These guys are evicted from procfs as the very first step in ->kill_sb().
  *
  */
 
-static int show_version(struct seq_file *m, struct super_block *sb)
+static int show_version(struct seq_file *m, void *unused)
 {
+       struct super_block *sb = m->private;
        char *format;
 
        if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
@@ -66,8 +67,9 @@ static int show_version(struct seq_file *m, struct super_block *sb)
 #define DJP( x ) le32_to_cpu( jp -> x )
 #define JF( x ) ( r -> s_journal -> x )
 
-static int show_super(struct seq_file *m, struct super_block *sb)
+static int show_super(struct seq_file *m, void *unused)
 {
+       struct super_block *sb = m->private;
        struct reiserfs_sb_info *r = REISERFS_SB(sb);
 
        seq_printf(m, "state: \t%s\n"
@@ -128,8 +130,9 @@ static int show_super(struct seq_file *m, struct super_block *sb)
        return 0;
 }
 
-static int show_per_level(struct seq_file *m, struct super_block *sb)
+static int show_per_level(struct seq_file *m, void *unused)
 {
+       struct super_block *sb = m->private;
        struct reiserfs_sb_info *r = REISERFS_SB(sb);
        int level;
 
@@ -186,8 +189,9 @@ static int show_per_level(struct seq_file *m, struct super_block *sb)
        return 0;
 }
 
-static int show_bitmap(struct seq_file *m, struct super_block *sb)
+static int show_bitmap(struct seq_file *m, void *unused)
 {
+       struct super_block *sb = m->private;
        struct reiserfs_sb_info *r = REISERFS_SB(sb);
 
        seq_printf(m, "free_block: %lu\n"
@@ -218,8 +222,9 @@ static int show_bitmap(struct seq_file *m, struct super_block *sb)
        return 0;
 }
 
-static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
+static int show_on_disk_super(struct seq_file *m, void *unused)
 {
+       struct super_block *sb = m->private;
        struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
        struct reiserfs_super_block *rs = sb_info->s_rs;
        int hash_code = DFL(s_hash_function_code);
@@ -261,8 +266,9 @@ static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
        return 0;
 }
 
-static int show_oidmap(struct seq_file *m, struct super_block *sb)
+static int show_oidmap(struct seq_file *m, void *unused)
 {
+       struct super_block *sb = m->private;
        struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
        struct reiserfs_super_block *rs = sb_info->s_rs;
        unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
@@ -291,8 +297,9 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb)
        return 0;
 }
 
-static int show_journal(struct seq_file *m, struct super_block *sb)
+static int show_journal(struct seq_file *m, void *unused)
 {
+       struct super_block *sb = m->private;
        struct reiserfs_sb_info *r = REISERFS_SB(sb);
        struct reiserfs_super_block *rs = r->s_rs;
        struct journal_params *jp = &rs->s_v1.s_journal;
@@ -383,92 +390,24 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
        return 0;
 }
 
-/* iterator */
-static int test_sb(struct super_block *sb, void *data)
-{
-       return data == sb;
-}
-
-static int set_sb(struct super_block *sb, void *data)
-{
-       return -ENOENT;
-}
-
-struct reiserfs_seq_private {
-       struct super_block *sb;
-       int (*show) (struct seq_file *, struct super_block *);
-};
-
-static void *r_start(struct seq_file *m, loff_t * pos)
-{
-       struct reiserfs_seq_private *priv = m->private;
-       loff_t l = *pos;
-
-       if (l)
-               return NULL;
-
-       if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb)))
-               return NULL;
-
-       up_write(&priv->sb->s_umount);
-       return priv->sb;
-}
-
-static void *r_next(struct seq_file *m, void *v, loff_t * pos)
-{
-       ++*pos;
-       if (v)
-               deactivate_super(v);
-       return NULL;
-}
-
-static void r_stop(struct seq_file *m, void *v)
-{
-       if (v)
-               deactivate_super(v);
-}
-
-static int r_show(struct seq_file *m, void *v)
-{
-       struct reiserfs_seq_private *priv = m->private;
-       return priv->show(m, v);
-}
-
-static const struct seq_operations r_ops = {
-       .start = r_start,
-       .next = r_next,
-       .stop = r_stop,
-       .show = r_show,
-};
-
 static int r_open(struct inode *inode, struct file *file)
 {
-       struct reiserfs_seq_private *priv;
-       int ret = seq_open_private(file, &r_ops,
-                                  sizeof(struct reiserfs_seq_private));
-
-       if (!ret) {
-               struct seq_file *m = file->private_data;
-               priv = m->private;
-               priv->sb = proc_get_parent_data(inode);
-               priv->show = PDE_DATA(inode);
-       }
-       return ret;
+       return single_open(file, PDE_DATA(inode), 
+                               proc_get_parent_data(inode));
 }
 
 static const struct file_operations r_file_operations = {
        .open = r_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = seq_release_private,
-       .owner = THIS_MODULE,
+       .release = single_release,
 };
 
 static struct proc_dir_entry *proc_info_root = NULL;
 static const char proc_info_root_name[] = "fs/reiserfs";
 
 static void add_file(struct super_block *sb, char *name,
-                    int (*func) (struct seq_file *, struct super_block *))
+                    int (*func) (struct seq_file *, void *))
 {
        proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
                         &r_file_operations, func);
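
Editor's note: the reiserfs conversion above replaces the custom seq_operations iterator with single_open(), whose third argument ends up in seq_file->private for the show callback. A hedged kernel-style sketch of that calling convention (illustrative only, not buildable on its own; the demo_* names are invented):

    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *unused)
    {
            struct super_block *sb = m->private;   /* third arg of single_open() */

            seq_printf(m, "blocksize: %lu\n", sb->s_blocksize);
            return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            /* r_open() above passes PDE_DATA(inode) as the show callback and
             * the parent proc entry's data (the superblock) as the private
             * pointer; this sketch hard-codes the show callback instead. */
            return single_open(file, demo_show, proc_get_parent_data(inode));
    }
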
index f8a23c3078f87d5eda59c30d55cea7ecc94b9273..e2e202a07b317cff5c84d8b51e3b66efafd689ea 100644 (file)
@@ -499,6 +499,7 @@ int remove_save_link(struct inode *inode, int truncate)
 static void reiserfs_kill_sb(struct super_block *s)
 {
        if (REISERFS_SB(s)) {
+               reiserfs_proc_info_done(s);
                /*
                 * Force any pending inode evictions to occur now. Any
                 * inodes to be removed that have extended attributes
@@ -554,8 +555,6 @@ static void reiserfs_put_super(struct super_block *s)
                                 REISERFS_SB(s)->reserved_blocks);
        }
 
-       reiserfs_proc_info_done(s);
-
        reiserfs_write_unlock(s);
        mutex_destroy(&REISERFS_SB(s)->lock);
        kfree(s->s_fs_info);
index 56e6b68c8d2fcb36fae154ebe6bb687f9e74760d..94383a70c1a3d738413e06389cc6937909554823 100644 (file)
@@ -274,15 +274,12 @@ struct acpi_device_wakeup {
 };
 
 struct acpi_device_physical_node {
-       u8 node_id;
+       unsigned int node_id;
        struct list_head node;
        struct device *dev;
        bool put_online:1;
 };
 
-/* set maximum of physical nodes to 32 for expansibility */
-#define ACPI_MAX_PHYSICAL_NODE 32
-
 /* Device */
 struct acpi_device {
        int device_type;
@@ -302,10 +299,9 @@ struct acpi_device {
        struct acpi_driver *driver;
        void *driver_data;
        struct device dev;
-       u8 physical_node_count;
+       unsigned int physical_node_count;
        struct list_head physical_node_list;
        struct mutex physical_node_lock;
-       DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
        struct list_head power_dependent;
        void (*remove)(struct acpi_device *);
 };
@@ -445,7 +441,11 @@ struct acpi_pci_root {
 };
 
 /* helper */
-acpi_handle acpi_get_child(acpi_handle, u64);
+acpi_handle acpi_find_child(acpi_handle, u64, bool);
+static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+       return acpi_find_child(handle, addr, false);
+}
 int acpi_is_root_bridge(acpi_handle);
 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
 #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
index 2f47ade1b5678f325faea455656a9578839b8638..0807ddf97b058fb04b1eedfd82c34b76e4253335 100644 (file)
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 {
        return pmd;
 }
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+       return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+       return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+       return 0;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
index 13821c339a4151912b88f2e4de04cbf99653a612..5672d7ea1fa066175b33c8f4c19457fcdc0614ed 100644 (file)
@@ -112,7 +112,7 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
                                                        unsigned long end);
index 4372658c73ae5eddd3dd6570d4dfac8c5d8825c8..120d57a1c3a5dcb452a75d4fd21a6d338b128a0d 100644 (file)
@@ -78,6 +78,11 @@ struct trace_iterator {
        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;
 
+       cpumask_var_t           started;
+
+       /* it's true when current open file is snapshot */
+       bool                    snapshot;
+
        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
@@ -90,10 +95,7 @@ struct trace_iterator {
        loff_t                  pos;
        long                    idx;
 
-       cpumask_var_t           started;
-
-       /* it's true when current open file is snapshot */
-       bool                    snapshot;
+       /* All new fields here will be zeroed out in pipe_read */
 };
 
 enum trace_iter_flags {
@@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
 extern int trace_add_event_call(struct ftrace_event_call *call);
-extern void trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_remove_event_call(struct ftrace_event_call *call);
 
 #define is_signed_type(type)   (((type)(-1)) < (type)1)
 
index 3869c525b052171fff8bbe3000d61d5999bae7ee..369cf2cd51448b690cb812e6310cbd450ab83386 100644 (file)
@@ -8,6 +8,7 @@
  */
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/atomic.h>
 
 #ifndef _IIO_TRIGGER_H_
 #define _IIO_TRIGGER_H_
@@ -61,7 +62,7 @@ struct iio_trigger {
 
        struct list_head                list;
        struct list_head                alloc_list;
-       int use_count;
+       atomic_t                        use_count;
 
        struct irq_chip                 subirq_chip;
        int                             subirq_base;
index 3bef14c6586bc9d7d875e6579352a62e53e13c6a..482ad2d84a32dae333c74cccb2d1394b247d5ae8 100644 (file)
@@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
-static inline void trace_dump_stack(void) { }
+static inline void trace_dump_stack(int skip) { }
 
 static inline void tracing_on(void) { }
 static inline void tracing_off(void) { }
index 8d73fe29796a9bc1ce8687c9e18e6963cc278c62..db1791bb997ae495d65637e506b6f13ab4945b6d 100644 (file)
 #define CNTRLREG_8WIRE         CNTRLREG_AFE_CTRL(3)
 #define CNTRLREG_TSCENB                BIT(7)
 
+/* FIFO READ Register */
+#define FIFOREAD_DATA_MASK (0xfff << 0)
+#define FIFOREAD_CHNLID_MASK (0xf << 16)
+
+/* Sequencer Status */
+#define SEQ_STATUS BIT(5)
+
 #define ADC_CLK                        3000000
 #define        MAX_CLK_DIV             7
 #define TOTAL_STEPS            16
 #define TOTAL_CHANNELS         8
 
+/*
+ * The ADC runs at 3 MHz, and it takes
+ * 15 cycles to latch one data output.
+ * Hence the idle time for the ADC to
+ * process one data sample is
+ * around 5 microseconds.
+ */
+#define IDLE_TIMEOUT 5 /* microsec */
+
 #define TSCADC_CELLS           2
 
 struct ti_tscadc_dev {
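
Editor's note: a quick sanity check of IDLE_TIMEOUT above (standalone arithmetic using the ADC_CLK value from this header): 15 latch cycles at 3 MHz come to 5 microseconds.

    #include <stdio.h>

    int main(void)
    {
            const double adc_clk_hz = 3000000.0;    /* ADC_CLK */
            const double latch_cycles = 15.0;

            /* 15 / 3e6 s = 5e-6 s = 5 us, matching IDLE_TIMEOUT */
            printf("idle time per sample: %.1f us\n",
                   latch_cycles / adc_clk_hz * 1e6);
            return 0;
    }
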
index 737685e9e852e91fd93ccf142500c33773f42e71..68029b30c3dc89e2d2620fd41101b2ab05069344 100644 (file)
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
        __be16  max_desc_sz_rq;
        u8      rsvd21[2];
        __be16  max_desc_sz_sq_dc;
-       u8      rsvd22[4];
-       __be16  max_qp_mcg;
-       u8      rsvd23;
+       __be32  max_qp_mcg;
+       u8      rsvd22[3];
        u8      log_max_mcg;
-       u8      rsvd24;
+       u8      rsvd23;
        u8      log_max_pd;
-       u8      rsvd25;
+       u8      rsvd24;
        u8      log_max_xrcd;
-       u8      rsvd26[42];
+       u8      rsvd25[42];
        __be16  log_uar_page_sz;
-       u8      rsvd27[28];
+       u8      rsvd26[28];
        u8      log_msx_atomic_size_qp;
-       u8      rsvd28[2];
+       u8      rsvd27[2];
        u8      log_msx_atomic_size_dc;
-       u8      rsvd29[76];
+       u8      rsvd28[76];
 };
 
 
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
        u8              rsvd0[2];
        __be16          func_id;
-       u8              rsvd1[2];
-       __be16          num_pages;
-       __be32          rsvd2[5];
+       __be32          num_pages;
+       __be32          rsvd1[5];
 };
 
 union ev_data {
index 2aa258b0ced1449b8281f6728d6f243d0c72d15f..8888381fc150b8f3f852077407ee8187a54cb7aa 100644 (file)
@@ -358,7 +358,7 @@ struct mlx5_caps {
        u32     reserved_lkey;
        u8      local_ca_ack_delay;
        u8      log_max_mcg;
-       u16     max_qp_mcg;
+       u32     max_qp_mcg;
        int     min_page_sz;
 };
 
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                s16 npages);
+                                s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
 const char *mlx5_command_str(int command);
 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
index 75981d0b57dccd4b6a3b5e9833557973f33d9404..580a5320cc96afd21807cddaaa40bdff47694d5d 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include <linux/err.h>
 
 struct module;
 struct device;
index d722490da030c5906d991b7c76f8dc178edadee8..e9995eb5985cd50b28aaae49f528487acf3f018c 100644 (file)
@@ -314,6 +314,7 @@ struct nsproxy;
 struct user_namespace;
 
 #ifdef CONFIG_MMU
+extern unsigned long mmap_legacy_base(void);
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
@@ -1532,6 +1533,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
  * Test if a process is not yet dead (at most zombie state)
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
  */
 static inline int pid_alive(struct task_struct *p)
 {
@@ -1543,6 +1546,8 @@ static inline int pid_alive(struct task_struct *p)
  * @tsk: Task structure to be checked.
  *
  * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
  */
 static inline int is_global_init(struct task_struct *tsk)
 {
@@ -1894,6 +1899,8 @@ extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
  * @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
index 7d537ced949aa83b33b4c20e32c991d2123ca5a5..75f34949d9ab998adf5941c0321ea45005cc7d62 100644 (file)
@@ -117,9 +117,17 @@ do {                                                               \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name, it does not necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * cannot be reordered with a LOAD inside this section.
+ * spin_lock() is a one-way barrier: that LOAD cannot escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE cannot move into the critical section; smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock()      smp_wmb()
 #endif
 
 /**
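
Editor's note: the new smp_mb__before_spinlock() comment describes the store-before-lock versus load-inside-lock pairing that wakeups rely on (see the try_to_wake_up() hunk further down). A rough userspace analogue, with C11 seq_cst atomics standing in for the kernel primitives; this is an assumption-laden illustration, not the kernel code:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int condition;
    static atomic_int sleeper_state;        /* 1 = "sleeping", 0 = "running" */

    static void *waker(void *arg)
    {
            /* CONDITION = 1 must be visible before we look at the sleeper's
             * state; seq_cst ordering plays the role of the barrier here. */
            atomic_store(&condition, 1);
            if (atomic_load(&sleeper_state) == 1)
                    printf("sleeper observed asleep: would wake it up\n");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            atomic_store(&sleeper_state, 1);
            pthread_create(&t, NULL, waker, NULL);
            pthread_join(t, NULL);
            return 0;
    }
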
index 6d870353674ac274a3f27e0f454749f7e5c18485..1821445708d62d81bb1176ecb20f8480fa4799fa 100644 (file)
@@ -121,6 +121,7 @@ struct rpc_task_setup {
 #define RPC_TASK_SOFTCONN      0x0400          /* Fail if can't connect */
 #define RPC_TASK_SENT          0x0800          /* message was sent */
 #define RPC_TASK_TIMEOUT       0x1000          /* fail with ETIMEDOUT on timeout */
+#define RPC_TASK_NOCONNECT     0x2000          /* return ENOTCONN if not connected */
 
 #define RPC_IS_ASYNC(t)                ((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)      ((t)->tk_flags & RPC_TASK_SWAPPER)
index c5fd30d2a415a48964eb1fb2aaaac0db255656e1..8d4fa82bfb913d0021d93c49e71dab940804fdf7 100644 (file)
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
        swp_entry_t arch_entry;
 
        BUG_ON(pte_file(pte));
+       if (pte_swp_soft_dirty(pte))
+               pte = pte_swp_clear_soft_dirty(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
index 4147d700a293623b63615b987c797f14d380a7a1..84662ecc7b51468233175959ddd5c04bd0efac0d 100644 (file)
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
               int __user *);
 #else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+                         int __user *, int);
+#else
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
               int __user *, int);
 #endif
+#endif
 
 asmlinkage long sys_execve(const char __user *filename,
                const char __user *const __user *argv,
index b6b215f13b453091ff2be0fae11b37a5a51c0c84..14105c26a83618da5d91ae09331fb83ba8f8ad6b 100644 (file)
@@ -23,6 +23,7 @@ struct user_namespace {
        struct uid_gid_map      projid_map;
        atomic_t                count;
        struct user_namespace   *parent;
+       int                     level;
        kuid_t                  owner;
        kgid_t                  group;
        unsigned int            proc_inum;
index 7343a27fe81966372c3ee8dc72241f2822891543..47ada23345a195973c3942681566244d50c7f34d 100644 (file)
@@ -22,6 +22,7 @@
 #define _V4L2_CTRLS_H
 
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/videodev2.h>
 
 /* forward references */
index f18b91966d3de718528ae3d44887e937514718fc..8a358a2c97e6a82cb98dd6b9586133828306f961 100644 (file)
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
                if (rc > 0)
                        /* local bh are disabled so it is ok to use _BH */
                        NET_ADD_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+                                        LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 
        } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
                 !need_resched() && !busy_loop_timeout(end_time));
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
        return false;
 }
 
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
-       return false;
-}
-
 static inline void skb_mark_napi_id(struct sk_buff *skb,
                                    struct napi_struct *napi)
 {
index 781b3cf86a2f534ff71e03037b7984a0e84a8c8e..a354db5b7662f1bce43af7edc97471cdef3421bb 100644 (file)
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
        return INET_ECN_encapsulate(tos, inner);
 }
 
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
-                                         const struct iphdr  *old_iph,
-                                         struct dst_entry *dst)
-{
-       struct iphdr *iph = ip_hdr(skb);
-
-       /* Use inner packet iph-id if possible. */
-       if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-               iph->id = old_iph->id;
-       else
-               __ip_select_ident(iph, dst,
-                                 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct net *net, struct rtable *rt,
                  struct sk_buff *skb,
index 6eab63363e59fe82dc3fc6119633e5583b663dd1..e5ae0c50fa9c5c481f991de92efd5e862d5bca67 100644 (file)
@@ -683,13 +683,19 @@ struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
+       u8      linklayer;
        u8      shift;
 };
 
 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
 {
-       return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+       len += r->overhead;
+
+       if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+               return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+       return ((u64)len * r->mult) >> r->shift;
 }
 
 extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
        memset(res, 0, sizeof(*res));
        res->rate = r->rate_bytes_ps;
        res->overhead = r->overhead;
+       res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
 
 #endif
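
Editor's note: a worked example for the ATM branch added above (illustrative numbers only): a 100-byte packet with no overhead occupies ceil(100 / 48) = 3 cells, i.e. 159 bytes on the wire, which is what the transmit time is now computed from.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int len = 100, overhead = 0;

            len += overhead;
            /* each 48-byte payload chunk rides in a full 53-byte ATM cell */
            printf("payload %u bytes -> %u bytes on the wire\n",
                   len, DIV_ROUND_UP(len, 48) * 53);
            return 0;
    }
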
index dbd71b0c7d8c93edbc80ae50ad7223db3665f00a..09d62b9228ffa061de1b5cb8ee80e20ef470db5c 100644 (file)
@@ -73,9 +73,17 @@ struct tc_estimator {
 #define TC_H_ROOT      (0xFFFFFFFFU)
 #define TC_H_INGRESS    (0xFFFFFFF1U)
 
+/* Must correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+       TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+       TC_LINKLAYER_ETHERNET,
+       TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
 struct tc_ratespec {
        unsigned char   cell_log;
-       unsigned char   __reserved;
+       __u8            linklayer; /* lower 4 bits */
        unsigned short  overhead;
        short           cell_align;
        unsigned short  mpu;
index af0a674cc677f570bf5d5e04683957605d5e9ea1..a1356d3b54df4bd7f68fa26e3436fd7ff2dbcb71 100644 (file)
@@ -253,7 +253,7 @@ enum
        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,    /* TCPFastOpenListenOverflow */
        LINUX_MIB_TCPFASTOPENCOOKIEREQD,        /* TCPFastOpenCookieReqd */
        LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
-       LINUX_MIB_LOWLATENCYRXPACKETS,          /* LowLatencyRxPackets */
+       LINUX_MIB_BUSYPOLLRXPACKETS,            /* BusyPollRxPackets */
        __LINUX_MIB_MAX
 };
 
index 789ec4683db3b73f83e455891bdb319124215f97..781845a013ab23c8f99f2c2153c70c9fd679c45d 100644 (file)
@@ -4335,8 +4335,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                }
 
                err = percpu_ref_init(&css->refcnt, css_release);
-               if (err)
+               if (err) {
+                       ss->css_free(cgrp);
                        goto err_free_all;
+               }
 
                init_cgroup_css(css, ss, cgrp);
 
index e5657788feddfefaaed5f7ce3ce2ac26ca80a9c1..010a0083c0ae4cfa222e2d51f2e951893bac28c4 100644 (file)
@@ -1608,11 +1608,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
        struct cpuset *cs = cgroup_cs(cgrp);
        cpuset_filetype_t type = cft->private;
-       int retval = -ENODEV;
+       int retval = 0;
 
        mutex_lock(&cpuset_mutex);
-       if (!is_cpuset_online(cs))
+       if (!is_cpuset_online(cs)) {
+               retval = -ENODEV;
                goto out_unlock;
+       }
 
        switch (type) {
        case FILE_CPU_EXCLUSIVE:
index 403d2bb8a96865ec2b62da44e48f77e3a7ae3e0c..e23bb19e2a3e23c9d3ad3c68e0f49acb214d3e51 100644 (file)
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
                 int __user *, parent_tidptr,
                 int __user *, child_tidptr,
                 int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+               int, stack_size,
+               int __user *, parent_tidptr,
+               int __user *, child_tidptr,
+               int, tls_val)
 #else
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                 int __user *, parent_tidptr,
index ff05f4bd86eb6acf10ff49307e448ddb8b00916a..a52ee7bb830d3fa9db3cf898a34c8212664ac8b7 100644 (file)
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
        might_sleep();
        ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
                                   0, &ctx->dep_map, _RET_IP_, ctx);
-       if (!ret && ctx->acquired > 0)
+       if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
        return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx);
 
-       if (!ret && ctx->acquired > 0)
+       if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
        return ret;
index 06fe28589e9c9e88b014315dfab33a9e6cdfd0eb..a394297f8b2f94ea806a545c6e92e4b7221d2f7e 100644 (file)
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
 }
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
+static void __pm_qos_update_request(struct pm_qos_request *req,
+                          s32 new_value)
+{
+       trace_pm_qos_update_request(req->pm_qos_class, new_value);
+
+       if (new_value != req->node.prio)
+               pm_qos_update_target(
+                       pm_qos_array[req->pm_qos_class]->constraints,
+                       &req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
 /**
  * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
  * @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
                                                  struct pm_qos_request,
                                                  work);
 
-       pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+       __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
 }
 
 /**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
        }
 
        cancel_delayed_work_sync(&req->work);
-
-       trace_pm_qos_update_request(req->pm_qos_class, new_value);
-       if (new_value != req->node.prio)
-               pm_qos_update_target(
-                       pm_qos_array[req->pm_qos_class]->constraints,
-                       &req->node, PM_QOS_UPDATE_REQ, new_value);
+       __pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
index b51087fb9acec5a33d8ee53b37206b7fdbd770b1..276762f3a46078141117e5810c1afc964e869ec9 100644 (file)
@@ -19,7 +19,8 @@ char *_braille_console_setup(char **str, char **brl_options)
                        pr_err("need port name after brl=\n");
                else
                        *((*str)++) = 0;
-       }
+       } else
+               return NULL;
 
        return *str;
 }
index 4041f5747e73bf70f8d8dbb412f8e7b7067a8017..a146ee327f6ac15029b64424f0abd07e8d316f67 100644 (file)
@@ -469,7 +469,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-       flush_ptrace_hw_breakpoint(child);
 
        write_lock_irq(&tasklist_lock);
        /*
index b7c32cb7bfebbd5912f06a421decb87b15c34bf9..05c39f030314698730349de8eb9346c1437d9aba 100644 (file)
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
  */
 inline int task_curr(const struct task_struct *p)
 {
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
  * or @state didn't match @p's state.
  */
 static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        unsigned long flags;
        int cpu, success = 0;
 
-       smp_wmb();
+       /*
+        * If we are going to wake up a thread waiting for CONDITION we
+        * need to ensure that CONDITION=1 done by the caller cannot be
+        * reordered with the p->state check below. This pairs with the mb()
+        * in set_current_state() done by the waiting thread.
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;
@@ -1577,8 +1585,9 @@ out:
  * @p: The process to be woken up.
  *
  * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.  Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
  *
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
  * This makes sure that uptime, CFS vruntime, load
  * balancing, etc... continue to move forward, even
  * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
  */
 u64 scheduler_tick_max_deferment(void)
 {
@@ -2394,6 +2405,12 @@ need_resched:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
+       /*
+        * Make sure that signal_pending_state()->signal_pending() below
+        * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+        * done by the caller to avoid the race with signal_wake_up().
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
 
        switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible. The caller is accounted as waiting for IO.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
  *     try_wait_for_completion - try to decrement a completion without blocking
  *     @x:     completion structure
  *
- *     Returns: 0 if a decrement cannot be done without blocking
+ *     Return: 0 if a decrement cannot be done without blocking
  *              1 if a decrement succeeded.
  *
  *     If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
  *     completion_done - Test to see if a completion has any waiters
  *     @x:     completion structure
  *
- *     Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *     Return: 0 if there are waiters (wait_for_completion() in progress)
  *              1 if there are no waiters.
  *
  */
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
  * task_prio - return the priority value of a given task.
  * @p: the task in question.
  *
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
  * RT tasks are offset by -200. Normal tasks are centered
  * around 0, value goes from -16 to +15.
  */
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
 /**
  * task_nice - return the nice value of a given task.
  * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
  */
 int task_nice(const struct task_struct *p)
 {
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
  */
 int idle_cpu(int cpu)
 {
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
 /**
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
  */
 struct task_struct *idle_task(int cpu)
 {
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
+ *
+ * Return: The task of @pid, if found. %NULL otherwise.
  */
 static struct task_struct *find_process_by_pid(pid_t pid)
 {
@@ -3432,6 +3457,8 @@ recheck:
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
+ * Return: 0 on success. An error code otherwise.
+ *
  * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
  * current context has permission.  For example, this is needed in
  * stop_machine(): we create temporary high priority worker threads,
  * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
                               const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
                struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  * sys_sched_setparam - set/change the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 /**
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
  */
 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  * sys_sched_getparam - get the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  *
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
  */
 SYSCALL_DEFINE0(sched_yield)
 {
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns:
+ * Return:
  *     true (>0) if we indeed boosted the target task.
  *     false (0) if we failed to boost the target.
  *     -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
  * sys_sched_get_priority_max - return maximum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  * sys_sched_get_priority_min - return minimum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  *
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
  */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
  * @cpu: the processor in question.
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
  */
 struct task_struct *curr_task(int cpu)
 {
index 1095e878a46fdbe4a7224eb07a68cf1fc90c2530..8b836b376d9129760066326eabf5040f72b2e4f3 100644 (file)
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
  * any discrepancies created by racing against the uncertainty of the current
  * priority configuration.
  *
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
  *
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
  */
 int cpupri_init(struct cpupri *cp)
 {
index 9565645e320218f394f36eb0edbaa95236bbfcd1..68f1609ca149e259fab8db2fdb6bc3d18587a049 100644 (file)
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         */
        update_entity_load_avg(curr, 1);
        update_cfs_rq_blocked_load(cfs_rq, 1);
+       update_cfs_shares(cfs_rq);
 
 #ifdef CONFIG_SCHED_HRTICK
        /*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
  * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
  */
 static inline int get_sd_load_idx(struct sched_domain *sd,
                                        enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
  */
 static bool update_sd_pick_busiest(struct lb_env *env,
                                   struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * assuming lower CPU number will be equivalent to lower a SMT thread
  * number.
  *
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
  * this CPU.  The amount of the imbalance is returned in *imbalance.
  *
  * @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * @balance: Pointer to a variable indicating if this_cpu
  *     is the appropriate cpu to perform load balancing at this_level.
  *
- * Returns:    - the busiest group if imbalance exists.
+ * Return:     - The busiest group if imbalance exists.
  *             - If no imbalance and user has opted for power-savings balance,
  *                return the least loaded group whose CPUs can be
  *                put to idle by rebalancing its tasks onto our group.
index a326f27d7f09e0942ae2df9eb010d8caaabdb725..0b479a6a22bb8fe30e2b9d6c76c2ddb1d5646ae1 100644 (file)
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
        BUG_ON(bits > 32);
        WARN_ON(!irqs_disabled());
        read_sched_clock = read;
-       sched_clock_mask = (1 << bits) - 1;
+       sched_clock_mask = (1ULL << bits) - 1;
        cd.rate = rate;
 
        /* calculate the mult/shift to convert counter ticks to ns. */
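
Editor's note: the 1ULL change above matters because setup_sched_clock() allows bits up to 32. Shifting a 32-bit int by its full width is undefined behaviour and typically produces a zero mask, whereas the 64-bit shift yields the intended 0xffffffff. A small standalone sketch:

    #include <stdio.h>

    int main(void)
    {
            int bits = 32;

            /* (1 << bits) would shift an int by its full width: undefined.
             * The 64-bit constant keeps the shift well defined. */
            unsigned long long mask = (1ULL << bits) - 1;

            printf("mask for %d bits: %#llx\n", bits, mask);
            return 0;
    }
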
index e77edc97e036b4e8216ae8b54fe0e6cafaa2fe71..e8a1516cc0a36d3c247d2bd4a18ef6d69f17b42c 100644 (file)
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
                 * Don't allow the user to think they can get
                 * full NO_HZ with this machine.
                 */
-               WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+               WARN_ONCE(have_nohz_full_mask,
+                         "NO_HZ FULL will not work with unstable sched clock");
                return false;
        }
 #endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
 
 void __init tick_nohz_init(void)
 {
-       int cpu;
-
        if (!have_nohz_full_mask) {
                if (tick_nohz_init_all() < 0)
                        return;
index 8ce9eefc5bb4f6aae3ffa520152a51eb033adb1e..a6d098c6df3f47f960004019fb48468ecea64410 100644 (file)
@@ -2169,12 +2169,57 @@ static cycle_t          ftrace_update_time;
 static unsigned long   ftrace_update_cnt;
 unsigned long          ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-       struct ftrace_hash *hash;
+       /*
+        * An empty filter_hash defaults to tracing the module's functions,
+        * but the notrace hash requires testing individual module functions.
+        */
+       return ftrace_hash_empty(ops->filter_hash) &&
+               ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+       /* If ops isn't enabled, ignore it */
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return 0;
+
+       /* If ops traces all mods, we already accounted for it */
+       if (ops_traces_mod(ops))
+               return 0;
+
+       /* The function must be in the filter */
+       if (!ftrace_hash_empty(ops->filter_hash) &&
+           !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+               return 0;
+
+       /* If in notrace hash, we ignore it too */
+       if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+               return 0;
+
+       return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *ops;
+       int cnt = 0;
 
-       hash = ops->filter_hash;
-       return ftrace_hash_empty(hash);
+       for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+               if (ops_references_rec(ops, rec))
+                   cnt++;
+       }
+
+       return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
@@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
        struct dyn_ftrace *p;
        cycle_t start, stop;
        unsigned long ref = 0;
+       bool test = false;
        int i;
 
        /*
@@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
 
                for (ops = ftrace_ops_list;
                     ops != &ftrace_list_end; ops = ops->next) {
-                       if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-                           ops_traces_mod(ops))
-                               ref++;
+                       if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+                               if (ops_traces_mod(ops))
+                                       ref++;
+                               else
+                                       test = true;
+                       }
                }
        }
 
@@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
        for (pg = ftrace_new_pgs; pg; pg = pg->next) {
 
                for (i = 0; i < pg->index; i++) {
+                       int cnt = ref;
+
                        /* If something went wrong, bail without enabling anything */
                        if (unlikely(ftrace_disabled))
                                return -1;
 
                        p = &pg->records[i];
-                       p->flags = ref;
+                       if (test)
+                               cnt += referenced_filters(p);
+                       p->flags = cnt;
 
                        /*
                         * Do the initial record conversion from mcount jump
@@ -2233,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
                         * conversion puts the module to the correct state, thus
                         * passing the ftrace_make_call check.
                         */
-                       if (ftrace_start_up && ref) {
+                       if (ftrace_start_up && cnt) {
                                int failed = __ftrace_replace_code(p, 1);
                                if (failed)
                                        ftrace_bug(failed, p->ip);
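
The hunks above replace the single global reference count applied to every new module record with a per-record count: ops whose filter and notrace hashes are both empty are counted once up front, and filtered ops are counted per record by referenced_filters(). A toy userspace model of that accounting (structs and helpers invented for illustration, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ops {
	bool enabled;
	const unsigned long *filter;	/* empty set == trace everything */
	size_t nfilter;
	const unsigned long *notrace;
	size_t nnotrace;
};

static bool in_set(const unsigned long *set, size_t n, unsigned long ip)
{
	for (size_t i = 0; i < n; i++)
		if (set[i] == ip)
			return true;
	return false;
}

static bool traces_all_mods(const struct ops *o)
{
	return o->nfilter == 0 && o->nnotrace == 0;
}

/* mirrors ops_references_rec(): enabled, passes the filter, not in notrace */
static bool references_rec(const struct ops *o, unsigned long ip)
{
	if (!o->enabled || traces_all_mods(o))
		return false;
	if (o->nfilter && !in_set(o->filter, o->nfilter, ip))
		return false;
	return !in_set(o->notrace, o->nnotrace, ip);
}

int main(void)
{
	unsigned long filt[] = { 0x1000 };
	struct ops list[] = {
		{ .enabled = true },				/* traces all mods */
		{ .enabled = true, .filter = filt, .nfilter = 1 },
	};
	unsigned long recs[] = { 0x1000, 0x2000 };

	for (size_t r = 0; r < 2; r++) {
		int cnt = 0;
		for (size_t i = 0; i < 2; i++) {
			if (!list[i].enabled)
				continue;
			if (traces_all_mods(&list[i]))
				cnt++;			/* the global 'ref' part */
			else if (references_rec(&list[i], recs[r]))
				cnt++;			/* referenced_filters() part */
		}
		printf("record %#lx -> flags %d\n", recs[r], cnt);
	}
	return 0;
}
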
@@ -3384,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
        return add_hash_entry(hash, ip);
 }
 
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+       if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
 static int
 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
                unsigned long ip, int remove, int reset, int enable)
@@ -3426,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
        mutex_lock(&ftrace_lock);
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-       if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
-           && ftrace_enabled)
-               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+       if (!ret)
+               ftrace_ops_update_code(ops);
 
        mutex_unlock(&ftrace_lock);
 
@@ -3655,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
                mutex_lock(&ftrace_lock);
                ret = ftrace_hash_move(iter->ops, filter_hash,
                                       orig_hash, iter->hash);
-               if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-                   && ftrace_enabled)
-                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+               if (!ret)
+                       ftrace_ops_update_code(iter->ops);
 
                mutex_unlock(&ftrace_lock);
        }
index 882ec1dd15155233f89c8c7bad0d2a6dbf58c45f..496f94d57698294966d2c0c9bd10de34854b34a5 100644 (file)
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
        u64 ts;
 
        /* Early boot up does not have a buffer yet */
-       if (!global_trace.trace_buffer.buffer)
+       if (!buf->buffer)
                return trace_clock_local();
 
-       ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
-       ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+       ts = ring_buffer_time_stamp(buf->buffer, cpu);
+       ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 
        return ts;
 }
 
+cycle_t ftrace_now(int cpu)
+{
+       return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
 /**
  * tracing_is_enabled - Show if global_trace has been disabled
  *
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
        /* Make sure all commits have finished */
        synchronize_sched();
 
-       buf->time_start = ftrace_now(buf->cpu);
+       buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 
        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,11 +1224,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
        ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_current(int cpu)
-{
-       tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
 /* Must have trace_types_lock held */
 void tracing_reset_all_online_cpus(void)
 {
@@ -4151,6 +4151,7 @@ waitagain:
        memset(&iter->seq, 0,
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
+       cpumask_clear(iter->started);
        iter->pos = -1;
 
        trace_event_read_lock();
@@ -4468,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 
        /* disable tracing ? */
        if (trace_flags & TRACE_ITER_STOP_ON_FREE)
-               tracing_off();
+               tracer_tracing_off(tr);
        /* resize the ring buffer to 0 */
        tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
@@ -4633,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
         * New clock may not be consistent with the previous clock.
         * Reset the buffer so that it doesn't have incomparable timestamps.
         */
-       tracing_reset_online_cpus(&global_trace.trace_buffer);
+       tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
                ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
-       tracing_reset_online_cpus(&global_trace.max_buffer);
+       tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 
        mutex_unlock(&trace_types_lock);
index 898f868833f2d4df0d80838f9461385b5eedcd77..29a7ebcfb42621d05cb9a74a7bcf291515b38461 100644 (file)
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
        mutex_unlock(&event_mutex);
 }
 
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 {
-       struct ftrace_event_file *file = inode->i_private;
-       struct trace_array *tr = file->tr;
-       int ret;
+       if (!dir)
+               return;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       if (!--dir->nr_events) {
+               debugfs_remove_recursive(dir->entry);
+               list_del(&dir->list);
+               __put_system_dir(dir);
+       }
+}
 
-       ret = tracing_open_generic(inode, filp);
-       if (ret < 0)
-               trace_array_put(tr);
-       return ret;
+static void *event_file_data(struct file *filp)
+{
+       return ACCESS_ONCE(file_inode(filp)->i_private);
 }
 
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
 {
-       struct ftrace_event_file *file = inode->i_private;
-       struct trace_array *tr = file->tr;
+       struct dentry *dir = file->dir;
+       struct dentry *child;
 
-       trace_array_put(tr);
+       if (dir) {
+               spin_lock(&dir->d_lock);        /* probably unneeded */
+               list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+                       if (child->d_inode)     /* probably unneeded */
+                               child->d_inode->i_private = NULL;
+               }
+               spin_unlock(&dir->d_lock);
 
-       return 0;
+               debugfs_remove_recursive(dir);
+       }
+
+       list_del(&file->list);
+       remove_subsystem(file->system);
+       kmem_cache_free(file_cachep, file);
 }
 
 /*
@@ -679,15 +688,25 @@ static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
 {
-       struct ftrace_event_file *file = filp->private_data;
+       struct ftrace_event_file *file;
+       unsigned long flags;
        char buf[4] = "0";
 
-       if (file->flags & FTRACE_EVENT_FL_ENABLED &&
-           !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+       mutex_lock(&event_mutex);
+       file = event_file_data(filp);
+       if (likely(file))
+               flags = file->flags;
+       mutex_unlock(&event_mutex);
+
+       if (!file)
+               return -ENODEV;
+
+       if (flags & FTRACE_EVENT_FL_ENABLED &&
+           !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
                strcpy(buf, "1");
 
-       if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
-           file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+       if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+           flags & FTRACE_EVENT_FL_SOFT_MODE)
                strcat(buf, "*");
 
        strcat(buf, "\n");
@@ -699,13 +718,10 @@ static ssize_t
 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
-       struct ftrace_event_file *file = filp->private_data;
+       struct ftrace_event_file *file;
        unsigned long val;
        int ret;
 
-       if (!file)
-               return -EINVAL;
-
        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
        switch (val) {
        case 0:
        case 1:
+               ret = -ENODEV;
                mutex_lock(&event_mutex);
-               ret = ftrace_event_enable_disable(file, val);
+               file = event_file_data(filp);
+               if (likely(file))
+                       ret = ftrace_event_enable_disable(file, val);
                mutex_unlock(&event_mutex);
                break;
 
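
The read and write handlers above stop trusting a file pointer cached at open time; they look the event file up again from the inode under event_mutex on every call and return -ENODEV once it has been removed. A rough userspace analogue of that revalidate-under-the-lock pattern (names are illustrative, not the tracefs code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static int *event_data;			/* NULL once the event is removed */

static int event_enable_write(int val)
{
	int ret = -ENODEV;

	pthread_mutex_lock(&event_mutex);
	if (event_data) {		/* still alive? */
		*event_data = val;
		ret = 0;
	}
	pthread_mutex_unlock(&event_mutex);
	return ret;
}

static void remove_event(void)
{
	pthread_mutex_lock(&event_mutex);
	free(event_data);
	event_data = NULL;		/* later writers now see -ENODEV */
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	event_data = calloc(1, sizeof(*event_data));
	printf("write before removal: %d\n", event_enable_write(1));	/* 0 */
	remove_event();
	printf("write after removal:  %d\n", event_enable_write(1));	/* -ENODEV */
	return 0;
}
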
@@ -825,7 +844,7 @@ enum {
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct ftrace_event_call *call = m->private;
+       struct ftrace_event_call *call = event_file_data(m->private);
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);
        struct list_head *node = v;
@@ -857,7 +876,7 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 
 static int f_show(struct seq_file *m, void *v)
 {
-       struct ftrace_event_call *call = m->private;
+       struct ftrace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;
 
@@ -910,6 +929,11 @@ static void *f_start(struct seq_file *m, loff_t *pos)
        void *p = (void *)FORMAT_HEADER;
        loff_t l = 0;
 
+       /* ->stop() is called even if ->start() fails */
+       mutex_lock(&event_mutex);
+       if (!event_file_data(m->private))
+               return ERR_PTR(-ENODEV);
+
        while (l < *pos && p)
                p = f_next(m, p, &l);
 
@@ -918,6 +942,7 @@ static void *f_start(struct seq_file *m, loff_t *pos)
 
 static void f_stop(struct seq_file *m, void *p)
 {
+       mutex_unlock(&event_mutex);
 }
 
 static const struct seq_operations trace_format_seq_ops = {
@@ -929,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
 
 static int trace_format_open(struct inode *inode, struct file *file)
 {
-       struct ftrace_event_call *call = inode->i_private;
        struct seq_file *m;
        int ret;
 
@@ -938,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
                return ret;
 
        m = file->private_data;
-       m->private = call;
+       m->private = file;
 
        return 0;
 }
@@ -946,14 +970,18 @@ static int trace_format_open(struct inode *inode, struct file *file)
 static ssize_t
 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-       struct ftrace_event_call *call = filp->private_data;
+       int id = (long)event_file_data(filp);
        char buf[32];
        int len;
 
        if (*ppos)
                return 0;
 
-       len = sprintf(buf, "%d\n", call->event.type);
+       if (unlikely(!id))
+               return -ENODEV;
+
+       len = sprintf(buf, "%d\n", id);
+
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 }
 
@@ -961,21 +989,28 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
 {
-       struct ftrace_event_call *call = filp->private_data;
+       struct ftrace_event_call *call;
        struct trace_seq *s;
-       int r;
+       int r = -ENODEV;
 
        if (*ppos)
                return 0;
 
        s = kmalloc(sizeof(*s), GFP_KERNEL);
+
        if (!s)
                return -ENOMEM;
 
        trace_seq_init(s);
 
-       print_event_filter(call, s);
-       r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+       mutex_lock(&event_mutex);
+       call = event_file_data(filp);
+       if (call)
+               print_event_filter(call, s);
+       mutex_unlock(&event_mutex);
+
+       if (call)
+               r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
        kfree(s);
 
@@ -986,9 +1021,9 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
-       struct ftrace_event_call *call = filp->private_data;
+       struct ftrace_event_call *call;
        char *buf;
-       int err;
+       int err = -ENODEV;
 
        if (cnt >= PAGE_SIZE)
                return -EINVAL;
@@ -1003,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
        }
        buf[cnt] = '\0';
 
-       err = apply_event_filter(call, buf);
+       mutex_lock(&event_mutex);
+       call = event_file_data(filp);
+       if (call)
+               err = apply_event_filter(call, buf);
+       mutex_unlock(&event_mutex);
+
        free_page((unsigned long) buf);
        if (err < 0)
                return err;
@@ -1225,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = {
 };
 
 static const struct file_operations ftrace_enable_fops = {
-       .open = tracing_open_generic_file,
+       .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
-       .release = tracing_release_generic_file,
        .llseek = default_llseek,
 };
 
@@ -1240,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
 };
 
 static const struct file_operations ftrace_event_id_fops = {
-       .open = tracing_open_generic,
        .read = event_id_read,
        .llseek = default_llseek,
 };
@@ -1488,8 +1526,8 @@ event_create_dir(struct dentry *parent,
 
 #ifdef CONFIG_PERF_EVENTS
        if (call->event.type && call->class->reg)
-               trace_create_file("id", 0444, file->dir, call,
-                                 id);
+               trace_create_file("id", 0444, file->dir,
+                                 (void *)(long)call->event.type, id);
 #endif
 
        /*
@@ -1514,33 +1552,16 @@ event_create_dir(struct dentry *parent,
        return 0;
 }
 
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
-       if (!dir)
-               return;
-
-       if (!--dir->nr_events) {
-               debugfs_remove_recursive(dir->entry);
-               list_del(&dir->list);
-               __put_system_dir(dir);
-       }
-}
-
 static void remove_event_from_tracers(struct ftrace_event_call *call)
 {
        struct ftrace_event_file *file;
        struct trace_array *tr;
 
        do_for_each_event_file_safe(tr, file) {
-
                if (file->event_call != call)
                        continue;
 
-               list_del(&file->list);
-               debugfs_remove_recursive(file->dir);
-               remove_subsystem(file->system);
-               kmem_cache_free(file_cachep, file);
-
+               remove_event_file_dir(file);
                /*
                 * The do_for_each_event_file_safe() is
                 * a double loop. After finding the call for this
@@ -1692,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
        destroy_preds(call);
 }
 
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+       struct trace_array *tr;
+       struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+       if (call->perf_refcount)
+               return -EBUSY;
+#endif
+       do_for_each_event_file(tr, file) {
+               if (file->event_call != call)
+                       continue;
+                * We can't rely on the ftrace_event_enable_disable(enable => 0)
+                * we are about to do; FTRACE_EVENT_FL_SOFT_MODE can suppress
+                * TRACE_REG_UNREGISTER.
+                * TRACE_REG_UNREGISTER.
+                */
+               if (file->flags & FTRACE_EVENT_FL_ENABLED)
+                       return -EBUSY;
+               /*
+                * The do_for_each_event_file() is
+                * a double loop. After finding the call for this
+                * trace_array, we use break to jump to the next
+                * trace_array.
+                */
+               break;
+       } while_for_each_event_file();
+
+       __trace_remove_event_call(call);
+
+       return 0;
+}
+
 /* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
 {
+       int ret;
+
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        down_write(&trace_event_sem);
-       __trace_remove_event_call(call);
+       ret = probe_remove_event_call(call);
        up_write(&trace_event_sem);
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+
+       return ret;
 }
 
 #define for_each_event(event, start, end)                      \
@@ -2270,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
 {
        struct ftrace_event_file *file, *next;
 
-       list_for_each_entry_safe(file, next, &tr->events, list) {
-               list_del(&file->list);
-               debugfs_remove_recursive(file->dir);
-               remove_subsystem(file->system);
-               kmem_cache_free(file_cachep, file);
-       }
+       list_for_each_entry_safe(file, next, &tr->events, list)
+               remove_event_file_dir(file);
 }
 
 static void
index 0c7b75a8acc8f9ccad53d75bc6a3fe0da425ae9f..97daa8cf958df8e73c9fa8090f89cbaf4e63893e 100644 (file)
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
        free_page((unsigned long) buf);
 }
 
+/* caller must hold event_mutex */
 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 {
-       struct event_filter *filter;
+       struct event_filter *filter = call->filter;
 
-       mutex_lock(&event_mutex);
-       filter = call->filter;
        if (filter && filter->filter_string)
                trace_seq_printf(s, "%s\n", filter->filter_string);
        else
                trace_seq_puts(s, "none\n");
-       mutex_unlock(&event_mutex);
 }
 
 void print_subsystem_event_filter(struct event_subsystem *system,
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
        return err;
 }
 
+/* caller must hold event_mutex */
 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 {
        struct event_filter *filter;
-       int err = 0;
-
-       mutex_lock(&event_mutex);
+       int err;
 
        if (!strcmp(strstrip(filter_string), "0")) {
                filter_disable(call);
                filter = call->filter;
                if (!filter)
-                       goto out_unlock;
+                       return 0;
                RCU_INIT_POINTER(call->filter, NULL);
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
-               goto out_unlock;
+               return 0;
        }
 
        err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
                        __free_filter(tmp);
                }
        }
-out_unlock:
-       mutex_unlock(&event_mutex);
 
        return err;
 }
index 3811487e7a7a1d58f23b6e04b49fda0dbf06c9b0..243f6834d02675c9c1101e1eabe18e56eb7d3244 100644 (file)
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
 }
 
 static int register_probe_event(struct trace_probe *tp);
-static void unregister_probe_event(struct trace_probe *tp);
+static int unregister_probe_event(struct trace_probe *tp);
 
 static DEFINE_MUTEX(probe_lock);
 static LIST_HEAD(probe_list);
@@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
        if (trace_probe_is_enabled(tp))
                return -EBUSY;
 
+       /* Will fail if probe is being used by ftrace or perf */
+       if (unregister_probe_event(tp))
+               return -EBUSY;
+
        __unregister_trace_probe(tp);
        list_del(&tp->list);
-       unregister_probe_event(tp);
 
        return 0;
 }
@@ -632,7 +635,9 @@ static int release_all_trace_probes(void)
        /* TODO: Use batch unregistration */
        while (!list_empty(&probe_list)) {
                tp = list_entry(probe_list.next, struct trace_probe, list);
-               unregister_trace_probe(tp);
+               ret = unregister_trace_probe(tp);
+               if (ret)
+                       goto end;
                free_trace_probe(tp);
        }
 
@@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
        return ret;
 }
 
-static void unregister_probe_event(struct trace_probe *tp)
+static int unregister_probe_event(struct trace_probe *tp)
 {
+       int ret;
+
        /* tp->event is unregistered in trace_remove_event_call() */
-       trace_remove_event_call(&tp->call);
-       kfree(tp->call.print_fmt);
+       ret = trace_remove_event_call(&tp->call);
+       if (!ret)
+               kfree(tp->call.print_fmt);
+       return ret;
 }
 
 /* Make a debugfs interface for controlling probe points */
index a23d2d71188e61f6ad3b706730b2a70d8c773103..272261b5f94ff84e3291be05aaa3b1a8c1ca00ca 100644 (file)
@@ -70,7 +70,7 @@ struct trace_uprobe {
        (sizeof(struct probe_arg) * (n)))
 
 static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
 
 static DEFINE_MUTEX(uprobe_lock);
 static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
 }
 
 /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
 {
+       int ret;
+
+       ret = unregister_uprobe_event(tu);
+       if (ret)
+               return ret;
+
        list_del(&tu->list);
-       unregister_uprobe_event(tu);
        free_trace_uprobe(tu);
+       return 0;
 }
 
 /* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 
        /* register as an event */
        old_tp = find_probe_event(tu->call.name, tu->call.class->system);
-       if (old_tp)
+       if (old_tp) {
                /* delete old event */
-               unregister_trace_uprobe(old_tp);
+               ret = unregister_trace_uprobe(old_tp);
+               if (ret)
+                       goto end;
+       }
 
        ret = register_uprobe_event(tu);
        if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
                group = UPROBE_EVENT_SYSTEM;
 
        if (is_delete) {
+               int ret;
+
                if (!event) {
                        pr_info("Delete command needs an event name.\n");
                        return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
                        return -ENOENT;
                }
                /* delete an event */
-               unregister_trace_uprobe(tu);
+               ret = unregister_trace_uprobe(tu);
                mutex_unlock(&uprobe_lock);
-               return 0;
+               return ret;
        }
 
        if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
        return ret;
 }
 
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
 {
        struct trace_uprobe *tu;
+       int ret = 0;
 
        mutex_lock(&uprobe_lock);
        while (!list_empty(&uprobe_list)) {
                tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
-               unregister_trace_uprobe(tu);
+               ret = unregister_trace_uprobe(tu);
+               if (ret)
+                       break;
        }
        mutex_unlock(&uprobe_lock);
+       return ret;
 }
 
 /* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
 
 static int probes_open(struct inode *inode, struct file *file)
 {
-       if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
-               cleanup_all_probes();
+       int ret;
+
+       if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+               ret = cleanup_all_probes();
+               if (ret)
+                       return ret;
+       }
 
        return seq_open(file, &probes_seq_op);
 }
@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
        return ret;
 }
 
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
 {
+       int ret;
+
        /* tu->event is unregistered in trace_remove_event_call() */
-       trace_remove_event_call(&tu->call);
+       ret = trace_remove_event_call(&tu->call);
+       if (ret)
+               return ret;
        kfree(tu->call.print_fmt);
        tu->call.print_fmt = NULL;
+       return 0;
 }
 
 /* Make a trace interface for controlling probe points */
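
Across trace_events.c, trace_kprobe.c and trace_uprobe.c above, event removal can now fail: probe_remove_event_call() refuses with -EBUSY while perf or an enabled file still uses the event, and every caller propagates the error instead of freeing a live event. A small standalone model of the new contract (fields are hypothetical stand-ins, not the real structures):

#include <errno.h>
#include <stdio.h>

struct event_call {
	int perf_refcount;
	int enabled;
};

static int trace_remove_event_call_model(struct event_call *call)
{
	if (call->perf_refcount || call->enabled)
		return -EBUSY;	/* still in use: refuse to tear down */
	/* ... unregister and free ... */
	return 0;
}

int main(void)
{
	struct event_call busy = { .perf_refcount = 1 };
	struct event_call idle = { 0 };

	printf("busy: %d\n", trace_remove_event_call_model(&busy));	/* -EBUSY */
	printf("idle: %d\n", trace_remove_event_call_model(&idle));	/* 0 */
	return 0;
}
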
index d8c30db06c5b75456098fe2f1a26785e9a9110ee..9064b919a4066fe9fca676581479e9162de56717 100644 (file)
@@ -62,6 +62,9 @@ int create_user_ns(struct cred *new)
        kgid_t group = new->egid;
        int ret;
 
+       if (parent_ns->level > 32)
+               return -EUSERS;
+
        /*
         * Verify that we can not violate the policy of which files
         * may be accessed that is specified by the root directory,
@@ -92,6 +95,7 @@ int create_user_ns(struct cred *new)
        atomic_set(&ns->count, 1);
        /* Leave the new->user_ns reference with the new user namespace. */
        ns->parent = parent_ns;
+       ns->level = parent_ns->level + 1;
        ns->owner = owner;
        ns->group = group;
 
@@ -105,16 +109,21 @@ int create_user_ns(struct cred *new)
 int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
 {
        struct cred *cred;
+       int err = -ENOMEM;
 
        if (!(unshare_flags & CLONE_NEWUSER))
                return 0;
 
        cred = prepare_creds();
-       if (!cred)
-               return -ENOMEM;
+       if (cred) {
+               err = create_user_ns(cred);
+               if (err)
+                       put_cred(cred);
+               else
+                       *new_cred = cred;
+       }
 
-       *new_cred = cred;
-       return create_user_ns(cred);
+       return err;
 }
 
 void free_user_ns(struct user_namespace *ns)
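
The change above caps user namespace nesting: a child records parent->level + 1 and creation fails with -EUSERS once the parent is more than 32 levels deep. A standalone sketch of the same depth check (toy types, not the kernel structures):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct user_ns {
	struct user_ns *parent;
	int level;
};

static struct user_ns init_ns = { .parent = NULL, .level = 0 };

static struct user_ns *create_user_ns_model(struct user_ns *parent, int *err)
{
	struct user_ns *ns;

	if (parent->level > 32) {
		*err = -EUSERS;		/* too deeply nested */
		return NULL;
	}
	ns = calloc(1, sizeof(*ns));
	if (!ns) {
		*err = -ENOMEM;
		return NULL;
	}
	ns->parent = parent;
	ns->level = parent->level + 1;
	*err = 0;
	return ns;
}

int main(void)
{
	struct user_ns *ns = &init_ns;
	int err = 0, depth = 0;

	while ((ns = create_user_ns_model(ns, &err)))
		depth++;
	printf("stopped at depth %d, err %d\n", depth, err);	/* 33, -EUSERS */
	return 0;
}
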
index dec68bd4e9d8e996404faa065b67354f1defa8eb..d550920e040c4515c7c865bb6ba2ab884f7ef7bd 100644 (file)
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
 
 /**
  * wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
  *
  * Wake up anyone waiting for the atomic_t to go to zero.
  *
index 0b72e816b8d0126f0a3e72c1f3a2496fc3545030..7f5d4be220345b0899d38ced692f2458b9278ce7 100644 (file)
@@ -2817,6 +2817,19 @@ already_gone:
        return false;
 }
 
+static bool __flush_work(struct work_struct *work)
+{
+       struct wq_barrier barr;
+
+       if (start_flush_work(work, &barr)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else {
+               return false;
+       }
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-       struct wq_barrier barr;
-
        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);
 
-       if (start_flush_work(work, &barr)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-               return true;
-       } else {
-               return false;
-       }
+       return __flush_work(work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 {
        to->nice = from->nice;
        cpumask_copy(to->cpumask, from->cpumask);
+       /*
+        * Unlike hash and equality test, this function doesn't ignore
+        * ->no_numa as it is used for both pool and wq attrs.  Instead,
+        * get_unbound_pool() explicitly clears ->no_numa after copying.
+        */
+       to->no_numa = from->no_numa;
 }
 
 /* hash value of the content of @attr */
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
        copy_workqueue_attrs(pool->attrs, attrs);
 
+       /*
+        * no_numa isn't a worker_pool attribute, always clear it.  See
+        * 'struct workqueue_attrs' comments for detail.
+        */
+       pool->attrs->no_numa = false;
+
        /* if cpumask is contained inside a NUMA node, we belong to that node */
        if (wq_numa_enabled) {
                for_each_node(node) {
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
        schedule_work_on(cpu, &wfc.work);
-       flush_work(&wfc.work);
+
+       /*
+        * The work item is on-stack and can't lead to deadlock through
+        * flushing.  Use __flush_work() to avoid spurious lockdep warnings
+        * when work_on_cpu()s are nested.
+        */
+       __flush_work(&wfc.work);
+
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
index 87da3590c61e20287b4854e8212c178f96cb73c6..5bff0814776870e2974e6e436ad99f99f264e05a 100644 (file)
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
        int err = -ENOMEM;
-       pte_t *pte;
+       pte_t *pte, ptfile;
        spinlock_t *ptl;
 
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                goto out;
 
-       if (!pte_none(*pte))
+       ptfile = pgoff_to_pte(pgoff);
+
+       if (!pte_none(*pte)) {
+               if (pte_present(*pte) && pte_soft_dirty(*pte))
+                       pte_file_mksoft_dirty(ptfile);
                zap_pte(mm, vma, addr, pte);
+       }
 
-       set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+       set_pte_at(mm, addr, pte, ptfile);
        /*
         * We don't need to run update_mmu_cache() here because the "file pte"
         * being installed by install_file_pte() is not a real pte - it's a
index 83aff0a4d0938e308619a703dfca17265419e628..b60f33080a28f0a078f8186e65ab8d878fced928 100644 (file)
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
        mm = vma->vm_mm;
 
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
        tlb_finish_mmu(&tlb, start, end);
 }
index c290a1cf38624adf8372d67ccf60d9ec01ff1357..c5792a5d87cede8cf2c5474156c1596f51f5ba61 100644 (file)
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
        if (!s->memcg_params)
                return -ENOMEM;
 
-       INIT_WORK(&s->memcg_params->destroy,
-                       kmem_cache_destroy_work_func);
        if (memcg) {
                s->memcg_params->memcg = memcg;
                s->memcg_params->root_cache = root_cache;
+               INIT_WORK(&s->memcg_params->destroy,
+                               kmem_cache_destroy_work_func);
        } else
                s->memcg_params->is_root_cache = true;
 
index 1ce2e2a734fc2b0812845279a69cb87f48588d3e..af84bc0ec17c213054b023815ae197fefde7ffb6 100644 (file)
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  *     tear-down from @mm. The @fullmm argument is used when @mm is without
  *     users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
-       tlb->fullmm     = fullmm;
+       /* Is it from 0 to ~0? */
+       tlb->fullmm     = !(start | (end+1));
        tlb->need_flush_all = 0;
-       tlb->start      = -1UL;
-       tlb->end        = 0;
+       tlb->start      = start;
+       tlb->end        = end;
        tlb->need_flush = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
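
tlb_gather_mmu() now derives fullmm from the range itself: start | (end + 1) is zero only for the full 0..~0UL span. A small check of that identity (ordinary C, nothing kernel-specific):

#include <stdio.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
	/* zero only when start == 0 and end == ~0UL */
	return !(start | (end + 1));
}

int main(void)
{
	printf("0..~0UL      -> %d\n", is_fullmm(0, ~0UL));		/* 1 */
	printf("0..0x7fff    -> %d\n", is_fullmm(0, 0x7fffUL));		/* 0 */
	printf("0x1000..~0UL -> %d\n", is_fullmm(0x1000UL, ~0UL));	/* 0 */
	return 0;
}
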
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
        struct mmu_gather_batch *batch, *next;
 
-       tlb->start = start;
-       tlb->end   = end;
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
-       unsigned long range_start = addr;
 
 again:
        init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
                                continue;
                        if (unlikely(details) && details->nonlinear_vma
                            && linear_page_index(details->nonlinear_vma,
-                                               addr) != page->index)
-                               set_pte_at(mm, addr, pte,
-                                          pgoff_to_pte(page->index));
+                                               addr) != page->index) {
+                               pte_t ptfile = pgoff_to_pte(page->index);
+                               if (pte_soft_dirty(ptent))
+                                       pte_file_mksoft_dirty(ptfile);
+                               set_pte_at(mm, addr, pte, ptfile);
+                       }
                        if (PageAnon(page))
                                rss[MM_ANONPAGES]--;
                        else {
@@ -1202,17 +1203,25 @@ again:
         * and page-free while holding it.
         */
        if (force_flush) {
+               unsigned long old_end;
+
                force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-               tlb->start = range_start;
+               /*
+                * Flush the TLB just for the previous segment,
+                * then update the range to be the remaining
+                * TLB range.
+                */
+               old_end = tlb->end;
                tlb->end = addr;
-#endif
+
                tlb_flush_mmu(tlb);
-               if (addr != end) {
-                       range_start = addr;
+
+               tlb->start = addr;
+               tlb->end = old_end;
+
+               if (addr != end)
                        goto again;
-               }
        }
 
        return addr;
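
When the gather batch fills up, the rewritten loop above flushes only the range walked so far, then restores the original end and resumes from addr. A toy loop showing the same chunked-flush idea (sizes made up for the example):

#include <stdio.h>

static void flush_range(unsigned long start, unsigned long end)
{
	printf("flush [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x9000, addr = start;
	unsigned long batch = 0x3000;		/* pretend the gather fills here */

	while (addr < end) {
		unsigned long chunk_end = addr + batch < end ? addr + batch : end;
		/* ...zap ptes in [addr, chunk_end)... */
		flush_range(addr, chunk_end);	/* flush just the piece walked */
		addr = chunk_end;		/* continue with the remainder */
	}
	return 0;
}
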
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end = start + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, start, end);
        for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
        unsigned long end = address + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, address, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, address, end);
        unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                exclusive = 1;
        }
        flush_icache_page(vma, page);
+       if (pte_swp_soft_dirty(orig_pte))
+               pte = pte_mksoft_dirty(pte);
        set_pte_at(mm, address, page_table, pte);
        if (page == swapcache)
                do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = mk_pte(page, vma->vm_page_prot);
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+               else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+                       pte_mksoft_dirty(entry);
                if (anon) {
                        inc_mm_counter_fast(mm, MM_ANONPAGES);
                        page_add_new_anon_rmap(page, vma, address);
index 1edbaa3136c3c464cf648e569f2f98499f00642c..f9c97d10b873ae4a896c3a22266707653d941c90 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
        struct mmu_gather tlb;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
 
        lru_add_drain();
        flush_cache_mm(mm);
-       tlb_gather_mmu(&tlb, mm, 1);
+       tlb_gather_mmu(&tlb, mm, 0, -1);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
index cd356df4f71ab5aa0514e7685b4cce50eafceb6e..b2e29acd7e3d6267a626f7bde5656c4f7fd49640 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                           swp_entry_to_pte(make_hwpoison_entry(page)));
        } else if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };
+               pte_t swp_pte;
 
                if (PageSwapCache(page)) {
                        /*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
                        entry = make_migration_entry(page, pte_write(pteval));
                }
-               set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+               swp_pte = swp_entry_to_pte(entry);
+               if (pte_soft_dirty(pteval))
+                       swp_pte = pte_swp_mksoft_dirty(swp_pte);
+               set_pte_at(mm, address, pte, swp_pte);
                BUG_ON(pte_file(*pte));
        } else if (IS_ENABLED(CONFIG_MIGRATION) &&
                   (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                pteval = ptep_clear_flush(vma, address, pte);
 
                /* If nonlinear, store the file page offset in the pte. */
-               if (page->index != linear_page_index(vma, address))
-                       set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+               if (page->index != linear_page_index(vma, address)) {
+                       pte_t ptfile = pgoff_to_pte(page->index);
+                       if (pte_soft_dirty(pteval))
+                               pte_file_mksoft_dirty(ptfile);
+                       set_pte_at(mm, address, pte, ptfile);
+               }
 
                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
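
Like the fremap.c, memory.c and swapfile.c hunks in this series, this change carries the soft-dirty bit across when a present pte is replaced by a swap or file pte, so userspace dirty tracking does not lose pages. A toy bitmask model (flag values invented for the sketch):

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT	(1u << 0)
#define PTE_SOFT_DIRTY	(1u << 1)
#define PTE_SWAP	(1u << 2)

static uint32_t to_swap_pte(uint32_t pte)
{
	uint32_t swp = PTE_SWAP;

	if (pte & PTE_SOFT_DIRTY)
		swp |= PTE_SOFT_DIRTY;	/* the pte_swp_mksoft_dirty() step */
	return swp;
}

int main(void)
{
	uint32_t pte = PTE_PRESENT | PTE_SOFT_DIRTY;

	printf("swap pte keeps soft-dirty: %s\n",
	       (to_swap_pte(pte) & PTE_SOFT_DIRTY) ? "yes" : "no");
	return 0;
}
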
index 2b02d666bf63aee43e60e17fab4544e31730383c..e3ba1f2cf60cc0caa5dd7ed059dbe583b477e073 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
        int pages;
        int pobjects;
 
-       if (!s->cpu_partial)
-               return;
-
        do {
                pages = 0;
                pobjects = 0;
index 36af6eeaa67eaa0532442706e668e9a268596c90..6cf2e60983b7c36c5dea0b745e6aadd907c01532 100644 (file)
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+       /*
+        * When the pte keeps the soft dirty bit, the pte generated
+        * from the swap entry does not have it, but it is still the
+        * same pte from a logical point of view.
+        */
+       pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+       return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+       return pte_same(pte, swp_pte);
+#endif
+}
+
 /*
  * No need to decide whether this PTE shares the swap entry with others,
  * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        }
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-       if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+       if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
                mem_cgroup_cancel_charge_swapin(memcg);
                ret = 0;
                goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                 * swapoff spends a _lot_ of time in this loop!
                 * Test inline before going to call unuse_pte.
                 */
-               if (unlikely(pte_same(*pte, swp_pte))) {
+               if (unlikely(maybe_same_pte(*pte, swp_pte))) {
                        pte_unmap(pte);
                        ret = unuse_pte(vma, pmd, addr, entry, page);
                        if (ret)
index 4a78c4de9f200831c75e5c87bad683bbf669bbfb..6ee48aac776fbe84db68a503d9aca10ca45b0081 100644 (file)
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-       return vlan_dev_priv(dev)->real_dev;
+       struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+       while (is_vlan_dev(ret))
+               ret = vlan_dev_priv(ret)->real_dev;
+
+       return ret;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
 
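
With 802.1ad stacking, a VLAN device's lower device can itself be a VLAN device, so the loop above walks all the way down to the real NIC. A small model of that walk (types and names invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct net_device {
	const char *name;
	struct net_device *lower;	/* set for vlan devices only */
};

static bool is_vlan_dev(const struct net_device *dev)
{
	return dev->lower != NULL;
}

static struct net_device *vlan_dev_real_dev_model(struct net_device *dev)
{
	struct net_device *ret = dev->lower;

	while (is_vlan_dev(ret))	/* unwrap vlan-on-vlan (QinQ) */
		ret = ret->lower;
	return ret;
}

int main(void)
{
	struct net_device eth0  = { "eth0", NULL };
	struct net_device vlan1 = { "eth0.100", &eth0 };
	struct net_device vlan2 = { "eth0.100.200", &vlan1 };

	printf("%s\n", vlan_dev_real_dev_model(&vlan2)->name);	/* eth0 */
	return 0;
}
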
index e14531f1ce1c5258ccc816f9a739ed464b6c364f..264de88db3208290b940cc27b09e44f79e1616c9 100644 (file)
@@ -1529,6 +1529,8 @@ out:
  * in these cases, the skb is further handled by this function and
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
+ *
+ * This call might reallocate skb data.
  */
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                  unsigned short vid)
index f105219f4a4b120810973e0ae8cb31a8c4e2aca1..7614af31daff3678eb2bb75447810c0e27850f88 100644 (file)
@@ -508,6 +508,7 @@ out:
        return 0;
 }
 
+/* this call might reallocate skb data */
 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
 {
        int ret = false;
@@ -568,6 +569,7 @@ out:
        return ret;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
        struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 
        if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
                return false;
+
+       /* skb->data might have been reallocated by pskb_may_pull() */
+       ethhdr = (struct ethhdr *)skb->data;
+       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+               ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
        udphdr = (struct udphdr *)(skb->data + *header_len);
        *header_len += sizeof(*udphdr);
 
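
The re-read of ethhdr added above is needed because pskb_may_pull() can reallocate skb->data, leaving any pointer computed earlier dangling. A userspace analogue using realloc() (clearly not the skb API, just the same pitfall):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 14;
	char *buf = malloc(len);

	memcpy(buf, "\x00\x11\x22\x33\x44\x55", 6);

	char *ethhdr = buf;			/* cached pointer into buf */

	char *newbuf = realloc(buf, 4096);	/* may move, like pskb_may_pull() */
	if (!newbuf) {
		free(buf);
		return 1;
	}
	buf = newbuf;

	ethhdr = buf;				/* recompute, exactly as the patch does */
	printf("first byte: %#hhx\n", ethhdr[0]);
	free(buf);
	return 0;
}
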
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
        return true;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-                           struct sk_buff *skb, struct ethhdr *ethhdr)
+                           struct sk_buff *skb)
 {
        struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
        struct batadv_orig_node *orig_dst_node = NULL;
        struct batadv_gw_node *curr_gw = NULL;
+       struct ethhdr *ethhdr;
        bool ret, out_of_range = false;
        unsigned int header_len = 0;
        uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
        if (!ret)
                goto out;
 
+       ethhdr = (struct ethhdr *)skb->data;
        orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
                                                 ethhdr->h_dest);
        if (!orig_dst_node)
index 039902dca4a691074856d7846c59ad89f0bd8b8e..1037d75da51f35c1867cee72cc64bafd2859e3a8 100644 (file)
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-                           struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
index 700d0b49742da54d0a1280b84c87416977d7d721..0f04e1c302b4a7bf12aa4da04fc84a4d0c2a0f20 100644 (file)
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
        if (batadv_bla_tx(bat_priv, skb, vid))
                goto dropped;
 
+       /* skb->data might have been reallocated by batadv_bla_tx() */
+       ethhdr = (struct ethhdr *)skb->data;
+
        /* Register the client MAC in the transtable */
        if (!is_multicast_ether_addr(ethhdr->h_source))
                batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
                default:
                        break;
                }
+
+               /* reminder: ethhdr might have become unusable from here on
+                * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+                */
        }
 
        /* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        /* unicast packet */
        } else {
                if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
-                       ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+                       ret = batadv_gw_out_of_range(bat_priv, skb);
                        if (ret)
                                goto dropped;
                }
index dc8b5d4dd636d3d5ed982a5d93c2783f203ac4f5..688a0419756bfc6ce2a9f632f6e84a6ed42debcf 100644 (file)
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
                                       struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
  * @orig_node: the destination node
  * @packet_subtype: the batman 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
                                      struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
        struct batadv_neigh_node *neigh_node;
        int data_len = skb->len;
        int ret = NET_RX_DROP;
-       unsigned int dev_mtu;
+       unsigned int dev_mtu, header_len;
 
        /* get routing information */
        if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -429,10 +433,12 @@ find_router:
        switch (packet_type) {
        case BATADV_UNICAST:
                batadv_unicast_prepare_skb(skb, orig_node);
+               header_len = sizeof(struct batadv_unicast_packet);
                break;
        case BATADV_UNICAST_4ADDR:
                batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
                                                 packet_subtype);
+               header_len = sizeof(struct batadv_unicast_4addr_packet);
                break;
        default:
                /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +447,7 @@ find_router:
                goto out;
        }
 
+       ethhdr = (struct ethhdr *)(skb->data + header_len);
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        /* inform the destination node that we are still missing a correct route
index 61c5e819380e52d6347cde7186251467e921c1a0..08e576ada0b2699922b85c9e2783773baee404dc 100644 (file)
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
                if (max_delay)
                        group = &mld->mld_mca;
-       } else if (skb->len >= sizeof(*mld2q)) {
+       } else {
                if (!pskb_may_pull(skb, sizeof(*mld2q))) {
                        err = -EINVAL;
                        goto out;
index 394bb96b608707aa1ab943f66bc52fa173859706..3b9637fb7939a146e8a172a1e009e84c6ca4e841 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *     Sysfs attributes of bridge ports
+ *     Sysfs attributes of bridge
  *     Linux ethernet bridge
  *
  *     Authors:
index 00ee068efc1c2374bc0195d9458eca00392359c6..b84a1b155bc133e39d0f118b61be85fb1d2ed02f 100644 (file)
@@ -65,6 +65,7 @@ ipv6:
                nhoff += sizeof(struct ipv6hdr);
                break;
        }
+       case __constant_htons(ETH_P_8021AD):
        case __constant_htons(ETH_P_8021Q): {
                const struct vlan_hdr *vlan;
                struct vlan_hdr _vlan;
index 9232c68941abb4b5b5ec23f5d6cd56054d8b4c67..60533db8b72db4bafa73d336d697575ba3138c90 100644 (file)
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                atomic_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
+               dev_hold(dev);
+               p->dev = dev;
+               write_pnet(&p->net, hold_net(net));
+               p->sysctl_table = NULL;
 
                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+                       release_net(net);
+                       dev_put(dev);
                        kfree(p);
                        return NULL;
                }
 
-               dev_hold(dev);
-               p->dev = dev;
-               write_pnet(&p->net, hold_net(net));
-               p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
index 3de740834d1ffcfa35fe31735b41d5a0dcb58c3a..ca198c1d1d3047baededa2c1f2c6284b07010bfb 100644 (file)
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
        /* If aging addresses are supported device will need to
         * implement its own handler for this.
         */
-       if (ndm->ndm_state & NUD_PERMANENT) {
+       if (!(ndm->ndm_state & NUD_PERMANENT)) {
                pr_info("%s: FDB only supports static addresses\n", dev->name);
                return -EINVAL;
        }
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
        struct nlattr *extfilt;
        u32 filter_mask = 0;
 
-       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
                                  IFLA_EXT_MASK);
        if (extfilt)
                filter_mask = nla_get_u32(extfilt);
index ab3d814bc80af8f377da971af189a49ae4f2f094..109ee89f123e5e51714cae1fa9097e6ec65f0627 100644 (file)
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
        }
 
        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-                net_adj) & ~(align - 1)) + (net_adj - 2);
+                net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
index 108a1e9c9eac388515530bb790d0e56e4dba0bfd..3df6d3edb2a15a98cb0e90a4e5ed935f42f15f1c 100644 (file)
@@ -71,7 +71,6 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
                        if (!c)
                                continue;
 
-                       if (IS_LEAF(c)) {
-                               prefetch(rcu_dereference_rtnl(p->child[idx]));
+                       if (IS_LEAF(c))
                                return (struct leaf *) c;
-                       }
 
                        /* Rescan start scanning in new node */
                        p = (struct tnode *) c;
index 1f6eab66f7cee5f94e1a5cfd6b6ea919184b5d35..8d6939eeb49247b1c2ee8659cc190814d64eb4e5 100644 (file)
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
        if (daddr)
                memcpy(&iph->daddr, daddr, 4);
        if (iph->daddr)
-               return t->hlen;
+               return t->hlen + sizeof(*iph);
 
        return -(t->hlen + sizeof(*iph));
 }
index 7167b08977df582bd38565cf960568a8553f29d4..850525b34899f3a50d0e19f6bd326821b0c2423a 100644 (file)
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
        iph->daddr      =       dst;
        iph->saddr      =       src;
        iph->ttl        =       ttl;
-       tunnel_ip_select_ident(skb,
-                              (const struct iphdr *)skb_inner_network_header(skb),
-                              &rt->dst);
+       __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
        err = ip_local_out(skb);
        if (unlikely(net_xmit_eval(err)))
index 6577a1149a47c17853e8c7e0e3daa267ef16d1cd..463bd1273346d72cad00a42a5852f95cef490c19 100644 (file)
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
        SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
        SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
-       SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+       SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
        SNMP_MIB_SENTINEL
 };
 
index a9077f441cb27b693d8ad1d710e4dba67aa93ebb..b6ae92a51f58607f7c32be2d0762a49380205012 100644 (file)
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
  */
 static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
-       u64 offs;
-       u32 delta, t, bic_target, max_cnt;
+       u32 delta, bic_target, max_cnt;
+       u64 offs, t;
 
        ca->ack_cnt++;  /* count the number of ACKs */
 
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
         * if the cwnd < 1 million packets !!!
         */
 
+       t = (s32)(tcp_time_stamp - ca->epoch_start);
+       t += msecs_to_jiffies(ca->delay_min >> 3);
        /* change the unit from HZ to bictcp_HZ */
-       t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
-             - ca->epoch_start) << BICTCP_HZ) / HZ;
+       t <<= BICTCP_HZ;
+       do_div(t, HZ);
 
        if (t < ca->bic_K)              /* t - K */
                offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                return;
 
        /* Discard delay samples right after fast recovery */
-       if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+       if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
                return;
 
        delay = (rtt_us << 3) / USEC_PER_MSEC;
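
Editorial aside (not from the patch): the bictcp_update() change above widens the elapsed-time variable to u64 before the BICTCP_HZ shift and divides with do_div(), because the old 32-bit expression overflowed once enough jiffies had elapsed since epoch_start. A standalone illustration; BICTCP_HZ is 10 in tcp_cubic.c, while HZ=1000 and the elapsed value are assumptions chosen to show the wrap:

#include <stdint.h>
#include <stdio.h>

#define BICTCP_HZ 10     /* tcp_cubic.c time unit: 2^10 per second */
#define HZ        1000   /* assumed tick rate for this illustration */

int main(void)
{
        uint32_t elapsed = 5u * 1000 * 1000;  /* ~83 min of jiffies at HZ=1000 */
        uint32_t t32 = (elapsed << BICTCP_HZ) / HZ;            /* wraps at 2^32 */
        uint64_t t64 = ((uint64_t)elapsed << BICTCP_HZ) / HZ;  /* widened first */

        printf("32-bit result: %u, 64-bit result: %llu\n",
               (unsigned)t32, (unsigned long long)t64);
        return 0;
}
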
index 40ffd72243a4f2d1ec9e5da494b3f2650dd027a6..aeac0dc3635d98458a1f70ec84103c89a3f5a648 100644 (file)
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
                net_adj = 0;
 
        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-                net_adj) & ~(align - 1)) + (net_adj - 2);
+                net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
index bff3d821c7ebb7cf7b05342a5e4a9b4ddb14cd4a..c4ff5bbb45c44a1bda8eee4f5d7728338de88a47 100644 (file)
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
 
                        if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
 #ifdef CONFIG_IPV6_SUBTREES
-                               if (fn->subtree)
-                                       fn = fib6_lookup_1(fn->subtree, args + 1);
+                               if (fn->subtree) {
+                                       struct fib6_node *sfn;
+                                       sfn = fib6_lookup_1(fn->subtree,
+                                                           args + 1);
+                                       if (!sfn)
+                                               goto backtrack;
+                                       fn = sfn;
+                               }
 #endif
-                               if (!fn || fn->fn_flags & RTN_RTINFO)
+                               if (fn->fn_flags & RTN_RTINFO)
                                        return fn;
                        }
                }
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
                if (fn->fn_flags & RTN_ROOT)
                        break;
 
index ae31968d42d3b855eebe05dcf0e660b4d367ff8f..cc9e02d79b550106ee07b6aca48ffa9e18081a45 100644 (file)
 #include "led.h"
 
 #define IEEE80211_AUTH_TIMEOUT         (HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG    (HZ / 2)
 #define IEEE80211_AUTH_TIMEOUT_SHORT   (HZ / 10)
 #define IEEE80211_AUTH_MAX_TRIES       3
 #define IEEE80211_AUTH_WAIT_ASSOC      (HZ * 5)
 #define IEEE80211_ASSOC_TIMEOUT                (HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG   (HZ / 2)
 #define IEEE80211_ASSOC_TIMEOUT_SHORT  (HZ / 10)
 #define IEEE80211_ASSOC_MAX_TRIES      3
 
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_channel *channel,
                             const struct ieee80211_ht_operation *ht_oper,
                             const struct ieee80211_vht_operation *vht_oper,
-                            struct cfg80211_chan_def *chandef, bool verbose)
+                            struct cfg80211_chan_def *chandef, bool tracking)
 {
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct cfg80211_chan_def vht_chandef;
        u32 ht_cfreq, ret;
 
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
                                                  channel->band);
        /* check that channel matches the right operating channel */
-       if (channel->center_freq != ht_cfreq) {
+       if (!tracking && channel->center_freq != ht_cfreq) {
                /*
                 * It's possible that some APs are confused here;
                 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                 * since we look at probe response/beacon data here
                 * it should be OK.
                 */
-               if (verbose)
-                       sdata_info(sdata,
-                                  "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
-                                  channel->center_freq, ht_cfreq,
-                                  ht_oper->primary_chan, channel->band);
+               sdata_info(sdata,
+                          "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+                          channel->center_freq, ht_cfreq,
+                          ht_oper->primary_chan, channel->band);
                ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
                goto out;
        }
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                                channel->band);
                break;
        default:
-               if (verbose)
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
                                   "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
                                   vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        }
 
        if (!cfg80211_chandef_valid(&vht_chandef)) {
-               if (verbose)
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
                                   "AP VHT information is invalid, disable VHT\n");
                ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        }
 
        if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
-               if (verbose)
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
                                   "AP VHT information doesn't match HT, disable VHT\n");
                ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
        if (ret & IEEE80211_STA_DISABLE_VHT)
                vht_chandef = *chandef;
 
+       /*
+        * Ignore the DISABLED flag when we're already connected and only
+        * tracking the AP's beacon for bandwidth changes - otherwise we
+        * might get disconnected here if we connect to an AP, update our
+        * regulatory information based on the AP's country IE and the
+        * information we have is wrong/outdated and disables the channel
+        * that we're actually using for the connection to the AP.
+        */
        while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-                                       IEEE80211_CHAN_DISABLED)) {
+                                       tracking ? 0 :
+                                                  IEEE80211_CHAN_DISABLED)) {
                if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
                        ret = IEEE80211_STA_DISABLE_HT |
                              IEEE80211_STA_DISABLE_VHT;
-                       goto out;
+                       break;
                }
 
                ret |= chandef_downgrade(chandef);
        }
 
-       if (chandef->width != vht_chandef.width && verbose)
+       if (chandef->width != vht_chandef.width && !tracking)
                sdata_info(sdata,
                           "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
 
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
        /* calculate new channel (type) based on HT/VHT operation IEs */
        flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
-                                            vht_oper, &chandef, false);
+                                            vht_oper, &chandef, true);
 
        /*
         * Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 
        if (tx_flags == 0) {
                auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-               ifmgd->auth_data->timeout_started = true;
+               auth_data->timeout_started = true;
                run_again(sdata, auth_data->timeout);
        } else {
-               auth_data->timeout_started = false;
+               auth_data->timeout =
+                       round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+               auth_data->timeout_started = true;
+               run_again(sdata, auth_data->timeout);
        }
 
        return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
                assoc_data->timeout_started = true;
                run_again(sdata, assoc_data->timeout);
        } else {
-               assoc_data->timeout_started = false;
+               assoc_data->timeout =
+                       round_jiffies_up(jiffies +
+                                        IEEE80211_ASSOC_TIMEOUT_LONG);
+               assoc_data->timeout_started = true;
+               run_again(sdata, assoc_data->timeout);
        }
 
        return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
        ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
                                                     cbss->channel,
                                                     ht_oper, vht_oper,
-                                                    &chandef, true);
+                                                    &chandef, false);
 
        sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
                                      local->rx_chains);
index 7dcc376eea5f9205d1abf76f62a8d58eb54bc788..2f8010707d015dc62348ca758390c194388d7a8b 100644 (file)
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
        __u32 seq, ack, sack, end, win, swin;
        s16 receiver_offset;
-       bool res;
+       bool res, in_recv_win;
 
        /*
         * Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
                 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
                 receiver->td_scale);
 
+       /* Is the ending sequence in the receive window (if available)? */
+       in_recv_win = !receiver->td_maxwin ||
+                     after(end, sender->td_end - receiver->td_maxwin - 1);
+
        pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
                 before(seq, sender->td_maxend + 1),
-                after(end, sender->td_end - receiver->td_maxwin - 1),
+                (in_recv_win ? 1 : 0),
                 before(sack, receiver->td_end + 1),
                 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
 
        if (before(seq, sender->td_maxend + 1) &&
-           after(end, sender->td_end - receiver->td_maxwin - 1) &&
+           in_recv_win &&
            before(sack, receiver->td_end + 1) &&
            after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
                /*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                        nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
                        "nf_ct_tcp: %s ",
                        before(seq, sender->td_maxend + 1) ?
-                       after(end, sender->td_end - receiver->td_maxwin - 1) ?
+                       in_recv_win ?
                        before(sack, receiver->td_end + 1) ?
                        after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
                        : "ACK is under the lower bound (possible overly delayed ACK)"
index 962e9792e3179997db98a448a76fc909432d841f..d92cc317bf8b25a0c371b770688c860169c9dea8 100644 (file)
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(inst->group_num);
 
+       memset(&pmsg, 0, sizeof(pmsg));
        pmsg.hw_protocol        = skb->protocol;
        pmsg.hook               = hooknum;
 
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
        if (indev && skb->dev &&
            skb->mac_header != skb->network_header) {
                struct nfulnl_msg_packet_hw phw;
-               int len = dev_parse_header(skb, phw.hw_addr);
+               int len;
+
+               memset(&phw, 0, sizeof(phw));
+               len = dev_parse_header(skb, phw.hw_addr);
                if (len > 0) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
index 971ea145ab3ea33a2d42ad0cf66e7025c1457832..8a703c3dd318b660c5e02b326495dbbc7b9f5b3e 100644 (file)
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
-               int len = dev_parse_header(entskb, phw.hw_addr);
+               int len;
+
+               memset(&phw, 0, sizeof(phw));
+               len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
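
Editorial aside (not from the patch): the hunks above zero the hardware-address structure before dev_parse_header() fills only part of it; otherwise the untouched bytes (the pad field and the unused tail of hw_addr) would carry stale stack contents into the netlink message. A hypothetical user-space sketch of the same pattern, with a made-up struct layout loosely modeled on the message format:

#include <stdint.h>
#include <string.h>

struct msg_packet_hw {
        uint16_t hw_addrlen;
        uint16_t pad;           /* never written explicitly */
        uint8_t  hw_addr[8];
};

static void fill_hw(struct msg_packet_hw *phw, const uint8_t *addr, size_t len)
{
        memset(phw, 0, sizeof(*phw));   /* zero pad and unused bytes first */
        if (len > sizeof(phw->hw_addr))
                len = sizeof(phw->hw_addr);
        phw->hw_addrlen = (uint16_t)len;
        memcpy(phw->hw_addr, addr, len);
}
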
index 7011c71646f0266eb75c856bc49fea7b5030bd52..6113cc7efffcd782cee9d8c2e07ffc399f6e67c4 100644 (file)
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 {
        const struct xt_tcpmss_info *info = par->targinfo;
        struct tcphdr *tcph;
-       unsigned int tcplen, i;
+       int len, tcp_hdrlen;
+       unsigned int i;
        __be16 oldval;
        u16 newmss;
        u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        if (!skb_make_writable(skb, skb->len))
                return -1;
 
-       tcplen = skb->len - tcphoff;
+       len = skb->len - tcphoff;
+       if (len < (int)sizeof(struct tcphdr))
+               return -1;
+
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+       tcp_hdrlen = tcph->doff * 4;
 
-       /* Header cannot be larger than the packet */
-       if (tcplen < tcph->doff*4)
+       if (len < tcp_hdrlen)
                return -1;
 
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
                newmss = info->mss;
 
        opt = (u_int8_t *)tcph;
-       for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
-               if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
-                   opt[i+1] == TCPOLEN_MSS) {
+       for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+               if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
                        u_int16_t oldmss;
 
                        oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        }
 
        /* There is data after the header so the option can't be added
-          without moving it, and doing so may make the SYN packet
-          itself too large. Accept the packet unmodified instead. */
-       if (tcplen > tcph->doff*4)
+        * without moving it, and doing so may make the SYN packet
+        * itself too large. Accept the packet unmodified instead.
+        */
+       if (len > tcp_hdrlen)
                return 0;
 
        /*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
                newmss = min(newmss, (u16)1220);
 
        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
-       memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+       memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
        inet_proto_csum_replace2(&tcph->check, skb,
-                                htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+                                htons(len), htons(len + TCPOLEN_MSS), 1);
        opt[0] = TCPOPT_MSS;
        opt[1] = TCPOLEN_MSS;
        opt[2] = (newmss & 0xff00) >> 8;
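
Editorial aside (not from the patch): the rewritten loop above bounds the MSS-option scan with i <= tcp_hdrlen - TCPOLEN_MSS, so opt[i+1]..opt[i+3] can never be read past the TCP header. A hypothetical standalone version of that walk; the option constants and the 20-byte base header size are standard TCP values, and the progress step mirrors the module's optlen() helper:

#include <stdint.h>

#define TCPOPT_NOP  1
#define TCPOPT_MSS  2
#define TCPOLEN_MSS 4

/* 'opt' points at the TCP header, 'tcp_hdrlen' is doff * 4 and has already
 * been validated against the packet length. Returns 1 and stores the MSS if
 * an MSS option is found inside the header, 0 otherwise. */
static int find_mss(const uint8_t *opt, int tcp_hdrlen, uint16_t *mss)
{
        int i;

        for (i = 20 /* base TCP header */; i <= tcp_hdrlen - TCPOLEN_MSS; ) {
                if (opt[i] == TCPOPT_MSS && opt[i + 1] == TCPOLEN_MSS) {
                        *mss = (uint16_t)((opt[i + 2] << 8) | opt[i + 3]);
                        return 1;
                }
                /* make finite progress even on NOP/zero-length options */
                if (opt[i] <= TCPOPT_NOP || opt[i + 1] == 0)
                        i += 1;
                else
                        i += opt[i + 1];
        }
        return 0;
}
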
index b68fa191710fe02bdb1b09f825ee6330c9bf570b..625fa1d636a01ccacb43c2fcae902d65a466b9db 100644 (file)
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
        struct tcphdr *tcph;
        u_int16_t n, o;
        u_int8_t *opt;
-       int len;
+       int len, tcp_hdrlen;
 
        /* This is a fragment, no TCP header is available */
        if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
                return NF_DROP;
 
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
-       if (tcph->doff * 4 > len)
+       tcp_hdrlen = tcph->doff * 4;
+
+       if (len < tcp_hdrlen)
                return NF_DROP;
 
        opt  = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
         * Walk through all TCP options - if we find some option to remove,
         * set all octets to %TCPOPT_NOP and adjust checksum.
         */
-       for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+       for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
                optl = optlen(opt, i);
 
-               if (i + optl > tcp_hdrlen(skb))
+               if (i + optl > tcp_hdrlen)
                        break;
 
                if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
index 512718adb0d59df5120e047c69c973556d1c6fb6..f85f8a2ad6cf002fa438bef15597496897d00483 100644 (file)
@@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        int chains_to_skip = cb->args[0];
        int fams_to_skip = cb->args[1];
+       bool need_locking = chains_to_skip || fams_to_skip;
+
+       if (need_locking)
+               genl_lock();
 
        for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
                n = 0;
@@ -810,6 +814,9 @@ errout:
        cb->args[0] = i;
        cb->args[1] = n;
 
+       if (need_locking)
+               genl_unlock();
+
        return skb->len;
 }
 
index 22c5f399f1cf8119f13064559099ad7c30aae93d..ab101f715447921b01829c493d1cdef195b4400f 100644 (file)
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
 {
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
 
+       OVS_CB(skb)->tun_key = NULL;
        return do_execute_actions(dp, skb, acts->actions,
                                         acts->actions_len, false);
 }
index f7e3a0d84c40488cd6744dce31c90bd0d942bd3b..f2ed7600084e896c34d7f5c137a286fd3a05b986 100644 (file)
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
        return 0;
 
-       rtnl_unlock();
-       return 0;
-
 exit_free:
        kfree_skb(reply);
 exit_unlock:
index 5c519b121e1be0ba26c40e136ace7b8eba42f62e..1aa84dc58777b589f80f84f04554a981502bed2c 100644 (file)
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
        struct flex_array *buckets;
        int i, err;
 
-       buckets = flex_array_alloc(sizeof(struct hlist_head *),
+       buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;
index 281c1bded1f60f94934e406c9cafe076e4e2706d..51b968d3febb477be1e4183c8a94258c97fdcd18 100644 (file)
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
        return q;
 }
 
+/* The linklayer setting was not transferred from iproute2 in older
+ * versions, and the rate table lookup system has been dropped in
+ * the kernel. To stay backward compatible with older iproute2 tc
+ * utils, we detect the linklayer setting by checking whether the rate
+ * table was modified.
+ *
+ * For linklayer ATM table entries, the rate table will be aligned to
+ * 48 bytes, thus some table entries will contain the same value.  The
+ * mpu (min packet unit) is also encoded into the old rate table, thus
+ * starting from the mpu, we find the low and high table entries for
+ * mapping this cell.  If these entries contain the same value, then
+ * the rate table has been modified for linklayer ATM.
+ *
+ * This is done by rounding mpu up to the nearest 48-byte cell/entry,
+ * then rounding up to the next cell, computing the table entry one
+ * below, and comparing the two.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+       int low       = roundup(r->mpu, 48);
+       int high      = roundup(low+1, 48);
+       int cell_low  = low >> r->cell_log;
+       int cell_high = (high >> r->cell_log) - 1;
+
+       /* rtab is too inaccurate at rates > 100Mbit/s */
+       if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+               pr_debug("TC linklayer: Giving up ATM detection\n");
+               return TC_LINKLAYER_ETHERNET;
+       }
+
+       if ((cell_high > cell_low) && (cell_high < 256)
+           && (rtab[cell_low] == rtab[cell_high])) {
+               pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+                        cell_low, cell_high, rtab[cell_high]);
+               return TC_LINKLAYER_ATM;
+       }
+       return TC_LINKLAYER_ETHERNET;
+}
+
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
                rtab->rate = *r;
                rtab->refcnt = 1;
                memcpy(rtab->data, nla_data(tab), 1024);
+               if (r->linklayer == TC_LINKLAYER_UNAWARE)
+                       r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
        }
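
Editorial aside (not from the patch): a small standalone illustration of the cell arithmetic used by __detect_linklayer() above; the mpu and cell_log values are made up, and the point is only which two rate-table slots end up being compared.

#include <stdio.h>

static int roundup48(int x) { return ((x + 47) / 48) * 48; }

int main(void)
{
        int mpu = 0, cell_log = 3;               /* hypothetical values */
        int low       = roundup48(mpu);          /* 0  */
        int high      = roundup48(low + 1);      /* 48 */
        int cell_low  = low >> cell_log;         /* 0  */
        int cell_high = (high >> cell_log) - 1;  /* 5  */

        printf("ATM check compares rtab[%d] and rtab[%d]\n", cell_low, cell_high);
        return 0;
}
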
index 4626cef4b76ea631e32d082d34828ffd2e465459..48be3d5c0d9246117c03b0174da16a637129640c 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
 
 unsigned long dev_trans_start(struct net_device *dev)
 {
-       unsigned long val, res = dev->trans_start;
+       unsigned long val, res;
        unsigned int i;
 
+       if (is_vlan_dev(dev))
+               dev = vlan_dev_real_dev(dev);
+       res = dev->trans_start;
        for (i = 0; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }
        dev->trans_start = res;
+
        return res;
 }
 EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;
        r->rate_bytes_ps = conf->rate;
+       r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
        r->mult = 1;
        /*
         * The deal here is to replace a divide by a reciprocal one
index 45e751527dfcc57ed533c4b259bbf5c56e093cfc..c2178b15ca6e0bb0ed60eb885cc94177fd5b8e95 100644 (file)
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)*arg, *parent;
        struct nlattr *opt = tca[TCA_OPTIONS];
+       struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_opt *hopt;
 
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        if (!hopt->rate.rate || !hopt->ceil.rate)
                goto failure;
 
+       /* Keep backward compatibility with rate_table based iproute2 tc */
+       if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+               rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+               if (rtab)
+                       qdisc_put_rtab(rtab);
+       }
+       if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+               ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+               if (ctab)
+                       qdisc_put_rtab(ctab);
+       }
+
        if (!cl) {              /* new class */
                struct Qdisc *new_q;
                int prio;
index bce5b79662a62b8bdc5e7c895a6ea4bcd147ed86..ab67efc64b2490efba497e93db105bf9ed25b117 100644 (file)
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                else
                        spc_state = SCTP_ADDR_AVAILABLE;
                /* Don't inform ULP about transition from PF to
-                * active state and set cwnd to 1, see SCTP
+                * active state and set cwnd to 1 MTU, see SCTP
                 * Quick failover draft section 5.1, point 5
                 */
                if (transport->state == SCTP_PF) {
                        ulp_notify = false;
-                       transport->cwnd = 1;
+                       transport->cwnd = asoc->pathmtu;
                }
                transport->state = SCTP_ACTIVE;
                break;
index bdbbc3fd7c14e9fa2c31d4faf7de9b7b64540292..8fdd16046d668b0c6cf927e5e87754f2ebfaf583 100644 (file)
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
                return;
        }
 
-       call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
        sctp_packet_free(&transport->packet);
 
        if (transport->asoc)
                sctp_association_put(transport->asoc);
+
+       call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
 }
 
 /* Start T3_rtx timer if it is not already running and update the heartbeat
index 74f6a704e37418cc9b8d4a9b6679f3dd8825da21..ecbc4e3d83ad3f816caa51b6ec942461dac9f090 100644 (file)
@@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task)
                task->tk_action = call_connect_status;
                if (task->tk_status < 0)
                        return;
+               if (task->tk_flags & RPC_TASK_NOCONNECT) {
+                       rpc_exit(task, -ENOTCONN);
+                       return;
+               }
                xprt_connect(task);
        }
 }
index 74d948f5d5a1d399d13061ee5c910c2cacc6f720..779742cfc1ffe3178fc0c320b6d0832e0f420d64 100644 (file)
@@ -23,6 +23,7 @@ struct sunrpc_net {
        struct rpc_clnt *rpcb_local_clnt4;
        spinlock_t rpcb_clnt_lock;
        unsigned int rpcb_users;
+       unsigned int rpcb_is_af_local : 1;
 
        struct mutex gssp_lock;
        wait_queue_head_t gssp_wq;
index 3df764dc330cbdb398e7dbbd5c6558d894bfc386..1891a1022c17e9d7d4c1686ff30c2ef13ebbf7b8 100644 (file)
@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
 }
 
 static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
-                       struct rpc_clnt *clnt4)
+                       struct rpc_clnt *clnt4,
+                       bool is_af_local)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
        /* Protected by rpcb_create_local_mutex */
        sn->rpcb_local_clnt = clnt;
        sn->rpcb_local_clnt4 = clnt4;
+       sn->rpcb_is_af_local = is_af_local ? 1 : 0;
        smp_wmb(); 
        sn->rpcb_users = 1;
        dprintk("RPC:       created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net)
                .program        = &rpcb_program,
                .version        = RPCBVERS_2,
                .authflavor     = RPC_AUTH_NULL,
+               /*
+                * We turn off the idle timeout to prevent the kernel
+                * from automatically disconnecting the socket.
+                * Otherwise, we'd have to cache the mount namespace
+                * of the caller and somehow pass that to the socket
+                * reconnect code.
+                */
+               .flags          = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
        };
        struct rpc_clnt *clnt, *clnt4;
        int result = 0;
@@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net)
                clnt4 = NULL;
        }
 
-       rpcb_set_local(net, clnt, clnt4);
+       rpcb_set_local(net, clnt, clnt4, true);
 
 out:
        return result;
@@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net)
                clnt4 = NULL;
        }
 
-       rpcb_set_local(net, clnt, clnt4);
+       rpcb_set_local(net, clnt, clnt4, false);
 
 out:
        return result;
@@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
        return rpc_create(&args);
 }
 
-static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
+static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
 {
-       int result, error = 0;
+       int flags = RPC_TASK_NOCONNECT;
+       int error, result = 0;
 
+       if (is_set || !sn->rpcb_is_af_local)
+               flags = RPC_TASK_SOFTCONN;
        msg->rpc_resp = &result;
 
-       error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
+       error = rpc_call_sync(clnt, msg, flags);
        if (error < 0) {
                dprintk("RPC:       failed to contact local rpcbind "
                                "server (errno %d).\n", -error);
@@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short
                .rpc_argp       = &map,
        };
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+       bool is_set = false;
 
        dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
                        "rpcbind\n", (port ? "" : "un"),
                        prog, vers, prot, port);
 
        msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
-       if (port)
+       if (port != 0) {
                msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
+               is_set = true;
+       }
 
-       return rpcb_register_call(sn->rpcb_local_clnt, &msg);
+       return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
 }
 
 /*
@@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
        const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
        struct rpcbind_args *map = msg->rpc_argp;
        unsigned short port = ntohs(sin->sin_port);
+       bool is_set = false;
        int result;
 
        map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
                        map->r_addr, map->r_netid);
 
        msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
-       if (port)
+       if (port != 0) {
                msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+               is_set = true;
+       }
 
-       result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+       result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
        kfree(map->r_addr);
        return result;
 }
@@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
        const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
        struct rpcbind_args *map = msg->rpc_argp;
        unsigned short port = ntohs(sin6->sin6_port);
+       bool is_set = false;
        int result;
 
        map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
                        map->r_addr, map->r_netid);
 
        msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
-       if (port)
+       if (port != 0) {
                msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+               is_set = true;
+       }
 
-       result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+       result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
        kfree(map->r_addr);
        return result;
 }
@@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
        map->r_addr = "";
        msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
 
-       return rpcb_register_call(sn->rpcb_local_clnt4, msg);
+       return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
 }
 
 /**
index cb29ef7ba2f0b1d285c079d87ddf8c5a07c118af..609c30c808165191a69899e750439666addc29f8 100644 (file)
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
 {
        struct tipc_link *l_ptr;
        struct tipc_link *temp_l_ptr;
+       struct tipc_link_req *temp_req;
 
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                tipc_link_delete(l_ptr);
        }
-       if (b_ptr->link_req)
-               tipc_disc_delete(b_ptr->link_req);
+       temp_req = b_ptr->link_req;
+       b_ptr->link_req = NULL;
        spin_unlock_bh(&b_ptr->lock);
+
+       if (temp_req)
+               tipc_disc_delete(temp_req);
+
        memset(b_ptr, 0, sizeof(struct tipc_bearer));
 }
 
index 593071dabd1cc7477169b24bc31979d8590465b6..4d9334683f8409c3e064c44a25235ea48e65b38b 100644 (file)
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
        for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
                struct vsock_sock *vsk;
                list_for_each_entry(vsk, &vsock_connected_table[i],
-                                   connected_table);
+                                   connected_table)
                        fn(sk_vsock(vsk));
        }
 
index 4f9f216665e9d4d6753172f3c14d7bbdd3f53154..a8c29fa4f1b3b90539f5370b9084056c5774812a 100644 (file)
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
                cfg80211_leave_mesh(rdev, dev);
                break;
        case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
                cfg80211_stop_ap(rdev, dev);
                break;
        default:
index 25d217d90807f05f6a56945cb89dcd7b3314a53d..3fcba69817e579244e836b2e2d39a1aab14cc210 100644 (file)
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        goto out_unlock;
                }
                *rdev = wiphy_to_dev((*wdev)->wiphy);
-               cb->args[0] = (*rdev)->wiphy_idx;
+               /* 0 is the first index - add 1 to parse only once */
+               cb->args[0] = (*rdev)->wiphy_idx + 1;
                cb->args[1] = (*wdev)->identifier;
        } else {
-               struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
+               /* subtract the 1 again here */
+               struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
                struct wireless_dev *tmp;
 
                if (!wiphy) {
index 3f7682a387b730b9c75fc6e547de98d512b46dbb..eefbd10e408f18b87d35c35d729fabf088734d65 100644 (file)
@@ -1998,12 +1998,11 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
  *
  * Create or update the port list entry
  */
-static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
                                int act)
 {
        __be16 *bep;
        __be32 *be32p;
-       struct sockaddr_in6 *addr6;
        struct smk_port_label *spp;
        struct socket_smack *ssp = sk->sk_security;
        struct smack_known *skp;
@@ -2025,10 +2024,9 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
        /*
         * Get the IP address and port from the address.
         */
-       addr6 = (struct sockaddr_in6 *)address;
-       port = ntohs(addr6->sin6_port);
-       bep = (__be16 *)(&addr6->sin6_addr);
-       be32p = (__be32 *)(&addr6->sin6_addr);
+       port = ntohs(address->sin6_port);
+       bep = (__be16 *)(&address->sin6_addr);
+       be32p = (__be32 *)(&address->sin6_addr);
 
        /*
         * It's remote, so port lookup does no good.
@@ -2060,9 +2058,9 @@ auditout:
        ad.a.u.net->family = sk->sk_family;
        ad.a.u.net->dport = port;
        if (act == SMK_RECEIVING)
-               ad.a.u.net->v6info.saddr = addr6->sin6_addr;
+               ad.a.u.net->v6info.saddr = address->sin6_addr;
        else
-               ad.a.u.net->v6info.daddr = addr6->sin6_addr;
+               ad.a.u.net->v6info.daddr = address->sin6_addr;
 #endif
        return smk_access(skp, object, MAY_WRITE, &ad);
 }
@@ -2201,7 +2199,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
        case PF_INET6:
                if (addrlen < sizeof(struct sockaddr_in6))
                        return -EINVAL;
-               rc = smk_ipv6_port_check(sock->sk, sap, SMK_CONNECTING);
+               rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+                                               SMK_CONNECTING);
                break;
        }
        return rc;
@@ -3034,7 +3033,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
                                int size)
 {
        struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
-       struct sockaddr *sap = (struct sockaddr *) msg->msg_name;
+       struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
        int rc = 0;
 
        /*
@@ -3121,9 +3120,8 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
        return smack_net_ambient;
 }
 
-static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr *sap)
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
 {
-       struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
        u8 nexthdr;
        int offset;
        int proto = -EINVAL;
@@ -3181,7 +3179,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
        struct netlbl_lsm_secattr secattr;
        struct socket_smack *ssp = sk->sk_security;
        struct smack_known *skp;
-       struct sockaddr sadd;
+       struct sockaddr_in6 sadd;
        int rc = 0;
        struct smk_audit_info ad;
 #ifdef CONFIG_AUDIT
index 8e77cbbad871dd4ff10263824d016487b1479813..e3c7ba8d7582643f9d1f52681251e16e3835d042 100644 (file)
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
 }
 
 #define nid_has_mute(codec, nid, dir) \
-       check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
+       check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
 #define nid_has_volume(codec, nid, dir) \
        check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
 
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
                if (enable)
                        val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
        }
-       if (caps & AC_AMPCAP_MUTE) {
+       if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
                if (!enable)
                        val |= HDA_AMP_MUTE;
        }
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
 {
        unsigned int mask = 0xff;
 
-       if (caps & AC_AMPCAP_MUTE) {
+       if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
                if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
                        mask &= ~0x80;
        }
index 8bd2261498680676c0b62b7861fbd5be2aa4daac..f303cd898515d5f20000ec5138400bfd93e63d85 100644 (file)
@@ -1031,6 +1031,7 @@ enum {
        ALC880_FIXUP_GPIO2,
        ALC880_FIXUP_MEDION_RIM,
        ALC880_FIXUP_LG,
+       ALC880_FIXUP_LG_LW25,
        ALC880_FIXUP_W810,
        ALC880_FIXUP_EAPD_COEF,
        ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = {
                        { }
                }
        },
+       [ALC880_FIXUP_LG_LW25] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x0181344f }, /* line-in */
+                       { 0x1b, 0x0321403f }, /* headphone */
+                       { }
+               }
+       },
        [ALC880_FIXUP_W810] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
        SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
        SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+       SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
        SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
 
        /* Below is the copied entries from alc880_quirks.c.
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
        SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
        SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
index 987f728718c52cad7ee2a5f1c5dd8dc2fc6d3f02..be2ba1b6fe4a3a344b062a7f693f8b1f83cce0ae 100644 (file)
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
 static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
 
+static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
+
 static const unsigned int limiter_tlv[] = {
        TLV_DB_RANGE_HEAD(2),
        0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
        SOC_ENUM("Beep Pitch", beep_pitch_enum),
        SOC_ENUM("Beep on Time", beep_ontime_enum),
        SOC_ENUM("Beep off Time", beep_offtime_enum),
-       SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
+       SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
+                       0, 0x07, 0x1f, beep_tlv),
        SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
        SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
        SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
index 6c8a9e7bee25c837cd35dc260847e1c778a811b6..760e8bfeacaadef370eb0ce30e1ce5ad124da295 100644 (file)
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
 static int power_vag_event(struct snd_soc_dapm_widget *w,
        struct snd_kcontrol *kcontrol, int event)
 {
+       const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
                break;
 
        case SND_SOC_DAPM_PRE_PMD:
-               snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
-                       SGTL5000_VAG_POWERUP, 0);
-               msleep(400);
+               /*
+                * Don't clear VAG_POWERUP when both the DAC and the ADC
+                * are operational, to avoid inadvertently starving
+                * whichever of them remains in use.
+                */
+               if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
+                               mask) != mask) {
+                       snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+                               SGTL5000_VAG_POWERUP, 0);
+                       msleep(400);
+               }
                break;
        default:
                break;
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
        SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
        SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
                        SGTL5000_CHIP_ANA_ADC_CTRL,
-                       8, 2, 0, capture_6db_attenuate),
+                       8, 1, 0, capture_6db_attenuate),
        SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
 
        SOC_DOUBLE_TLV("Headphone Playback Volume",
index bd16010441cc16fdb26aba40ae5326c427705e81..4375c9f2b791006717f807cdddcb343bb0f8074c 100644 (file)
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
                return -EINVAL;
        }
 
-       path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-                               list_sink);
-       if (!path) {
+       if (list_empty(&w->sources)) {
                dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
                return -EINVAL;
        }
 
+       path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
+                               list_sink);
+
        ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
        if (ret < 0)
                return ret;
index d04146cad61f4f2dd0227815d0f25970e22e4cae..47565fd045057344390592bb2a8d7a5374cc2209 100644 (file)
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
                reg = TEGRA30_I2S_CIF_RX_CTRL;
        } else {
                val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
-               reg = TEGRA30_I2S_CIF_RX_CTRL;
+               reg = TEGRA30_I2S_CIF_TX_CTRL;
        }
 
        regmap_write(i2s->regmap, reg, val);
index 9e6e3ffd86bbbc4212e72eb3d0c7cd69e6d44c8e..23452ee617e11a5d304fffbd0da3e7bea805e0dc 100644 (file)
@@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
 static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
                u8 reg, u8 value)
 {
-       u8 buffer[13]; /* 13: maximum length of message */
+       u8 *buffer;
+       int ret;
+
+       /* 13: maximum length of message */
+       buffer = kmalloc(13, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
 
        usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
-       return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+       ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+       kfree(buffer);
+       return ret;
 }
 
 static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
                u8 reg, u8 vl, u8 vh)
 {
-       u8 buffer[13]; /* 13: maximum length of message */
+       u8 *buffer;
+       int ret;
+
+       /* 13: maximum length of message */
+       buffer = kmalloc(13, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
 
        usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
-       return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+       ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+       kfree(buffer);
+       return ret;
 }
 
 int usb6fire_comm_init(struct sfire_chip *chip)
@@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip *chip)
        if (!rt)
                return -ENOMEM;
 
+       rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
+       if (!rt->receiver_buffer) {
+               kfree(rt);
+               return -ENOMEM;
+       }
+
        urb = &rt->receiver;
        rt->serial = 1;
        rt->chip = chip;
@@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip *chip)
        urb->interval = 1;
        ret = usb_submit_urb(urb, GFP_KERNEL);
        if (ret < 0) {
+               kfree(rt->receiver_buffer);
                kfree(rt);
                snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
                return ret;
@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
 
 void usb6fire_comm_destroy(struct sfire_chip *chip)
 {
-       kfree(chip->comm);
+       struct comm_runtime *rt = chip->comm;
+
+       kfree(rt->receiver_buffer);
+       kfree(rt);
        chip->comm = NULL;
 }
index 6a0840b0dcff2be78366119126e5842aa4ab325f..780d5ed8e5d8a39ff9bec150d9840fcbbef713d8 100644 (file)
@@ -24,7 +24,7 @@ struct comm_runtime {
        struct sfire_chip *chip;
 
        struct urb receiver;
-       u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
+       u8 *receiver_buffer;
 
        u8 serial; /* urb serial */
 
index 26722423330dd283b62d3fd4c5b77409913372cb..f3dd7266c391c7f17dae6220ed094f548a1d4507 100644 (file)
 #include "chip.h"
 #include "comm.h"
 
+enum {
+       MIDI_BUFSIZE = 64
+};
+
 static void usb6fire_midi_out_handler(struct urb *urb)
 {
        struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
        if (!rt)
                return -ENOMEM;
 
+       rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
+       if (!rt->out_buffer) {
+               kfree(rt);
+               return -ENOMEM;
+       }
+
        rt->chip = chip;
        rt->in_received = usb6fire_midi_in_received;
        rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
 
        ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
        if (ret < 0) {
+               kfree(rt->out_buffer);
                kfree(rt);
                snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
                return ret;
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
 
 void usb6fire_midi_destroy(struct sfire_chip *chip)
 {
-       kfree(chip->midi);
+       struct midi_runtime *rt = chip->midi;
+
+       kfree(rt->out_buffer);
+       kfree(rt);
        chip->midi = NULL;
 }
index c321006e5430ac763e593ecee6314c6cee8fd2d6..84851b9f55592f2ddc4b35781e1efa20cac5cd02 100644 (file)
 
 #include "common.h"
 
-enum {
-       MIDI_BUFSIZE = 64
-};
-
 struct midi_runtime {
        struct sfire_chip *chip;
        struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@ struct midi_runtime {
        struct snd_rawmidi_substream *out;
        struct urb out_urb;
        u8 out_serial; /* serial number of out packet */
-       u8 out_buffer[MIDI_BUFSIZE];
+       u8 *out_buffer;
        int buffer_offset;
 
        void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
index 3d2551cc10f2f6fc191acea154ff2a2bc303b09e..b5eb97fdc842f79893adc8d359c4e519df82d700 100644 (file)
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
        urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
 }
 
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+       int i;
+
+       for (i = 0; i < PCM_N_URBS; i++) {
+               rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+                               * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+               if (!rt->out_urbs[i].buffer)
+                       return -ENOMEM;
+               rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+                               * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+               if (!rt->in_urbs[i].buffer)
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+       int i;
+
+       for (i = 0; i < PCM_N_URBS; i++) {
+               kfree(rt->out_urbs[i].buffer);
+               kfree(rt->in_urbs[i].buffer);
+       }
+}
+
 int usb6fire_pcm_init(struct sfire_chip *chip)
 {
        int i;
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
        if (!rt)
                return -ENOMEM;
 
+       ret = usb6fire_pcm_buffers_init(rt);
+       if (ret) {
+               usb6fire_pcm_buffers_destroy(rt);
+               kfree(rt);
+               return ret;
+       }
+
        rt->chip = chip;
        rt->stream_state = STREAM_DISABLED;
        rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 
        ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
        if (ret < 0) {
+               usb6fire_pcm_buffers_destroy(rt);
                kfree(rt);
                snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
                return ret;
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
 
        if (ret) {
+               usb6fire_pcm_buffers_destroy(rt);
                kfree(rt);
                snd_printk(KERN_ERR PREFIX
                                "error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
 
 void usb6fire_pcm_destroy(struct sfire_chip *chip)
 {
-       kfree(chip->pcm);
+       struct pcm_runtime *rt = chip->pcm;
+
+       usb6fire_pcm_buffers_destroy(rt);
+       kfree(rt);
        chip->pcm = NULL;
 }
index 9b01133ee3fe9c22d3f51557430f011846e5bc8a..f5779d6182c638b02253af7bc9ab73dfed7e1916 100644 (file)
@@ -32,7 +32,7 @@ struct pcm_urb {
        struct urb instance;
        struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
        /* END DO NOT SEPARATE */
-       u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+       u8 *buffer;
 
        struct pcm_urb *peer;
 };
index 7a444b5501d91d28a4e7e8fb5e044c2b4e7ea41b..659950e5b94f6b411577a84821f1a1c4d93b4593 100644 (file)
@@ -591,17 +591,16 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
        ep->stride = frame_bits >> 3;
        ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
 
-       /* calculate max. frequency */
-       if (ep->maxpacksize) {
+       /* assume max. frequency is 25% higher than nominal */
+       ep->freqmax = ep->freqn + (ep->freqn >> 2);
+       maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+                               >> (16 - ep->datainterval);
+       /* but wMaxPacketSize might reduce this */
+       if (ep->maxpacksize && ep->maxpacksize < maxsize) {
                /* whatever fits into a max. size packet */
                maxsize = ep->maxpacksize;
                ep->freqmax = (maxsize / (frame_bits >> 3))
                                << (16 - ep->datainterval);
-       } else {
-               /* no max. packet size: just take 25% higher than nominal */
-               ep->freqmax = ep->freqn + (ep->freqn >> 2);
-               maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
-                               >> (16 - ep->datainterval);
        }
 
        if (ep->fill_max)
index d5438083fd6a8072c20275778f1f3b9fa0a42517..95558ef4a7a09c72ed52c4706348d765a40c4314 100644 (file)
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
        case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
        case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+       case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
        case USB_ID(0x046d, 0x0991):
        /* Most audio usb devices lie about volume resolution.
         * Most Logitech webcams have res = 384.
index 1bc45e71f1fe0217d25ad6b85edd396458cf123d..0df9ede99dfd798730376574511fa1041f2f528c 100644 (file)
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip,
        if (altsd->bNumEndpoints < 1)
                return -ENODEV;
        epd = get_endpoint(alts, 0);
-       if (!usb_endpoint_xfer_bulk(epd) ||
+       if (!usb_endpoint_xfer_bulk(epd) &&
            !usb_endpoint_xfer_int(epd))
                return -ENODEV;
 
        switch (USB_ID_VENDOR(chip->usb_id)) {
        case 0x0499: /* Yamaha */
                err = create_yamaha_midi_quirk(chip, iface, driver, alts);
-               if (err < 0 && err != -ENODEV)
+               if (err != -ENODEV)
                        return err;
                break;
        case 0x0582: /* Roland */
                err = create_roland_midi_quirk(chip, iface, driver, alts);
-               if (err < 0 && err != -ENODEV)
+               if (err != -ENODEV)
                        return err;
                break;
        }