]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 14 Oct 2014 07:39:08 +0000 (09:39 +0200)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 14 Oct 2014 07:39:08 +0000 (09:39 +0200)
Pull drm updates from Dave Airlie:
 "This is the main git pull for the drm,

  I pretty much froze major pulls at -rc5/6 time, and haven't had much
  fallout, so will probably continue doing that.

  Lots of changes all over, big internal header cleanup to make it clear
  drm features are legacy things and what are things that modern KMS
  drivers should be using.  Also big move to use the new generic fences
  in all the TTM drivers.

  core:
        atomic prep work,
        vblank rework changes, allows immediate vblank disables
        major header reworking and cleanups to better delineate legacy
        interfaces from what KMS drivers should be using.
        cursor planes locking fixes

  ttm:
        move to generic fences (affects all TTM drivers)
        ppc64 caching fixes

  radeon:
        userptr support,
        uvd for old asics,
        reset rework for fence changes
        better buffer placement changes,
        dpm feature enablement
        hdmi audio support fixes

  intel:
        Cherryview work,
        180 degree rotation,
        skylake prep work,
        execlist command submission
        full ppgtt prep work
        cursor improvements
        edid caching,
        vdd handling improvements

  nouveau:
        fence reworking
        kepler memory clock work
        gt21x clock work
        fan control improvements
        hdmi infoframe fixes
        DP audio

  ast:
        ppc64 fixes
        caching fix

  rcar:
        rcar-du DT support

  ipuv3:
        prep work for capture support

  msm:
        LVDS support for mdp4, new panel, gpu refactoring

  exynos:
        exynos3250 SoC support, drop bad mmap interface,
        mipi dsi changes, and component match support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (640 commits)
  drm/mst: rework payload table allocation to conform better.
  drm/ast: Fix HW cursor image
  drm/radeon/kv: add uvd/vce info to dpm debugfs output
  drm/radeon/ci: add uvd/vce info to dpm debugfs output
  drm/radeon: export reservation_object from dmabuf to ttm
  drm/radeon: cope with foreign fences inside the reservation object
  drm/radeon: cope with foreign fences inside display
  drm/core: use helper to check driver features
  drm/radeon/cik: write gfx ucode version to ucode addr reg
  drm/radeon/si: print full CS when we hit a packet 0
  drm/radeon: remove unecessary includes
  drm/radeon/combios: declare legacy_connector_convert as static
  drm/radeon/atombios: declare connector convert tables as static
  drm/radeon: drop btc_get_max_clock_from_voltage_dependency_table
  drm/radeon/dpm: drop clk/voltage dependency filters for BTC
  drm/radeon/dpm: drop clk/voltage dependency filters for CI
  drm/radeon/dpm: drop clk/voltage dependency filters for SI
  drm/radeon/dpm: drop clk/voltage dependency filters for NI
  drm/radeon: disable audio when we disable hdmi (v2)
  drm/radeon: split audio enable between eg and r600 (v2)
  ...

39 files changed:
1  2 
Documentation/devicetree/bindings/vendor-prefixes.txt
MAINTAINERS
arch/arm/boot/dts/exynos3250.dtsi
arch/arm/mach-shmobile/board-koelsch-reference.c
arch/arm/mach-shmobile/board-koelsch.c
arch/arm/mach-shmobile/board-lager-reference.c
arch/arm/mach-shmobile/board-lager.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/si.c
drivers/staging/imx-drm/imx-drm-core.c

index f67e3f84e8bc45077c51f68766eceba16215857f,2b5648b1ce1d8769d6cc2971e9b773eed06d6260..c7bb11be15a38ce36482bd68e93427293b064d18
@@@ -14,7 -14,6 +14,7 @@@ allwinner     Allwinner Technology Co., Ltd
  altr  Altera Corp.
  amcc  Applied Micro Circuits Corporation (APM, formally AMCC)
  amd   Advanced Micro Devices (AMD), Inc.
 +amlogic       Amlogic, Inc.
  ams   AMS AG
  amstaos       AMS-Taos Inc.
  apm   Applied Micro Circuits Corporation (APM)
@@@ -30,7 -29,6 +30,7 @@@ calxeda       Calxed
  capella       Capella Microsystems, Inc
  cavium        Cavium, Inc.
  cdns  Cadence Design Systems Inc.
 +chipidea      Chipidea, Inc
  chrp  Common Hardware Reference Platform
  chunghwa      Chunghwa Picture Tubes Ltd.
  cirrus        Cirrus Logic, Inc.
@@@ -40,30 -38,24 +40,30 @@@ dallas     Maxim Integrated Products (forme
  davicom       DAVICOM Semiconductor, Inc.
  denx  Denx Software Engineering
  digi  Digi International Inc.
 +dlg   Dialog Semiconductor
  dlink D-Link Corporation
  dmo   Data Modul AG
  ebv   EBV Elektronik
  edt   Emerging Display Technologies
  emmicro       EM Microelectronic
 +energymicro   Silicon Laboratories (formerly Energy Micro AS)
  epcos EPCOS AG
  epfl  Ecole Polytechnique Fédérale de Lausanne
  epson Seiko Epson Corp.
  est   ESTeem Wireless Modems
  eukrea  Eukréa Electromatique
 +everest       Everest Semiconductor Co. Ltd.
  excito        Excito
 +fcs   Fairchild Semiconductor
  fsl   Freescale Semiconductor
  GEFanuc       GE Fanuc Intelligent Platforms Embedded Systems, Inc.
  gef   GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 +geniatech     Geniatech, Inc.
  globalscale   Globalscale Technologies, Inc.
  gmt   Global Mixed-mode Technology, Inc.
  google        Google, Inc.
  gumstix       Gumstix, Inc.
 +gw    Gateworks Corporation
  haoyu Haoyu Microelectronic Co. Ltd.
  hisilicon     Hisilicon Limited.
  honeywell     Honeywell
@@@ -73,7 -65,6 +73,7 @@@ ibm   International Business Machines (IB
  idt   Integrated Device Technologies, Inc.
  iom   Iomega Corporation
  img   Imagination Technologies Ltd.
 +innolux       Innolux Corporation
  intel Intel Corporation
  intercontrol  Inter Control Group
  isee  ISEE 2007 S.L.
@@@ -92,6 -83,7 +92,7 @@@ maxim Maxim Integrated Product
  mediatek      MediaTek Inc.
  micrel        Micrel Inc.
  microchip     Microchip Technology Inc.
+ mitsubishi    Mitsubishi Electric Corporation
  mosaixtech    Mosaix Technologies, Inc.
  moxa  Moxa
  mpl   MPL AG
@@@ -133,9 -125,7 +134,9 @@@ sil        Silicon Imag
  silabs        Silicon Laboratories
  simtek
  sii   Seiko Instruments, Inc.
 +silergy       Silergy Corp.
  sirf  SiRF Technology, Inc.
 +sitronix      Sitronix Technology Corporation
  smsc  Standard Microsystems Corporation
  snps  Synopsys, Inc.
  solidrun      SolidRun
@@@ -144,12 -134,13 +145,13 @@@ st      STMicroelectronic
  ste   ST-Ericsson
  stericsson    ST-Ericsson
  synology      Synology, Inc.
+ thine THine Electronics, Inc.
  ti    Texas Instruments
  tlm   Trusted Logic Mobility
  toradex       Toradex AG
  toshiba       Toshiba Corporation
  toumaz        Toumaz
 -usi   Universal Scientifc Industrial Co., Ltd.
 +usi   Universal Scientific Industrial Co., Ltd.
  v3    V3 Semiconductor
  variscite     Variscite Ltd.
  via   VIA Technologies, Inc.
@@@ -158,7 -149,6 +160,7 @@@ winbond Winbond Electronics corp
  wlf   Wolfson Microelectronics
  wm    Wondermedia Technologies, Inc.
  xes   Extreme Engineering Solutions (X-ES)
 +xillybus      Xillybus Ltd.
  xlnx  Xilinx
  zyxel ZyXEL Communications Corp.
  zarlink       Zarlink Semiconductor
diff --combined MAINTAINERS
index c52367997fb5bcca51b1483667918bff36383017,dd31933400e8d49b1e8de6c19da5d4be72fbd179..ee1bc5bc20ad221d3d2af470ae2b55f2daf76db0
@@@ -152,9 -152,8 +152,9 @@@ F: drivers/scsi/53c700
  
  6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
  M:    Alexander Aring <alex.aring@gmail.com>
 -L:    linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 +M:    Jukka Rissanen <jukka.rissanen@linux.intel.com>
  L:    linux-bluetooth@vger.kernel.org
 +L:    linux-wpan@vger.kernel.org
  S:    Maintained
  F:    net/6lowpan/
  F:    include/net/6lowpan.h
@@@ -686,17 -685,6 +686,17 @@@ L:       alsa-devel@alsa-project.org (moderat
  W:    http://blackfin.uclinux.org/
  S:    Supported
  F:    sound/soc/blackfin/*
 + 
 +ANALOG DEVICES INC IIO DRIVERS
 +M:    Lars-Peter Clausen <lars@metafoo.de>
 +M:    Michael Hennerich <Michael.Hennerich@analog.com>
 +W:    http://wiki.analog.com/
 +W:    http://ez.analog.com/community/linux-device-drivers
 +S:    Supported
 +F:    drivers/iio/*/ad*
 +X:    drivers/iio/*/adjd*
 +F:    drivers/staging/iio/*/ad*
 +F:    staging/iio/trigger/iio-trig-bfin-timer.c
  
  AOA (Apple Onboard Audio) ALSA DRIVER
  M:    Johannes Berg <johannes@sipsolutions.net>
@@@ -734,6 -722,7 +734,6 @@@ F: net/appletalk
  APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
  M:    Iyappan Subramanian <isubramanian@apm.com>
  M:    Keyur Chudgar <kchudgar@apm.com>
 -M:    Ravi Patel <rapatel@apm.com>
  S:    Supported
  F:    drivers/net/ethernet/apm/xgene/
  F:    Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@@ -846,12 -835,6 +846,12 @@@ M:       Emilio López <emilio@elopez.com.ar
  S:    Maintained
  F:    drivers/clk/sunxi/
  
 +ARM/Amlogic MesonX SoC support
 +M:    Carlo Caione <carlo@caione.org>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +N:    meson[x68]
 +
  ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
  M:    Andrew Victor <linux@maxim.org.za>
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
@@@ -878,9 -861,10 +878,9 @@@ S:        Maintaine
  F:    arch/arm/mach-highbank/
  
  ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
 -M:    Anton Vorontsov <anton@enomsg.org>
 +M:    Krzysztof Halasa <khalasa@piap.pl>
  S:    Maintained
  F:    arch/arm/mach-cns3xxx/
 -T:    git git://git.infradead.org/users/cbou/linux-cns3xxx.git
  
  ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
  M:    Alexander Shiyan <shc_work@mail.ru>
@@@ -980,7 -964,7 +980,7 @@@ F: arch/arm/include/asm/hardware/dec212
  F:    arch/arm/mach-footbridge/
  
  ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 -M:    Shawn Guo <shawn.guo@freescale.com>
 +M:    Shawn Guo <shawn.guo@linaro.org>
  M:    Sascha Hauer <kernel@pengutronix.de>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -1041,20 -1025,24 +1041,20 @@@ F:   arch/arm/mach-pxa/colibri-pxa270-inc
  
  ARM/INTEL IOP32X ARM ARCHITECTURE
  M:    Lennert Buytenhek <kernel@wantstofly.org>
 -M:    Dan Williams <dan.j.williams@intel.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  
  ARM/INTEL IOP33X ARM ARCHITECTURE
 -M:    Dan Williams <dan.j.williams@intel.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -S:    Maintained
 +S:    Orphan
  
  ARM/INTEL IOP13XX ARM ARCHITECTURE
  M:    Lennert Buytenhek <kernel@wantstofly.org>
 -M:    Dan Williams <dan.j.williams@intel.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  
  ARM/INTEL IQ81342EX MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
 -M:    Dan Williams <dan.j.williams@intel.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  
@@@ -1065,7 -1053,7 +1065,7 @@@ S:      Maintaine
  
  ARM/INTEL IXP4XX ARM ARCHITECTURE
  M:    Imre Kaloz <kaloz@openwrt.org>
 -M:    Krzysztof Halasa <khc@pm.waw.pl>
 +M:    Krzysztof Halasa <khalasa@piap.pl>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/mach-ixp4xx/
@@@ -1079,6 -1067,7 +1079,6 @@@ F:      drivers/pcmcia/pxa2xx_stargate2.
  
  ARM/INTEL XSC3 (MANZANO) ARM CORE
  M:    Lennert Buytenhek <kernel@wantstofly.org>
 -M:    Dan Williams <dan.j.williams@intel.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  
@@@ -1161,16 -1150,6 +1161,16 @@@ W:    http://www.digriz.org.uk/ts78xx/kern
  S:    Maintained
  F:    arch/arm/mach-orion5x/ts78xx-*
  
 +ARM/Mediatek SoC support
 +M:    Matthias Brugger <matthias.bgg@gmail.com>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    arch/arm/boot/dts/mt6*
 +F:    arch/arm/boot/dts/mt8*
 +F:    arch/arm/mach-mediatek/
 +N:    mtk
 +K:    mediatek
 +
  ARM/MICREL KS8695 ARCHITECTURE
  M:    Greg Ungerer <gerg@uclinux.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1338,7 -1317,8 +1338,7 @@@ ARM/SAMSUNG MOBILE MACHINE SUPPOR
  M:    Kyungmin Park <kyungmin.park@samsung.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 -F:    arch/arm/mach-s5pv210/mach-aquila.c
 -F:    arch/arm/mach-s5pv210/mach-goni.c
 +F:    arch/arm/mach-s5pv210/
  
  ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
  M:    Kyungmin Park <kyungmin.park@samsung.com>
@@@ -1381,6 -1361,7 +1381,6 @@@ F:      arch/arm/boot/dts/sh
  F:    arch/arm/configs/ape6evm_defconfig
  F:    arch/arm/configs/armadillo800eva_defconfig
  F:    arch/arm/configs/bockw_defconfig
 -F:    arch/arm/configs/genmai_defconfig
  F:    arch/arm/configs/koelsch_defconfig
  F:    arch/arm/configs/kzm9g_defconfig
  F:    arch/arm/configs/lager_defconfig
@@@ -1391,23 -1372,15 +1391,23 @@@ F:   arch/arm/mach-shmobile
  F:    drivers/sh/
  
  ARM/SOCFPGA ARCHITECTURE
 -M:    Dinh Nguyen <dinguyen@altera.com>
 +M:    Dinh Nguyen <dinguyen@opensource.altera.com>
  S:    Maintained
  F:    arch/arm/mach-socfpga/
 +W:    http://www.rocketboards.org
 +T:    git://git.rocketboards.org/linux-socfpga.git
 +T:    git://git.rocketboards.org/linux-socfpga-next.git
  
  ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
 -M:    Dinh Nguyen <dinguyen@altera.com>
 +M:    Dinh Nguyen <dinguyen@opensource.altera.com>
  S:    Maintained
  F:    drivers/clk/socfpga/
  
 +ARM/SOCFPGA EDAC SUPPORT
 +M:    Thor Thayer <tthayer@opensource.altera.com>
 +S:    Maintained
 +F:    drivers/edac/altera_edac.
 +
  ARM/STI ARCHITECTURE
  M:    Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
  M:    Maxime Coquelin <maxime.coquelin@st.com>
@@@ -1419,17 -1392,12 +1419,17 @@@ S:   Maintaine
  F:    arch/arm/mach-sti/
  F:    arch/arm/boot/dts/sti*
  F:    drivers/clocksource/arm_global_timer.c
 -F:    drivers/reset/sti/
 -F:    drivers/pinctrl/pinctrl-st.c
 -F:    drivers/media/rc/st_rc.c
  F:    drivers/i2c/busses/i2c-st.c
 -F:    drivers/tty/serial/st-asc.c
 +F:    drivers/media/rc/st_rc.c
  F:    drivers/mmc/host/sdhci-st.c
 +F:    drivers/phy/phy-stih407-usb.c
 +F:    drivers/phy/phy-stih41x-usb.c
 +F:    drivers/pinctrl/pinctrl-st.c
 +F:    drivers/reset/sti/
 +F:    drivers/tty/serial/st-asc.c
 +F:    drivers/usb/dwc3/dwc3-st.c
 +F:    drivers/usb/host/ehci-st.c
 +F:    drivers/usb/host/ohci-st.c
  
  ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
@@@ -1549,7 -1517,6 +1549,7 @@@ T:      git git://git.xilinx.com/linux-xlnx.
  S:    Supported
  F:    arch/arm/mach-zynq/
  F:    drivers/cpuidle/cpuidle-zynq.c
 +F:    drivers/block/xsysace.c
  N:    zynq
  N:    xilinx
  F:    drivers/clocksource/cadence_ttc_timer.c
@@@ -1595,9 -1562,9 +1595,9 @@@ F:      drivers/platform/x86/asus*.
  F:    drivers/platform/x86/eeepc*.c
  
  ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 -M:    Dan Williams <dan.j.williams@intel.com>
 +R:    Dan Williams <dan.j.williams@intel.com>
  W:    http://sourceforge.net/projects/xscaleiop
 -S:    Maintained
 +S:    Odd fixes
  F:    Documentation/crypto/async-tx-api.txt
  F:    crypto/async_tx/
  F:    drivers/dma/
@@@ -1649,7 -1616,6 +1649,7 @@@ L:      wil6210@qca.qualcomm.co
  S:    Supported
  W:    http://wireless.kernel.org/en/users/Drivers/wil6210
  F:    drivers/net/wireless/ath/wil6210/
 +F:    include/uapi/linux/wil6210_uapi.h
  
  CARL9170 LINUX COMMUNITY WIRELESS DRIVER
  M:    Christian Lamparter <chunkeey@googlemail.com>
@@@ -1699,12 -1665,6 +1699,12 @@@ M:    Nicolas Ferre <nicolas.ferre@atmel.c
  S:    Supported
  F:    drivers/tty/serial/atmel_serial.c
  
 +ATMEL Audio ALSA driver
 +M:    Bo Shen <voice.shen@atmel.com>
 +L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 +S:    Supported
 +F:    sound/soc/atmel
 +
  ATMEL DMA DRIVER
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1738,12 -1698,6 +1738,12 @@@ M:    Nicolas Ferre <nicolas.ferre@atmel.c
  S:    Supported
  F:    drivers/net/ethernet/cadence/
  
 +ATMEL NAND DRIVER
 +M:    Josh Wu <josh.wu@atmel.com>
 +L:    linux-mtd@lists.infradead.org
 +S:    Supported
 +F:    drivers/mtd/nand/atmel_nand*
 +
  ATMEL SPI DRIVER
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
  S:    Supported
@@@ -2050,7 -2004,6 +2050,7 @@@ F:      drivers/net/ethernet/broadcom/bnx2x
  BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
  M:    Christian Daudt <bcm@fixthebug.org>
  M:    Matt Porter <mporter@linaro.org>
 +M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    bcm-kernel-feedback-list@broadcom.com
  T:    git git://github.com/broadcom/mach-bcm
  S:    Maintained
@@@ -2080,14 -2033,6 +2080,14 @@@ F:    arch/arm/mach-bcm/bcm_5301x.
  F:    arch/arm/boot/dts/bcm5301x.dtsi
  F:    arch/arm/boot/dts/bcm470*
  
 +BROADCOM BCM63XX ARM ARCHITECTURE
 +M:    Florian Fainelli <f.fainelli@gmail.com>
 +L:    linux-arm-kernel@lists.infradead.org
 +T:    git git://git.github.com/brcm/linux.git
 +S:    Maintained
 +F:    arch/arm/mach-bcm/bcm63xx.c
 +F:    arch/arm/include/debug/bcm63xx.S
 +
  BROADCOM BCM7XXX ARM ARCHITECTURE
  M:    Marc Carino <marc.ceeeee@gmail.com>
  M:    Brian Norris <computersforpeace@gmail.com>
@@@ -2153,7 -2098,7 +2153,7 @@@ S:      Supporte
  F:    drivers/scsi/bfa/
  
  BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 -M:    Rasesh Mody <rmody@brocade.com>
 +M:    Rasesh Mody <rasesh.mody@qlogic.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/brocade/bna/
@@@ -2766,18 -2711,6 +2766,18 @@@ W:    http://www.chelsio.co
  S:    Supported
  F:    drivers/net/ethernet/chelsio/cxgb4vf/
  
 +CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
 +M:    Ian Munsie <imunsie@au1.ibm.com>
 +M:    Michael Neuling <mikey@neuling.org>
 +L:    linuxppc-dev@lists.ozlabs.org
 +S:    Supported
 +F:    drivers/misc/cxl/
 +F:    include/misc/cxl.h
 +F:    include/uapi/misc/cxl.h
 +F:    Documentation/powerpc/cxl.txt
 +F:    Documentation/powerpc/cxl.txt
 +F:    Documentation/ABI/testing/sysfs-class-cxl
 +
  STMMAC ETHERNET DRIVER
  M:    Giuseppe Cavallaro <peppe.cavallaro@st.com>
  L:    netdev@vger.kernel.org
@@@ -2915,7 -2848,6 +2915,7 @@@ F:      drivers/platform/x86/dell-wmi.
  DESIGNWARE USB2 DRD IP DRIVER
  M:    Paul Zimmerman <paulz@synopsys.com>
  L:    linux-usb@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
  S:    Maintained
  F:    drivers/usb/dwc2/
  
@@@ -2927,13 -2859,6 +2927,13 @@@ T:    git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    drivers/usb/dwc3/
  
 +DEVICE COREDUMP (DEV_COREDUMP)
 +M:    Johannes Berg <johannes@sipsolutions.net>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    drivers/base/devcoredump.c
 +F:    include/linux/devcoredump.h
 +
  DEVICE FREQUENCY (DEVFREQ)
  M:    MyungJoo Ham <myungjoo.ham@samsung.com>
  M:    Kyungmin Park <kyungmin.park@samsung.com>
@@@ -3054,7 -2979,7 +3054,7 @@@ M:      Sumit Semwal <sumit.semwal@linaro.or
  S:    Maintained
  L:    linux-media@vger.kernel.org
  L:    dri-devel@lists.freedesktop.org
 -L:    linaro-mm-sig@lists.linaro.org
 +L:    linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
  F:    drivers/dma-buf/
  F:    include/linux/dma-buf*
  F:    include/linux/reservation.h
@@@ -3064,11 -2989,13 +3064,11 @@@ T:   git git://git.linaro.org/people/sumi
  
  DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
  M:    Vinod Koul <vinod.koul@intel.com>
 -M:    Dan Williams <dan.j.williams@intel.com>
  L:    dmaengine@vger.kernel.org
  Q:    https://patchwork.kernel.org/project/linux-dmaengine/list/
 -S:    Supported
 +S:    Maintained
  F:    drivers/dma/
  F:    include/linux/dma*
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git
  T:    git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
  
  DME1737 HARDWARE MONITOR DRIVER
@@@ -3085,14 -3012,14 +3085,14 @@@ S:   Supporte
  F:    drivers/acpi/dock.c
  
  DOCUMENTATION
 -M:    Randy Dunlap <rdunlap@infradead.org>
 +M:    Jiri Kosina <jkosina@suse.cz>
  L:    linux-doc@vger.kernel.org
 -T:    quilt http://www.infradead.org/~rdunlap/Doc/patches/
  S:    Maintained
  F:    Documentation/
  X:    Documentation/ABI/
  X:    Documentation/devicetree/
  X:    Documentation/[a-z][a-z]_[A-Z][A-Z]/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/doc.git
  
  DOUBLETALK DRIVER
  M:    "James R. Van Zandt" <jrv@vanzandt.mv.com>
@@@ -3164,7 -3091,7 +3164,7 @@@ F:      include/drm/drm_panel.
  F:    Documentation/devicetree/bindings/panel/
  
  INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
- M:    Daniel Vetter <daniel.vetter@ffwll.ch>
+ M:    Daniel Vetter <daniel.vetter@intel.com>
  M:    Jani Nikula <jani.nikula@linux.intel.com>
  L:    intel-gfx@lists.freedesktop.org
  L:    dri-devel@lists.freedesktop.org
@@@ -3624,11 -3551,6 +3624,11 @@@ T:    git git://git.kernel.org/pub/scm/lin
  F:    drivers/video/fbdev/s1d13xxxfb.c
  F:    include/video/s1d13xxxfb.h
  
 +ET131X NETWORK DRIVER
 +M:    Mark Einon <mark.einon@gmail.com>
 +S:    Odd Fixes
 +F:    drivers/net/ethernet/agere/
 +
  ETHERNET BRIDGE
  M:    Stephen Hemminger <stephen@networkplumber.org>
  L:    bridge@lists.linux-foundation.org
@@@ -4250,16 -4172,6 +4250,16 @@@ L:    linuxppc-dev@lists.ozlabs.or
  S:    Odd Fixes
  F:    drivers/tty/hvc/
  
 +HACKRF MEDIA DRIVER
 +M:    Antti Palosaari <crope@iki.fi>
 +L:    linux-media@vger.kernel.org
 +W:    http://linuxtv.org/
 +W:    http://palosaari.fi/linux/
 +Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 +T:    git git://linuxtv.org/anttip/media_tree.git
 +S:    Maintained
 +F:    drivers/media/usb/hackrf/
 +
  HARDWARE MONITORING
  M:    Jean Delvare <jdelvare@suse.de>
  M:    Guenter Roeck <linux@roeck-us.net>
@@@ -4303,8 -4215,9 +4303,8 @@@ S:      Maintaine
  F:    drivers/media/dvb-frontends/hd29l2*
  
  HEWLETT-PACKARD SMART2 RAID DRIVER
 -M:    Chirag Kantharia <chirag.kantharia@hp.com>
  L:    iss_storagedev@hp.com
 -S:    Maintained
 +S:    Orphan
  F:    Documentation/blockdev/cpqarray.txt
  F:    drivers/block/cpqarray.*
  
@@@ -4564,6 -4477,7 +4564,6 @@@ M:      Mika Westerberg <mika.westerberg@lin
  L:    linux-i2c@vger.kernel.org
  L:    linux-acpi@vger.kernel.org
  S:    Maintained
 -F:    drivers/i2c/i2c-acpi.c
  
  I2C-TAOS-EVM DRIVER
  M:    Jean Delvare <jdelvare@suse.de>
@@@ -4683,14 -4597,13 +4683,14 @@@ F:   drivers/idle/i7300_idle.
  
  IEEE 802.15.4 SUBSYSTEM
  M:    Alexander Aring <alex.aring@gmail.com>
 -L:    linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 -W:    http://apps.sourceforge.net/trac/linux-zigbee
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
 +L:    linux-wpan@vger.kernel.org
 +W:    https://github.com/linux-wpan
 +T:    git git://github.com/linux-wpan/linux-wpan-next.git
  S:    Maintained
  F:    net/ieee802154/
  F:    net/mac802154/
  F:    drivers/net/ieee802154/
 +F:    Documentation/networking/ieee802154.txt
  
  IGUANAWORKS USB IR TRANSCEIVER
  M:    Sean Young <sean@mess.org>
@@@ -4700,9 -4613,6 +4700,9 @@@ F:      drivers/media/rc/iguanair.
  
  IIO SUBSYSTEM AND DRIVERS
  M:    Jonathan Cameron <jic23@kernel.org>
 +R:    Hartmut Knaack <knaack.h@gmx.de>
 +R:    Lars-Peter Clausen <lars@metafoo.de>
 +R:    Peter Meerwald <pmeerw@pmeerw.net>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
  F:    drivers/iio/
@@@ -4840,8 -4750,8 +4840,8 @@@ F:      arch/x86/kernel/cpu/microcode/core
  F:    arch/x86/kernel/cpu/microcode/intel*
  
  INTEL I/OAT DMA DRIVER
 -M:    Dan Williams <dan.j.williams@intel.com>
  M:    Dave Jiang <dave.jiang@intel.com>
 +R:    Dan Williams <dan.j.williams@intel.com>
  L:    dmaengine@vger.kernel.org
  Q:    https://patchwork.kernel.org/project/linux-dmaengine/list/
  S:    Supported
@@@ -4856,12 -4766,12 +4856,12 @@@ F:   drivers/iommu/intel-iommu.
  F:    include/linux/intel-iommu.h
  
  INTEL IOP-ADMA DMA DRIVER
 -M:    Dan Williams <dan.j.williams@intel.com>
 +R:    Dan Williams <dan.j.williams@intel.com>
  S:    Odd fixes
  F:    drivers/dma/iop-adma.c
  
  INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
 -M:    Krzysztof Halasa <khc@pm.waw.pl>
 +M:    Krzysztof Halasa <khalasa@piap.pl>
  S:    Maintained
  F:    arch/arm/mach-ixp4xx/include/mach/qmgr.h
  F:    arch/arm/mach-ixp4xx/include/mach/npe.h
@@@ -4875,14 -4785,14 +4875,14 @@@ M:   Deepak Saxena <dsaxena@plexity.net
  S:    Maintained
  F:    drivers/char/hw_random/ixp4xx-rng.c
  
 -INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
 +INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
  M:    Jeff Kirsher <jeffrey.t.kirsher@intel.com>
  M:    Jesse Brandeburg <jesse.brandeburg@intel.com>
  M:    Bruce Allan <bruce.w.allan@intel.com>
  M:    Carolyn Wyborny <carolyn.wyborny@intel.com>
  M:    Don Skidmore <donald.c.skidmore@intel.com>
  M:    Greg Rose <gregory.v.rose@intel.com>
 -M:    Alex Duyck <alexander.h.duyck@intel.com>
 +M:    Matthew Vick <matthew.vick@intel.com>
  M:    John Ronciak <john.ronciak@intel.com>
  M:    Mitch Williams <mitch.a.williams@intel.com>
  M:    Linux NICS <linux.nics@intel.com>
@@@ -5071,7 -4981,6 +5071,7 @@@ L:      linux-kernel@vger.kernel.or
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
  T:    git git://git.infradead.org/users/jcooper/linux.git irqchip/core
 +F:    Documentation/devicetree/bindings/interrupt-controller/
  F:    drivers/irqchip/
  
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
@@@ -5157,7 -5066,7 +5157,7 @@@ W:      http://palosaari.fi/linux
  Q:    http://patchwork.linuxtv.org/project/linux-media/list/
  T:    git git://linuxtv.org/anttip/media_tree.git
  S:    Maintained
 -F:    drivers/media/tuners/tuner_it913x*
 +F:    drivers/media/tuners/it913x*
  
  IVTV VIDEO4LINUX DRIVER
  M:    Andy Walls <awalls@md.metrocast.net>
@@@ -5305,13 -5214,6 +5305,13 @@@ F:    include/linux/lockd
  F:    include/linux/sunrpc/
  F:    include/uapi/linux/sunrpc/
  
 +KERNEL SELFTEST FRAMEWORK
 +M:    Shuah Khan <shuahkh@osg.samsung.com>
 +L:    linux-api@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/shuah/linux-kselftest
 +S:    Maintained
 +F:    tools/testing/selftests
 +
  KERNEL VIRTUAL MACHINE (KVM)
  M:    Gleb Natapov <gleb@kernel.org>
  M:    Paolo Bonzini <pbonzini@redhat.com>
@@@ -5578,7 -5480,7 +5578,7 @@@ F:      drivers/macintosh
  LINUX FOR POWERPC EMBEDDED MPC5XXX
  M:    Anatolij Gustschin <agust@denx.de>
  L:    linuxppc-dev@lists.ozlabs.org
 -T:    git git://git.denx.de/linux-2.6-agust.git
 +T:    git git://git.denx.de/linux-denx-agust.git
  S:    Maintained
  F:    arch/powerpc/platforms/512x/
  F:    arch/powerpc/platforms/52xx/
@@@ -5692,8 -5594,8 +5692,8 @@@ M:      Ingo Molnar <mingo@redhat.com
  L:    linux-kernel@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
  S:    Maintained
 -F:    Documentation/lockdep*.txt
 -F:    Documentation/lockstat.txt
 +F:    Documentation/locking/lockdep*.txt
 +F:    Documentation/locking/lockstat.txt
  F:    include/linux/lockdep.h
  F:    kernel/locking/
  
@@@ -5758,8 -5660,11 +5758,8 @@@ T:     git git://github.com/linux-test-proj
  S:    Maintained
  
  M32R ARCHITECTURE
 -M:    Hirokazu Takata <takata@linux-m32r.org>
 -L:    linux-m32r@ml.linux-m32r.org (moderated for non-subscribers)
 -L:    linux-m32r-ja@ml.linux-m32r.org (in Japanese)
  W:    http://www.linux-m32r.org/
 -S:    Maintained
 +S:    Orphan
  F:    arch/m32r/
  
  M68K ARCHITECTURE
@@@ -6468,7 -6373,7 +6468,7 @@@ M:      Lauro Ramos Venancio <lauro.venancio
  M:    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
  M:    Samuel Ortiz <sameo@linux.intel.com>
  L:    linux-wireless@vger.kernel.org
 -L:    linux-nfc@lists.01.org (moderated for non-subscribers)
 +L:    linux-nfc@lists.01.org (subscribers-only)
  S:    Supported
  F:    net/nfc/
  F:    include/net/nfc/
@@@ -6636,9 -6541,10 +6636,9 @@@ S:     Maintaine
  F:    drivers/mmc/host/omap.c
  
  OMAP HS MMC SUPPORT
 -M:    Balaji T K <balajitk@ti.com>
  L:    linux-mmc@vger.kernel.org
  L:    linux-omap@vger.kernel.org
 -S:    Maintained
 +S:    Orphan
  F:    drivers/mmc/host/omap_hsmmc.c
  
  OMAP RANDOM NUMBER GENERATOR SUPPORT
@@@ -6968,29 -6874,14 +6968,29 @@@ F:   include/linux/pci
  F:    arch/x86/pci/
  F:    arch/x86/kernel/quirks.c
  
 +PCI DRIVER FOR APPLIEDMICRO XGENE
 +M:    Tanmay Inamdar <tinamdar@apm.com>
 +L:    linux-pci@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/xgene-pci.txt
 +F:    drivers/pci/host/pci-xgene.c
 +
  PCI DRIVER FOR IMX6
  M:    Richard Zhu <r65037@freescale.com>
 -M:    Shawn Guo <shawn.guo@freescale.com>
 +M:    Lucas Stach <l.stach@pengutronix.de>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    drivers/pci/host/*imx6*
  
 +PCI DRIVER FOR TI KEYSTONE
 +M:    Murali Karicheri <m-karicheri2@ti.com>
 +L:    linux-pci@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/pci/host/*keystone*
 +
  PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
  M:    Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  M:    Jason Cooper <jason@lakedaemon.net>
@@@ -7393,14 -7284,6 +7393,14 @@@ T:    git git://linuxtv.org/media_tree.gi
  S:    Maintained
  F:    drivers/media/usb/pwc/*
  
 +PWM FAN DRIVER
 +M:    Kamil Debski <k.debski@samsung.com>
 +L:    lm-sensors@lm-sensors.org
 +S:    Supported
 +F:    Documentation/devicetree/bindings/hwmon/pwm-fan.txt
 +F:    Documentation/hwmon/pwm-fan
 +F:    drivers/hwmon/pwm-fan.c
 +
  PWM SUBSYSTEM
  M:    Thierry Reding <thierry.reding@gmail.com>
  L:    linux-pwm@vger.kernel.org
@@@ -7414,12 -7297,12 +7414,12 @@@ F:   drivers/video/backlight/pwm_bl.
  F:    include/linux/pwm_backlight.h
  
  PXA2xx/PXA3xx SUPPORT
 -M:    Eric Miao <eric.y.miao@gmail.com>
 -M:    Russell King <linux@arm.linux.org.uk>
 +M:    Daniel Mack <daniel@zonque.org>
  M:    Haojian Zhuang <haojian.zhuang@gmail.com>
 +M:    Robert Jarzmik <robert.jarzmik@free.fr>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  T:    git git://github.com/hzhuang1/linux.git
 -T:    git git://git.linaro.org/people/ycmiao/pxa-linux.git
 +T:    git git://github.com/rjarzmik/linux.git
  S:    Maintained
  F:    arch/arm/mach-pxa/
  F:    drivers/pcmcia/pxa2xx*
@@@ -7496,15 -7379,15 +7496,15 @@@ F:   drivers/net/ethernet/qlogic/qla3xxx.
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
  M:    Shahed Shaikh <shahed.shaikh@qlogic.com>
 -M:    Dept-HSGLinuxNICDev@qlogic.com
 +M:    Dept-GELinuxNICDev@qlogic.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/qlogic/qlcnic/
  
  QLOGIC QLGE 10Gb ETHERNET DRIVER
 -M:    Shahed Shaikh <shahed.shaikh@qlogic.com>
 -M:    Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 -M:    Ron Mercer <ron.mercer@qlogic.com>
 +M:    Harish Patil <harish.patil@qlogic.com>
 +M:    Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
 +M:    Dept-GELinuxNICDev@qlogic.com
  M:    linux-driver@qlogic.com
  L:    netdev@vger.kernel.org
  S:    Supported
@@@ -7607,12 -7490,13 +7607,12 @@@ F:   drivers/video/fbdev/aty/aty128fb.
  
  RALINK RT2X00 WIRELESS LAN DRIVER
  P:    rt2x00 project
 -M:    Ivo van Doorn <IvDoorn@gmail.com>
 +M:    Stanislaw Gruszka <sgruszka@redhat.com>
  M:    Helmut Schaa <helmut.schaa@googlemail.com>
  L:    linux-wireless@vger.kernel.org
  L:    users@rt2x00.serialmonkey.com (moderated for non-subscribers)
  W:    http://rt2x00.serialmonkey.com/
  S:    Maintained
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ivd/rt2x00.git
  F:    drivers/net/wireless/rt2x00/
  
  RAMDISK RAM BLOCK DEVICE DRIVER
@@@ -7714,7 -7598,6 +7714,7 @@@ F:      fs/reiserfs
  
  REGISTER MAP ABSTRACTION
  M:    Mark Brown <broonie@kernel.org>
 +L:    linux-kernel@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
  S:    Supported
  F:    drivers/base/regmap/
@@@ -7983,6 -7866,7 +7983,6 @@@ S:      Supporte
  F:    drivers/mfd/sec*.c
  F:    drivers/regulator/s2m*.c
  F:    drivers/regulator/s5m*.c
 -F:    drivers/rtc/rtc-sec.c
  F:    include/linux/mfd/samsung/
  
  SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS
@@@ -8206,8 -8090,6 +8206,8 @@@ F:      drivers/mmc/host/sdhci-pltfm.[ch
  
  SECURE COMPUTING
  M:    Kees Cook <keescook@chromium.org>
 +R:    Andy Lutomirski <luto@amacapital.net>
 +R:    Will Drewry <wad@chromium.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git seccomp
  S:    Supported
  F:    kernel/seccomp.c
@@@ -8528,11 -8410,11 +8528,11 @@@ S:   Maintaine
  F:    Documentation/security/Smack.txt
  F:    security/smack/
  
 -SMARTREFLEX DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
 +DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
  M:    Kevin Hilman <khilman@kernel.org>
  M:    Nishanth Menon <nm@ti.com>
  S:    Maintained
 -F:    drivers/power/avs/smartreflex.c
 +F:    drivers/power/avs/
  F:    include/linux/power/smartreflex.h
  L:    linux-pm@vger.kernel.org
  
@@@ -8702,14 -8584,6 +8702,14 @@@ F:    include/sound/dmaengine_pcm.
  F:    sound/core/pcm_dmaengine.c
  F:    sound/soc/soc-generic-dmaengine-pcm.c
  
 +SP2 MEDIA DRIVER
 +M:    Olli Salonen <olli.salonen@iki.fi>
 +L:    linux-media@vger.kernel.org
 +W:    http://linuxtv.org/
 +Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 +S:    Maintained
 +F:    drivers/media/dvb-frontends/sp2*
 +
  SPARC + UltraSPARC (sparc/sparc64)
  M:    "David S. Miller" <davem@davemloft.net>
  L:    sparclinux@vger.kernel.org
@@@ -8821,6 -8695,11 +8821,6 @@@ M:     H Hartley Sweeten <hsweeten@visionen
  S:    Odd Fixes
  F:    drivers/staging/comedi/
  
 -STAGING - ET131X NETWORK DRIVER
 -M:    Mark Einon <mark.einon@gmail.com>
 -S:    Odd Fixes
 -F:    drivers/staging/et131x/
 -
  STAGING - FLARION FT1000 DRIVERS
  M:    Marek Belisko <marek.belisko@gmail.com>
  S:    Odd Fixes
@@@ -8838,14 -8717,6 +8838,14 @@@ W:    http://www.lirc.org
  S:    Odd Fixes
  F:    drivers/staging/media/lirc/
  
 +STAGING - LUSTRE PARALLEL FILESYSTEM
 +M:    Oleg Drokin <oleg.drokin@intel.com>
 +M:    Andreas Dilger <andreas.dilger@intel.com>
 +L:    HPDD-discuss@lists.01.org (moderated for non-subscribers)
 +W:    http://lustre.opensfs.org/
 +S:    Maintained
 +F:    drivers/staging/lustre
 +
  STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
  M:    Julian Andres Klode <jak@jak-linux.org>
  M:    Marc Dietrich <marvin24@gmx.de>
@@@ -9139,13 -9010,17 +9139,13 @@@ F:   drivers/media/rc/ttusbir.
  TEGRA ARCHITECTURE SUPPORT
  M:    Stephen Warren <swarren@wwwdotorg.org>
  M:    Thierry Reding <thierry.reding@gmail.com>
 +M:    Alexandre Courbot <gnurou@gmail.com>
  L:    linux-tegra@vger.kernel.org
  Q:    http://patchwork.ozlabs.org/project/linux-tegra/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git
  S:    Supported
  N:    [^a-z]tegra
  
 -TEGRA ASOC DRIVER
 -M:    Stephen Warren <swarren@wwwdotorg.org>
 -S:    Supported
 -F:    sound/soc/tegra/
 -
  TEGRA CLOCK DRIVER
  M:    Peter De Schrijver <pdeschrijver@nvidia.com>
  M:    Prashant Gaikwad <pgaikwad@nvidia.com>
@@@ -9157,6 -9032,11 +9157,6 @@@ M:     Laxman Dewangan <ldewangan@nvidia.co
  S:    Supported
  F:    drivers/dma/tegra20-apb-dma.c
  
 -TEGRA GPIO DRIVER
 -M:    Stephen Warren <swarren@wwwdotorg.org>
 -S:    Supported
 -F:    drivers/gpio/gpio-tegra.c
 -
  TEGRA I2C DRIVER
  M:    Laxman Dewangan <ldewangan@nvidia.com>
  S:    Supported
@@@ -9173,6 -9053,11 +9173,6 @@@ M:     Laxman Dewangan <ldewangan@nvidia.co
  S:    Supported
  F:    drivers/input/keyboard/tegra-kbc.c
  
 -TEGRA PINCTRL DRIVER
 -M:    Stephen Warren <swarren@wwwdotorg.org>
 -S:    Supported
 -F:    drivers/pinctrl/pinctrl-tegra*
 -
  TEGRA PWM DRIVER
  M:    Thierry Reding <thierry.reding@gmail.com>
  S:    Supported
@@@ -9263,15 -9148,6 +9263,15 @@@ F:    drivers/misc/tifm
  F:    drivers/mmc/host/tifm_sd.c
  F:    include/linux/tifm.h
  
 +TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
 +M:    Santosh Shilimkar <santosh.shilimkar@ti.com>
 +L:    linux-kernel@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/soc/ti/*
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 +
 +
  TI LM49xxx FAMILY ASoC CODEC DRIVERS
  M:    M R Swami Reddy <mr.swami.reddy@ti.com>
  M:    Vishwas A Deshpande <vishwas.a.deshpande@ti.com>
@@@ -9413,14 -9289,6 +9413,14 @@@ T:    git git://linuxtv.org/media_tree.gi
  S:    Odd fixes
  F:    drivers/media/usb/tm6000/
  
 +TW68 VIDEO4LINUX DRIVER
 +M:    Hans Verkuil <hverkuil@xs4all.nl>
 +L:    linux-media@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +W:    http://linuxtv.org
 +S:    Odd Fixes
 +F:    drivers/media/pci/tw68/
 +
  TPM DEVICE DRIVER
  M:    Peter Huewe <peterhuewe@gmx.de>
  M:    Ashley Lai <ashley@ashleylai.com>
@@@ -9442,7 -9310,6 +9442,7 @@@ F:      include/*/ftrace.
  F:    include/linux/trace*.h
  F:    include/trace/
  F:    kernel/trace/
 +F:    tools/testing/selftests/ftrace/
  
  TRIVIAL PATCHES
  M:    Jiri Kosina <trivial@kernel.org>
@@@ -9816,7 -9683,7 +9816,7 @@@ USB WEBCAM GADGE
  M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  L:    linux-usb@vger.kernel.org
  S:    Maintained
 -F:    drivers/usb/gadget/function/*uvc*.c
 +F:    drivers/usb/gadget/function/*uvc*
  F:    drivers/usb/gadget/legacy/webcam.c
  
  USB WIRELESS RNDIS DRIVER (rndis_wlan)
@@@ -10030,7 -9897,6 +10030,7 @@@ F:    drivers/scsi/vmw_pvscsi.
  VOLTAGE AND CURRENT REGULATOR FRAMEWORK
  M:    Liam Girdwood <lgirdwood@gmail.com>
  M:    Mark Brown <broonie@kernel.org>
 +L:    linux-kernel@vger.kernel.org
  W:    http://opensource.wolfsonmicro.com/node/15
  W:    http://www.slimlogic.co.uk/?p=48
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
@@@ -10290,15 -10156,6 +10290,15 @@@ S: Supporte
  F:    drivers/block/xen-blkback/*
  F:    drivers/block/xen*
  
 +XEN PVSCSI DRIVERS
 +M:    Juergen Gross <jgross@suse.com>
 +L:    xen-devel@lists.xenproject.org (moderated for non-subscribers)
 +L:    linux-scsi@vger.kernel.org
 +S:    Supported
 +F:    drivers/scsi/xen-scsifront.c
 +F:    drivers/xen/xen-scsiback.c
 +F:    include/xen/interface/io/vscsiif.h
 +
  XEN SWIOTLB SUBSYSTEM
  M:    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  L:    xen-devel@lists.xenproject.org (moderated for non-subscribers)
@@@ -10323,18 -10180,16 +10323,18 @@@ M:        John Linn <John.Linn@xilinx.com
  S:    Maintained
  F:    drivers/net/ethernet/xilinx/xilinx_axienet*
  
 -XILINX SYSTEMACE DRIVER
 -S:    Orphan
 -F:    drivers/block/xsysace.c
 -
  XILINX UARTLITE SERIAL DRIVER
  M:    Peter Korsgaard <jacmet@sunsite.dk>
  L:    linux-serial@vger.kernel.org
  S:    Maintained
  F:    drivers/tty/serial/uartlite.c
  
 +XILLYBUS DRIVER
 +M:    Eli Billauer <eli.billauer@gmail.com>
 +L:    linux-kernel@vger.kernel.org
 +S:    Supported
 +F:    drivers/char/xillybus/
 +
  XTENSA XTFPGA PLATFORM SUPPORT
  M:    Max Filippov <jcmvbkbc@gmail.com>
  L:    linux-xtensa@linux-xtensa.org
index 429a6c6cfcf95156df66fe8082a6e0a2ac2d8a73,480dedf79b0dd4ed0b1632ead87ba543a84ed5ea..8831c48c2bc93b260d15a0cbb255d64467eb3a4d
                        reg = <0x10020000 0x4000>;
                };
  
+               mipi_phy: video-phy@10020710 {
+                       compatible = "samsung,s5pv210-mipi-video-phy";
+                       reg = <0x10020710 8>;
+                       #phy-cells = <1>;
+               };
                pd_cam: cam-power-domain@10023C00 {
                        compatible = "samsung,exynos4210-pd";
                        reg = <0x10023C00 0x20>;
                };
  
                rtc: rtc@10070000 {
 -                      compatible = "samsung,s3c6410-rtc";
 +                      compatible = "samsung,exynos3250-rtc";
                        reg = <0x10070000 0x100>;
                        interrupts = <0 73 0>, <0 74 0>;
                        status = "disabled";
                        interrupts = <0 240 0>;
                };
  
+               fimd: fimd@11c00000 {
+                       compatible = "samsung,exynos3250-fimd";
+                       reg = <0x11c00000 0x30000>;
+                       interrupt-names = "fifo", "vsync", "lcd_sys";
+                       interrupts = <0 84 0>, <0 85 0>, <0 86 0>;
+                       clocks = <&cmu CLK_SCLK_FIMD0>, <&cmu CLK_FIMD0>;
+                       clock-names = "sclk_fimd", "fimd";
+                       samsung,power-domain = <&pd_lcd0>;
+                       samsung,sysreg = <&sys_reg>;
+                       status = "disabled";
+               };
+               dsi_0: dsi@11C80000 {
+                       compatible = "samsung,exynos3250-mipi-dsi";
+                       reg = <0x11C80000 0x10000>;
+                       interrupts = <0 83 0>;
+                       samsung,phy-type = <0>;
+                       samsung,power-domain = <&pd_lcd0>;
+                       phys = <&mipi_phy 1>;
+                       phy-names = "dsim";
+                       clocks = <&cmu CLK_DSIM0>, <&cmu CLK_SCLK_MIPI0>;
+                       clock-names = "bus_clk", "pll_clk";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
                mshc_0: mshc@12510000 {
                        compatible = "samsung,exynos5250-dw-mshc";
                        reg = <0x12510000 0x1000>;
index 9db5e6774fb75753d3a8199301d75887132f5640,364e69bf85d445701107ce1357e3c07a2a51fa19..46aa540133d6b81cb05053ffaab7a1a28aa21553
@@@ -41,16 -41,15 +41,15 @@@ static struct rcar_du_encoder_data koel
                        .width_mm = 210,
                        .height_mm = 158,
                        .mode = {
-                               .clock = 65000,
-                               .hdisplay = 1024,
-                               .hsync_start = 1048,
-                               .hsync_end = 1184,
-                               .htotal = 1344,
-                               .vdisplay = 768,
-                               .vsync_start = 771,
-                               .vsync_end = 777,
-                               .vtotal = 806,
-                               .flags = 0,
+                               .pixelclock = 65000000,
+                               .hactive = 1024,
+                               .hfront_porch = 20,
+                               .hback_porch = 160,
+                               .hsync_len = 136,
+                               .vactive = 768,
+                               .vfront_porch = 3,
+                               .vback_porch = 29,
+                               .vsync_len = 6,
                        },
                },
        },
@@@ -88,6 -87,7 +87,6 @@@ static void __init koelsch_add_du_devic
   * devices until they get moved to DT.
   */
  static const struct clk_name clk_names[] __initconst = {
 -      { "cmt0", "fck", "sh-cmt-48-gen2.0" },
        { "du0", "du.0", "rcar-du-r8a7791" },
        { "du1", "du.1", "rcar-du-r8a7791" },
        { "lvds0", "lvds.0", "rcar-du-r8a7791" },
@@@ -96,6 -96,7 +95,6 @@@
  static void __init koelsch_add_standard_devices(void)
  {
        shmobile_clk_workaround(clk_names, ARRAY_SIZE(clk_names), false);
 -      r8a7791_add_dt_devices();
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  
        koelsch_add_du_device();
index 126a8b4ec491fa0b6c449eb0628ad02e33ad7c1d,ad10ddb6a3211da235152956f091142c4d30ec9e..7111b5c1d67b764f79daec420390fea7f408c92c
@@@ -63,16 -63,15 +63,15 @@@ static struct rcar_du_encoder_data koel
                        .width_mm = 210,
                        .height_mm = 158,
                        .mode = {
-                               .clock = 65000,
-                               .hdisplay = 1024,
-                               .hsync_start = 1048,
-                               .hsync_end = 1184,
-                               .htotal = 1344,
-                               .vdisplay = 768,
-                               .vsync_start = 771,
-                               .vsync_end = 777,
-                               .vtotal = 806,
-                               .flags = 0,
+                               .pixelclock = 65000000,
+                               .hactive = 1024,
+                               .hfront_porch = 20,
+                               .hback_porch = 160,
+                               .hsync_len = 136,
+                               .vactive = 768,
+                               .vfront_porch = 3,
+                               .vback_porch = 29,
+                               .vsync_len = 6,
                        },
                },
        },
@@@ -331,6 -330,7 +330,6 @@@ SDHI_REGULATOR(2, RCAR_GP_PIN(7, 19), R
  static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
        .tmio_caps      = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_POWER_OFF_CARD,
 -      .tmio_caps2     = MMC_CAP2_NO_MULTI_READ,
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT,
  };
  
@@@ -343,6 -343,7 +342,6 @@@ static struct resource sdhi0_resources[
  static struct sh_mobile_sdhi_info sdhi1_info __initdata = {
        .tmio_caps      = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_POWER_OFF_CARD,
 -      .tmio_caps2     = MMC_CAP2_NO_MULTI_READ,
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT,
  };
  
@@@ -355,6 -356,7 +354,6 @@@ static struct resource sdhi1_resources[
  static struct sh_mobile_sdhi_info sdhi2_info __initdata = {
        .tmio_caps      = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_POWER_OFF_CARD,
 -      .tmio_caps2     = MMC_CAP2_NO_MULTI_READ,
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT |
                          TMIO_MMC_WRPROTECT_DISABLE,
  };
index 2a05c02bec3965f4f1dc4493b5d7fdaea40af275,12a53a1c3d022597a4237937b6afcf9b2a2835ec..bc4b48357ddea891ccb0365ba267325adff374d8
@@@ -43,16 -43,15 +43,15 @@@ static struct rcar_du_encoder_data lage
                        .width_mm = 210,
                        .height_mm = 158,
                        .mode = {
-                               .clock = 65000,
-                               .hdisplay = 1024,
-                               .hsync_start = 1048,
-                               .hsync_end = 1184,
-                               .htotal = 1344,
-                               .vdisplay = 768,
-                               .vsync_start = 771,
-                               .vsync_end = 777,
-                               .vtotal = 806,
-                               .flags = 0,
+                               .pixelclock = 65000000,
+                               .hactive = 1024,
+                               .hfront_porch = 20,
+                               .hback_porch = 160,
+                               .hsync_len = 136,
+                               .vactive = 768,
+                               .vfront_porch = 3,
+                               .vback_porch = 29,
+                               .vsync_len = 6,
                        },
                },
        },
@@@ -92,6 -91,7 +91,6 @@@ static void __init lager_add_du_device(
   * devices until they get moved to DT.
   */
  static const struct clk_name clk_names[] __initconst = {
 -      { "cmt0", "fck", "sh-cmt-48-gen2.0" },
        { "du0", "du.0", "rcar-du-r8a7790" },
        { "du1", "du.1", "rcar-du-r8a7790" },
        { "du2", "du.2", "rcar-du-r8a7790" },
  static void __init lager_add_standard_devices(void)
  {
        shmobile_clk_workaround(clk_names, ARRAY_SIZE(clk_names), false);
 -      r8a7790_add_dt_devices();
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  
        lager_add_du_device();
index f5a98e2942b30d2e6b39f2fb00a17e3a01999a30,80576c2ee668978c6cfb32692bc81221f5f8ff66..571327b1c942c138fbba5cfe8e5ea03d7896ccb1
@@@ -99,16 -99,15 +99,15 @@@ static struct rcar_du_encoder_data lage
                        .width_mm = 210,
                        .height_mm = 158,
                        .mode = {
-                               .clock = 65000,
-                               .hdisplay = 1024,
-                               .hsync_start = 1048,
-                               .hsync_end = 1184,
-                               .htotal = 1344,
-                               .vdisplay = 768,
-                               .vsync_start = 771,
-                               .vsync_end = 777,
-                               .vtotal = 806,
-                               .flags = 0,
+                               .pixelclock = 65000000,
+                               .hactive = 1024,
+                               .hfront_porch = 20,
+                               .hback_porch = 160,
+                               .hsync_len = 136,
+                               .vactive = 768,
+                               .vfront_porch = 3,
+                               .vback_porch = 29,
+                               .vsync_len = 6,
                        },
                },
        },
@@@ -630,6 -629,7 +629,6 @@@ static void __init lager_add_rsnd_devic
  static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
        .tmio_caps      = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_POWER_OFF_CARD,
 -      .tmio_caps2     = MMC_CAP2_NO_MULTI_READ,
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT |
                          TMIO_MMC_WRPROTECT_DISABLE,
  };
@@@ -643,6 -643,7 +642,6 @@@ static struct resource sdhi0_resources[
  static struct sh_mobile_sdhi_info sdhi2_info __initdata = {
        .tmio_caps      = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_POWER_OFF_CARD,
 -      .tmio_caps2     = MMC_CAP2_NO_MULTI_READ,
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT |
                          TMIO_MMC_WRPROTECT_DISABLE,
  };
index 3a02e5e3e9f3b58f83923776773cedd3fafa7012,8749fc06570ebb62540dc4df99393d6c5e7daf57..474e4d12a2d8a73109a35fe3eb3b9948635fedaa
@@@ -35,7 -35,7 +35,7 @@@
   * of extra utility/tracking out of our acquire-ctx.  This is provided
   * by drm_modeset_lock / drm_modeset_acquire_ctx.
   *
 - * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
 + * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
   *
   * The basic usage pattern is to:
   *
   */
  
  
+ /**
+  * __drm_modeset_lock_all - internal helper to grab all modeset locks
+  * @dev: DRM device
+  * @trylock: trylock mode for atomic contexts
+  *
+  * This is a special version of drm_modeset_lock_all() which can also be used in
+  * atomic contexts. Then @trylock must be set to true.
+  *
+  * Returns:
+  * 0 on success or negative error code on failure.
+  */
+ int __drm_modeset_lock_all(struct drm_device *dev,
+                          bool trylock)
+ {
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_modeset_acquire_ctx *ctx;
+       int ret;
+       ctx = kzalloc(sizeof(*ctx),
+                     trylock ? GFP_ATOMIC : GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       if (trylock) {
+               if (!mutex_trylock(&config->mutex))
+                       return -EBUSY;
+       } else {
+               mutex_lock(&config->mutex);
+       }
+       drm_modeset_acquire_init(ctx, 0);
+       ctx->trylock_only = trylock;
+ retry:
+       ret = drm_modeset_lock(&config->connection_mutex, ctx);
+       if (ret)
+               goto fail;
+       ret = drm_modeset_lock_all_crtcs(dev, ctx);
+       if (ret)
+               goto fail;
+       WARN_ON(config->acquire_ctx);
+       /* now we hold the locks, so now that it is safe, stash the
+        * ctx for drm_modeset_unlock_all():
+        */
+       config->acquire_ctx = ctx;
+       drm_warn_on_modeset_not_all_locked(dev);
+       return 0;
+ fail:
+       if (ret == -EDEADLK) {
+               drm_modeset_backoff(ctx);
+               goto retry;
+       }
+       return ret;
+ }
+ EXPORT_SYMBOL(__drm_modeset_lock_all);
+ /**
+  * drm_modeset_lock_all - take all modeset locks
+  * @dev: drm device
+  *
+  * This function takes all modeset locks, suitable where a more fine-grained
+  * scheme isn't (yet) implemented. Locks must be dropped with
+  * drm_modeset_unlock_all.
+  */
+ void drm_modeset_lock_all(struct drm_device *dev)
+ {
+       WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
+ }
+ EXPORT_SYMBOL(drm_modeset_lock_all);
+ /**
+  * drm_modeset_unlock_all - drop all modeset locks
+  * @dev: device
+  *
+  * This function drop all modeset locks taken by drm_modeset_lock_all.
+  */
+ void drm_modeset_unlock_all(struct drm_device *dev)
+ {
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+       if (WARN_ON(!ctx))
+               return;
+       config->acquire_ctx = NULL;
+       drm_modeset_drop_locks(ctx);
+       drm_modeset_acquire_fini(ctx);
+       kfree(ctx);
+       mutex_unlock(&dev->mode_config.mutex);
+ }
+ EXPORT_SYMBOL(drm_modeset_unlock_all);
+ /**
+  * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
+  * @crtc: drm crtc
+  *
+  * This function locks the given crtc using a hidden acquire context. This is
+  * necessary so that drivers internally using the atomic interfaces can grab
+  * further locks with the lock acquire context.
+  */
+ void drm_modeset_lock_crtc(struct drm_crtc *crtc)
+ {
+       struct drm_modeset_acquire_ctx *ctx;
+       int ret;
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (WARN_ON(!ctx))
+               return;
+       drm_modeset_acquire_init(ctx, 0);
+ retry:
+       ret = drm_modeset_lock(&crtc->mutex, ctx);
+       if (ret)
+               goto fail;
+       WARN_ON(crtc->acquire_ctx);
+       /* now we hold the locks, so now that it is safe, stash the
+        * ctx for drm_modeset_unlock_crtc():
+        */
+       crtc->acquire_ctx = ctx;
+       return;
+ fail:
+       if (ret == -EDEADLK) {
+               drm_modeset_backoff(ctx);
+               goto retry;
+       }
+ }
+ EXPORT_SYMBOL(drm_modeset_lock_crtc);
+ /**
+  * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
+  * @crtc: drm crtc
+  *
+  * Legacy ioctl operations like cursor updates or page flips only have per-crtc
+  * locking, and store the acquire ctx in the corresponding crtc. All other
+  * legacy operations take all locks and use a global acquire context. This
+  * function grabs the right one.
+  */
+ struct drm_modeset_acquire_ctx *
+ drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
+ {
+       if (crtc->acquire_ctx)
+               return crtc->acquire_ctx;
+       WARN_ON(!crtc->dev->mode_config.acquire_ctx);
+       return crtc->dev->mode_config.acquire_ctx;
+ }
+ EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
+ /**
+  * drm_modeset_unlock_crtc - drop crtc lock
+  * @crtc: drm crtc
+  *
+  * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
+  * locks acquired through the hidden context.
+  */
+ void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
+ {
+       struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
+       if (WARN_ON(!ctx))
+               return;
+       crtc->acquire_ctx = NULL;
+       drm_modeset_drop_locks(ctx);
+       drm_modeset_acquire_fini(ctx);
+       kfree(ctx);
+ }
+ EXPORT_SYMBOL(drm_modeset_unlock_crtc);
+ /**
+  * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+  * @dev: device
+  *
+  * Useful as a debug assert.
+  */
+ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+ {
+       struct drm_crtc *crtc;
+       /* Locking is currently fubar in the panic handler. */
+       if (oops_in_progress)
+               return;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ }
+ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
  /**
   * drm_modeset_acquire_init - initialize acquire context
   * @ctx: the acquire context
@@@ -108,7 -314,12 +314,12 @@@ static inline int modeset_lock(struct d
  
        WARN_ON(ctx->contended);
  
-       if (interruptible && slow) {
+       if (ctx->trylock_only) {
+               if (!ww_mutex_trylock(&lock->mutex))
+                       return -EBUSY;
+               else
+                       return 0;
+       } else if (interruptible && slow) {
                ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
        } else if (interruptible) {
                ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
index 4b7ed52892173ea8fb595d7350f42eec20234591,c45856bcc8b910c568f611c011ee9df91e423326..593b657d3e59e21d3e73dac35086a64a850f8b69
@@@ -709,13 -709,11 +709,13 @@@ int i915_cmd_parser_init_ring(struct in
        BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
        BUG_ON(!validate_regs_sorted(ring));
  
 -      ret = init_hash_table(ring, cmd_tables, cmd_table_count);
 -      if (ret) {
 -              DRM_ERROR("CMD: cmd_parser_init failed!\n");
 -              fini_hash_table(ring);
 -              return ret;
 +      if (hash_empty(ring->cmd_hash)) {
 +              ret = init_hash_table(ring, cmd_tables, cmd_table_count);
 +              if (ret) {
 +                      DRM_ERROR("CMD: cmd_parser_init failed!\n");
 +                      fini_hash_table(ring);
 +                      return ret;
 +              }
        }
  
        ring->needs_cmd_parser = true;
@@@ -844,8 -842,6 +844,6 @@@ finish
   */
  bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
  {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        if (!ring->needs_cmd_parser)
                return false;
  
         * disabled. That will cause all of the parser's PPGTT checks to
         * fail. For now, disable parsing when PPGTT is off.
         */
-       if (!dev_priv->mm.aliasing_ppgtt)
+       if (USES_PPGTT(ring->dev))
                return false;
  
        return (i915.enable_cmd_parser == 1);
index e42925f76b4bb807393040e60c809fa8bc84e096,6f410cfb051000fb062d1578a5077f7557cbff6e..b672b843fd5e5831323094824116cffe912d8a55
  static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
  static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
  
- bool intel_enable_ppgtt(struct drm_device *dev, bool full)
- {
-       if (i915.enable_ppgtt == 0)
-               return false;
-       if (i915.enable_ppgtt == 1 && full)
-               return false;
-       return true;
- }
  static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
  {
        if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
@@@ -78,7 -67,6 +67,6 @@@ static void ppgtt_bind_vma(struct i915_
                           enum i915_cache_level cache_level,
                           u32 flags);
  static void ppgtt_unbind_vma(struct i915_vma *vma);
- static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
  
  static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                                             enum i915_cache_level level,
@@@ -216,19 -204,12 +204,12 @@@ static gen6_gtt_pte_t iris_pte_encode(d
  
  /* Broadwell Page Directory Pointer Descriptors */
  static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
-                          uint64_t val, bool synchronous)
+                          uint64_t val)
  {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int ret;
  
        BUG_ON(entry >= 4);
  
-       if (synchronous) {
-               I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
-               I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
-               return 0;
-       }
        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;
  }
  
  static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring,
-                         bool synchronous)
+                         struct intel_engine_cs *ring)
  {
        int i, ret;
  
  
        for (i = used_pd - 1; i >= 0; i--) {
                dma_addr_t addr = ppgtt->pd_dma_addr[i];
-               ret = gen8_write_pdp(ring, i, addr, synchronous);
+               ret = gen8_write_pdp(ring, i, addr);
                if (ret)
                        return ret;
        }
@@@ -403,9 -383,6 +383,6 @@@ static void gen8_ppgtt_cleanup(struct i
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
  
-       list_del(&vm->global_link);
-       drm_mm_takedown(&vm->mm);
        gen8_ppgtt_unmap_pages(ppgtt);
        gen8_ppgtt_free(ppgtt);
  }
@@@ -615,7 -592,6 +592,6 @@@ static int gen8_ppgtt_init(struct i915_
                kunmap_atomic(pd_vaddr);
        }
  
-       ppgtt->enable = gen8_ppgtt_enable;
        ppgtt->switch_mm = gen8_mm_switch;
        ppgtt->base.clear_range = gen8_ppgtt_clear_range;
        ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
@@@ -724,29 -700,10 +700,10 @@@ static uint32_t get_pd_offset(struct i9
  }
  
  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                        struct intel_engine_cs *ring,
-                        bool synchronous)
+                        struct intel_engine_cs *ring)
  {
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
-       /* If we're in reset, we can assume the GPU is sufficiently idle to
-        * manually frob these bits. Ideally we could use the ring functions,
-        * except our error handling makes it quite difficult (can't use
-        * intel_ring_begin, ring->flush, or intel_ring_advance)
-        *
-        * FIXME: We should try not to special case reset
-        */
-       if (synchronous ||
-           i915_reset_in_progress(&dev_priv->gpu_error)) {
-               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-               POSTING_READ(RING_PP_DIR_BASE(ring));
-               return 0;
-       }
        /* NB: TLBs must be flushed and invalidated before a switch */
        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
  }
  
  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring,
-                         bool synchronous)
+                         struct intel_engine_cs *ring)
  {
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
-       /* If we're in reset, we can assume the GPU is sufficiently idle to
-        * manually frob these bits. Ideally we could use the ring functions,
-        * except our error handling makes it quite difficult (can't use
-        * intel_ring_begin, ring->flush, or intel_ring_advance)
-        *
-        * FIXME: We should try not to special case reset
-        */
-       if (synchronous ||
-           i915_reset_in_progress(&dev_priv->gpu_error)) {
-               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-               POSTING_READ(RING_PP_DIR_BASE(ring));
-               return 0;
-       }
        /* NB: TLBs must be flushed and invalidated before a switch */
        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
  }
  
  static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring,
-                         bool synchronous)
+                         struct intel_engine_cs *ring)
  {
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       if (!synchronous)
-               return 0;
  
        I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
        I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
        return 0;
  }
  
- static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+ static void gen8_ppgtt_enable(struct drm_device *dev)
  {
-       struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
-       int j, ret;
+       int j;
  
        for_each_ring(ring, dev_priv, j) {
                I915_WRITE(RING_MODE_GEN7(ring),
                           _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-               /* We promise to do a switch later with FULL PPGTT. If this is
-                * aliasing, this is the one and only switch we'll do */
-               if (USES_FULL_PPGTT(dev))
-                       continue;
-               ret = ppgtt->switch_mm(ppgtt, ring, true);
-               if (ret)
-                       goto err_out;
        }
-       return 0;
- err_out:
-       for_each_ring(ring, dev_priv, j)
-               I915_WRITE(RING_MODE_GEN7(ring),
-                          _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
-       return ret;
  }
  
- static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+ static void gen7_ppgtt_enable(struct drm_device *dev)
  {
-       struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        uint32_t ecochk, ecobits;
        I915_WRITE(GAM_ECOCHK, ecochk);
  
        for_each_ring(ring, dev_priv, i) {
-               int ret;
                /* GFX_MODE is per-ring on gen7+ */
                I915_WRITE(RING_MODE_GEN7(ring),
                           _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-               /* We promise to do a switch later with FULL PPGTT. If this is
-                * aliasing, this is the one and only switch we'll do */
-               if (USES_FULL_PPGTT(dev))
-                       continue;
-               ret = ppgtt->switch_mm(ppgtt, ring, true);
-               if (ret)
-                       return ret;
        }
-       return 0;
  }
  
- static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+ static void gen6_ppgtt_enable(struct drm_device *dev)
  {
-       struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
        uint32_t ecochk, gab_ctl, ecobits;
-       int i;
  
        ecobits = I915_READ(GAC_ECO_BITS);
        I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
        I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
  
        I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-       for_each_ring(ring, dev_priv, i) {
-               int ret = ppgtt->switch_mm(ppgtt, ring, true);
-               if (ret)
-                       return ret;
-       }
-       return 0;
  }
  
  /* PPGTT support for Sandybridge/Gen6 and later */
@@@ -1029,8 -922,6 +922,6 @@@ static void gen6_ppgtt_cleanup(struct i
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
  
-       list_del(&vm->global_link);
-       drm_mm_takedown(&ppgtt->base.mm);
        drm_mm_remove_node(&ppgtt->node);
  
        gen6_ppgtt_unmap_pages(ppgtt);
@@@ -1151,13 -1042,10 +1042,10 @@@ static int gen6_ppgtt_init(struct i915_
  
        ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
        if (IS_GEN6(dev)) {
-               ppgtt->enable = gen6_ppgtt_enable;
                ppgtt->switch_mm = gen6_mm_switch;
        } else if (IS_HASWELL(dev)) {
-               ppgtt->enable = gen7_ppgtt_enable;
                ppgtt->switch_mm = hsw_mm_switch;
        } else if (IS_GEN7(dev)) {
-               ppgtt->enable = gen7_ppgtt_enable;
                ppgtt->switch_mm = gen7_mm_switch;
        } else
                BUG();
                         ppgtt->node.size >> 20,
                         ppgtt->node.start / PAGE_SIZE);
  
+       gen6_write_pdes(ppgtt);
+       DRM_DEBUG("Adding PPGTT at offset %x\n",
+                 ppgtt->pd_offset << 10);
        return 0;
  }
  
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
  
        ppgtt->base.dev = dev;
        ppgtt->base.scratch = dev_priv->gtt.base.scratch;
  
        if (INTEL_INFO(dev)->gen < 8)
-               ret = gen6_ppgtt_init(ppgtt);
+               return gen6_ppgtt_init(ppgtt);
        else if (IS_GEN8(dev))
-               ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
+               return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
        else
                BUG();
+ }
+ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret = 0;
  
-       if (!ret) {
-               struct drm_i915_private *dev_priv = dev->dev_private;
+       ret = __hw_ppgtt_init(dev, ppgtt);
+       if (ret == 0) {
                kref_init(&ppgtt->ref);
                drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
                            ppgtt->base.total);
                i915_init_vm(dev_priv, &ppgtt->base);
-               if (INTEL_INFO(dev)->gen < 8) {
-                       gen6_write_pdes(ppgtt);
-                       DRM_DEBUG("Adding PPGTT at offset %x\n",
-                                 ppgtt->pd_offset << 10);
+       }
+       return ret;
+ }
+ int i915_ppgtt_init_hw(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+       int i, ret = 0;
+       /* In the case of execlists, PPGTT is enabled by the context descriptor
+        * and the PDPs are contained within the context itself.  We don't
+        * need to do anything here. */
+       if (i915.enable_execlists)
+               return 0;
+       if (!USES_PPGTT(dev))
+               return 0;
+       if (IS_GEN6(dev))
+               gen6_ppgtt_enable(dev);
+       else if (IS_GEN7(dev))
+               gen7_ppgtt_enable(dev);
+       else if (INTEL_INFO(dev)->gen >= 8)
+               gen8_ppgtt_enable(dev);
+       else
+               WARN_ON(1);
+       if (ppgtt) {
+               for_each_ring(ring, dev_priv, i) {
+                       ret = ppgtt->switch_mm(ppgtt, ring);
+                       if (ret != 0)
+                               return ret;
                }
        }
  
        return ret;
  }
+ struct i915_hw_ppgtt *
+ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
+ {
+       struct i915_hw_ppgtt *ppgtt;
+       int ret;
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ERR_PTR(-ENOMEM);
+       ret = i915_ppgtt_init(dev, ppgtt);
+       if (ret) {
+               kfree(ppgtt);
+               return ERR_PTR(ret);
+       }
+       ppgtt->file_priv = fpriv;
+       return ppgtt;
+ }
+ void  i915_ppgtt_release(struct kref *kref)
+ {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(kref, struct i915_hw_ppgtt, ref);
+       /* vmas should already be unbound */
+       WARN_ON(!list_empty(&ppgtt->base.active_list));
+       WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+       list_del(&ppgtt->base.global_link);
+       drm_mm_takedown(&ppgtt->base.mm);
+       ppgtt->base.cleanup(&ppgtt->base);
+       kfree(ppgtt);
+ }
  
  static void
  ppgtt_bind_vma(struct i915_vma *vma,
@@@ -1310,16 -1273,6 +1273,16 @@@ void i915_check_and_clear_faults(struc
        POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
  }
  
 +static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
 +{
 +      if (INTEL_INFO(dev_priv->dev)->gen < 6) {
 +              intel_gtt_chipset_flush();
 +      } else {
 +              I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 +              POSTING_READ(GFX_FLSH_CNTL_GEN6);
 +      }
 +}
 +
  void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
                                       dev_priv->gtt.base.start,
                                       dev_priv->gtt.base.total,
                                       true);
 +
 +      i915_ggtt_flush(dev_priv);
  }
  
  void i915_gem_restore_gtt_mappings(struct drm_device *dev)
                gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
        }
  
 -      i915_gem_chipset_flush(dev);
 +      i915_ggtt_flush(dev_priv);
  }
  
  int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@@ -1687,10 -1638,10 +1650,10 @@@ static void i915_gtt_color_adjust(struc
        }
  }
  
void i915_gem_setup_global_gtt(struct drm_device *dev,
-                              unsigned long start,
-                              unsigned long mappable_end,
-                              unsigned long end)
int i915_gem_setup_global_gtt(struct drm_device *dev,
+                             unsigned long start,
+                             unsigned long mappable_end,
+                             unsigned long end)
  {
        /* Let GEM Manage all of the aperture.
         *
        struct drm_mm_node *entry;
        struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;
+       int ret;
  
        BUG_ON(mappable_end > end);
  
        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
-               int ret;
                DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
                              i915_gem_obj_ggtt_offset(obj), obj->base.size);
  
                WARN_ON(i915_gem_obj_ggtt_bound(obj));
                ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
-               if (ret)
-                       DRM_DEBUG_KMS("Reservation failed\n");
+               if (ret) {
+                       DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
+                       return ret;
+               }
                obj->has_global_gtt_mapping = 1;
        }
  
  
        /* And finally clear the reserved guard page */
        ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+       if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+               struct i915_hw_ppgtt *ppgtt;
+               ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+               if (!ppgtt)
+                       return -ENOMEM;
+               ret = __hw_ppgtt_init(dev, ppgtt);
+               if (ret != 0)
+                       return ret;
+               dev_priv->mm.aliasing_ppgtt = ppgtt;
+       }
+       return 0;
  }
  
  void i915_gem_init_global_gtt(struct drm_device *dev)
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
  }
  
+ void i915_global_gtt_cleanup(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
+       if (dev_priv->mm.aliasing_ppgtt) {
+               struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+               ppgtt->base.cleanup(&ppgtt->base);
+       }
+       if (drm_mm_initialized(&vm->mm)) {
+               drm_mm_takedown(&vm->mm);
+               list_del(&vm->global_link);
+       }
+       vm->cleanup(vm);
+ }
  static int setup_scratch_page(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -2022,10 -2011,6 +2023,6 @@@ static void gen6_gmch_remove(struct i91
  
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
  
-       if (drm_mm_initialized(&vm->mm)) {
-               drm_mm_takedown(&vm->mm);
-               list_del(&vm->global_link);
-       }
        iounmap(gtt->gsm);
        teardown_scratch_page(vm->dev);
  }
@@@ -2058,10 -2043,6 +2055,6 @@@ static int i915_gmch_probe(struct drm_d
  
  static void i915_gmch_remove(struct i915_address_space *vm)
  {
-       if (drm_mm_initialized(&vm->mm)) {
-               drm_mm_takedown(&vm->mm);
-               list_del(&vm->global_link);
-       }
        intel_gmch_remove();
  }
  
@@@ -2160,8 -2141,10 +2153,10 @@@ static struct i915_vma *__i915_gem_vma_
        /* Keep GGTT vmas first to make debug easier */
        if (i915_is_ggtt(vm))
                list_add(&vma->vma_link, &obj->vma_list);
-       else
+       else {
                list_add_tail(&vma->vma_link, &obj->vma_list);
+               i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+       }
  
        return vma;
  }
index 9842fd2e742a40d0b83c51f2e9f2dd012e1bc304,139f490d464df8ae5ac1c400c877d3cb67b23988..c91cb2033cc5ec02c248b95271fac95469e1acde
@@@ -35,6 -35,7 +35,7 @@@ struct i915_params i915 __read_mostly 
        .vbt_sdvo_panel_type = -1,
        .enable_rc6 = -1,
        .enable_fbc = -1,
+       .enable_execlists = 0,
        .enable_hangcheck = true,
        .enable_ppgtt = -1,
        .enable_psr = 0,
@@@ -66,12 -67,12 +67,12 @@@ module_param_named(powersave, i915.powe
  MODULE_PARM_DESC(powersave,
        "Enable powersavings, fbc, downclocking, etc. (default: true)");
  
 -module_param_named(semaphores, i915.semaphores, int, 0400);
 +module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
  MODULE_PARM_DESC(semaphores,
        "Use semaphores for inter-ring sync "
        "(default: -1 (use per-chip defaults))");
  
 -module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
 +module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
  MODULE_PARM_DESC(enable_rc6,
        "Enable power-saving render C-state 6. "
        "Different stages can be selected via bitmask values "
@@@ -79,7 -80,7 +80,7 @@@
        "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
        "default: -1 (use per-chip default)");
  
 -module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
 +module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
  MODULE_PARM_DESC(enable_fbc,
        "Enable frame buffer compression for power savings "
        "(default: -1 (use per-chip default))");
@@@ -113,11 -114,16 +114,16 @@@ MODULE_PARM_DESC(enable_hangcheck
        "WARNING: Disabling this can cause system wide hangs. "
        "(default: true)");
  
 -module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
 +module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
  MODULE_PARM_DESC(enable_ppgtt,
        "Override PPGTT usage. "
        "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
  
+ module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
+ MODULE_PARM_DESC(enable_execlists,
+       "Override execlists usage. "
+       "(-1=auto, 0=disabled [default], 1=enabled)");
  module_param_named(enable_psr, i915.enable_psr, int, 0600);
  MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
  
index afcc8dd40bdd437fa32c113d6aa966fb87bf3a1c,b3e579b4428e5989f0404813621a0750cc859abb..a4bd90f36a03396ea7e04854b2520a38e73918b2
@@@ -627,16 -627,16 +627,16 @@@ parse_edp(struct drm_i915_private *dev_
  
        switch (edp_link_params->preemphasis) {
        case EDP_PREEMPHASIS_NONE:
-               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
                break;
        case EDP_PREEMPHASIS_3_5dB:
-               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
                break;
        case EDP_PREEMPHASIS_6dB:
-               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
                break;
        case EDP_PREEMPHASIS_9_5dB:
-               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+               dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
                break;
        default:
                DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
  
        switch (edp_link_params->vswing) {
        case EDP_VSWING_0_4V:
-               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
+               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
                break;
        case EDP_VSWING_0_6V:
-               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
+               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
                break;
        case EDP_VSWING_0_8V:
-               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
+               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
                break;
        case EDP_VSWING_1_2V:
-               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+               dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
                break;
        default:
                DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
@@@ -946,7 -946,7 +946,7 @@@ static void parse_ddi_port(struct drm_i
                DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
                              port_name(port));
        if (is_dvi && (port == PORT_A || port == PORT_E))
 -              DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
 +              DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
        if (!is_dvi && !is_dp && !is_crt)
                DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
                              port_name(port));
        if (bdb->version >= 158) {
                /* The VBT HDMI level shift values match the table we have. */
                hdmi_level_shift = child->raw[7] & 0xF;
-               if (hdmi_level_shift < 0xC) {
-                       DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
-                                     port_name(port),
-                                     hdmi_level_shift);
-                       info->hdmi_level_shift = hdmi_level_shift;
-               }
+               DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+                             port_name(port),
+                             hdmi_level_shift);
+               info->hdmi_level_shift = hdmi_level_shift;
        }
  }
  
@@@ -1114,8 -1112,7 +1112,7 @@@ init_vbt_defaults(struct drm_i915_priva
                struct ddi_vbt_port_info *info =
                        &dev_priv->vbt.ddi_port_info[port];
  
-               /* Recommended BSpec default: 800mV 0dB. */
-               info->hdmi_level_shift = 6;
+               info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
  
                info->supports_dvi = (port != PORT_A && port != PORT_E);
                info->supports_hdmi = info->supports_dvi;
index fdff1d420c14ab3536717607975c6cfa273fb474,59754451ae504a38e1b4064066172647ca528c71..f6a3fdd5589e33420e2f9b15ef4de42193eea356
@@@ -111,7 -111,7 +111,7 @@@ static struct intel_dp *intel_attached_
  }
  
  static void intel_dp_link_down(struct intel_dp *intel_dp);
- static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
+ static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
  static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
  
  int
@@@ -290,32 -290,201 +290,201 @@@ intel_dp_init_panel_power_sequencer_reg
                                              struct intel_dp *intel_dp,
                                              struct edp_power_seq *out);
  
+ static void pps_lock(struct intel_dp *intel_dp)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &intel_dig_port->base;
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
+       /*
+        * See vlv_power_sequencer_reset() why we need
+        * a power domain reference here.
+        */
+       power_domain = intel_display_port_power_domain(encoder);
+       intel_display_power_get(dev_priv, power_domain);
+       mutex_lock(&dev_priv->pps_mutex);
+ }
+ static void pps_unlock(struct intel_dp *intel_dp)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &intel_dig_port->base;
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
+       mutex_unlock(&dev_priv->pps_mutex);
+       power_domain = intel_display_port_power_domain(encoder);
+       intel_display_power_put(dev_priv, power_domain);
+ }
  static enum pipe
  vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum port port = intel_dig_port->port;
-       enum pipe pipe;
+       struct intel_encoder *encoder;
+       unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+       struct edp_power_seq power_seq;
+       lockdep_assert_held(&dev_priv->pps_mutex);
  
-       /* modeset should have pipe */
-       if (crtc)
-               return to_intel_crtc(crtc)->pipe;
+       if (intel_dp->pps_pipe != INVALID_PIPE)
+               return intel_dp->pps_pipe;
+       /*
+        * We don't have power sequencer currently.
+        * Pick one that's not used by other ports.
+        */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+                           base.head) {
+               struct intel_dp *tmp;
+               if (encoder->type != INTEL_OUTPUT_EDP)
+                       continue;
+               tmp = enc_to_intel_dp(&encoder->base);
+               if (tmp->pps_pipe != INVALID_PIPE)
+                       pipes &= ~(1 << tmp->pps_pipe);
+       }
+       /*
+        * Didn't find one. This should not happen since there
+        * are two power sequencers and up to two eDP ports.
+        */
+       if (WARN_ON(pipes == 0))
+               return PIPE_A;
+       intel_dp->pps_pipe = ffs(pipes) - 1;
+       DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
+                     pipe_name(intel_dp->pps_pipe),
+                     port_name(intel_dig_port->port));
+       /* init power sequencer on this pipe and port */
+       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                     &power_seq);
+       return intel_dp->pps_pipe;
+ }
+ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+                              enum pipe pipe);
+ static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+                              enum pipe pipe)
+ {
+       return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
+ }
+ static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+                               enum pipe pipe)
+ {
+       return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+ }
+ static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+                        enum pipe pipe)
+ {
+       return true;
+ }
+ static enum pipe
+ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+                    enum port port,
+                    vlv_pipe_check pipe_check)
+ {
+       enum pipe pipe;
  
-       /* init time, try to find a pipe with this port selected */
        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;
-               if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
-                       return pipe;
-               if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
-                       return pipe;
+               if (port_sel != PANEL_PORT_SELECT_VLV(port))
+                       continue;
+               if (!pipe_check(dev_priv, pipe))
+                       continue;
+               return pipe;
        }
  
-       /* shrug */
-       return PIPE_A;
+       return INVALID_PIPE;
+ }
+ static void
+ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct edp_power_seq power_seq;
+       enum port port = intel_dig_port->port;
+       lockdep_assert_held(&dev_priv->pps_mutex);
+       /* try to find a pipe with this port selected */
+       /* first pick one where the panel is on */
+       intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+                                                 vlv_pipe_has_pp_on);
+       /* didn't find one? pick one where vdd is on */
+       if (intel_dp->pps_pipe == INVALID_PIPE)
+               intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+                                                         vlv_pipe_has_vdd_on);
+       /* didn't find one? pick one with just the correct port */
+       if (intel_dp->pps_pipe == INVALID_PIPE)
+               intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+                                                         vlv_pipe_any);
+       /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+       if (intel_dp->pps_pipe == INVALID_PIPE) {
+               DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
+                             port_name(port));
+               return;
+       }
+       DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
+                     port_name(port), pipe_name(intel_dp->pps_pipe));
+       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                     &power_seq);
+ }
+ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_encoder *encoder;
+       if (WARN_ON(!IS_VALLEYVIEW(dev)))
+               return;
+       /*
+        * We can't grab pps_mutex here due to deadlock with power_domain
+        * mutex when power_domain functions are called while holding pps_mutex.
+        * That also means that in order to use pps_pipe the code needs to
+        * hold both a power domain reference and pps_mutex, and the power domain
+        * reference get/put must be done while _not_ holding pps_mutex.
+        * pps_{lock,unlock}() do these steps in the correct order, so one
+        * should use them always.
+        */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+               struct intel_dp *intel_dp;
+               if (encoder->type != INTEL_OUTPUT_EDP)
+                       continue;
+               intel_dp = enc_to_intel_dp(&encoder->base);
+               intel_dp->pps_pipe = INVALID_PIPE;
+       }
  }
  
  static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
@@@ -349,12 -518,15 +518,15 @@@ static int edp_notify_handler(struct no
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;
-       enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
  
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;
  
+       pps_lock(intel_dp);
        if (IS_VALLEYVIEW(dev)) {
+               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                msleep(intel_dp->panel_power_cycle_delay);
        }
  
+       pps_unlock(intel_dp);
        return 0;
  }
  
@@@ -374,6 -548,8 +548,8 @@@ static bool edp_have_panel_power(struc
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
+       lockdep_assert_held(&dev_priv->pps_mutex);
        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
  }
  
@@@ -381,13 -557,10 +557,10 @@@ static bool edp_have_panel_vdd(struct i
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct intel_encoder *intel_encoder = &intel_dig_port->base;
-       enum intel_display_power_domain power_domain;
  
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       return intel_display_power_enabled(dev_priv, power_domain) &&
-              (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
+       lockdep_assert_held(&dev_priv->pps_mutex);
+       return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
  }
  
  static void
@@@ -535,7 -708,15 +708,15 @@@ intel_dp_aux_ch(struct intel_dp *intel_
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;
  
-       vdd = _edp_panel_vdd_on(intel_dp);
+       pps_lock(intel_dp);
+       /*
+        * We will be called with VDD already enabled for dpcd/edid/oui reads.
+        * In such cases we want to leave VDD enabled and it's up to upper layers
+        * to turn it off. But for eg. i2c-dev access we need to turn it on/off
+        * ourselves.
+        */
+       vdd = edp_panel_vdd_on(intel_dp);
  
        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
@@@ -644,6 -825,8 +825,8 @@@ out
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);
  
+       pps_unlock(intel_dp);
        return ret;
  }
  
@@@ -828,20 -1011,6 +1011,6 @@@ intel_dp_set_clock(struct intel_encode
        }
  }
  
- static void
- intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
- {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum transcoder transcoder = crtc->config.cpu_transcoder;
-       I915_WRITE(PIPE_DATA_M2(transcoder),
-               TU_SIZE(m_n->tu) | m_n->gmch_m);
-       I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
-       I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
-       I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
- }
  bool
  intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
                pipe_config->has_pch_encoder = true;
  
        pipe_config->has_dp_encoder = true;
+       pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio;
  
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                        bpp = dev_priv->vbt.edp_bpp;
                }
  
-               if (IS_BROADWELL(dev)) {
-                       /* Yes, it's an ugly hack. */
-                       min_lane_count = max_lane_count;
-                       DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
-                                     min_lane_count);
-               } else if (dev_priv->vbt.edp_lanes) {
-                       min_lane_count = min(dev_priv->vbt.edp_lanes,
-                                            max_lane_count);
-                       DRM_DEBUG_KMS("using min %u lanes per VBT\n",
-                                     min_lane_count);
-               }
-               if (dev_priv->vbt.edp_rate) {
-                       min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
-                       DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
-                                     bws[min_clock]);
-               }
+               /*
+                * Use the maximum clock and number of lanes the eDP panel
+                * advertizes being capable of. The panels are generally
+                * designed to support only a single clock and lane
+                * configuration, and typically these values correspond to the
+                * native resolution of the panel.
+                */
+               min_lane_count = max_lane_count;
+               min_clock = max_clock;
        }
  
        for (; bpp >= 6*3; bpp -= 2*3) {
@@@ -970,13 -1132,14 +1132,14 @@@ found
  
        if (intel_connector->panel.downclock_mode != NULL &&
                intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+                       pipe_config->has_drrs = true;
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
                                &pipe_config->dp_m2_n2);
        }
  
-       if (HAS_DDI(dev))
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
        else
                intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@@ -1110,6 -1273,8 +1273,8 @@@ static void wait_panel_status(struct in
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;
  
+       lockdep_assert_held(&dev_priv->pps_mutex);
        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  
@@@ -1173,13 -1338,20 +1338,20 @@@ static  u32 ironlake_get_pp_control(str
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 control;
  
+       lockdep_assert_held(&dev_priv->pps_mutex);
        control = I915_READ(_pp_ctrl_reg(intel_dp));
        control &= ~PANEL_UNLOCK_MASK;
        control |= PANEL_UNLOCK_REGS;
        return control;
  }
  
- static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
+ /*
+  * Must be paired with edp_panel_vdd_off().
+  * Must hold pps_mutex around the whole on/off sequence.
+  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
+  */
+ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        u32 pp_stat_reg, pp_ctrl_reg;
        bool need_to_disable = !intel_dp->want_panel_vdd;
  
+       lockdep_assert_held(&dev_priv->pps_mutex);
        if (!is_edp(intel_dp))
                return false;
  
        return need_to_disable;
  }
  
+ /*
+  * Must be paired with intel_edp_panel_vdd_off() or
+  * intel_edp_panel_off().
+  * Nested calls to these functions are not allowed since
+  * we drop the lock. Caller must use some higher level
+  * locking to prevent nested calls from other threads.
+  */
  void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
  {
-       if (is_edp(intel_dp)) {
-               bool vdd = _edp_panel_vdd_on(intel_dp);
+       bool vdd;
  
-               WARN(!vdd, "eDP VDD already requested on\n");
-       }
+       if (!is_edp(intel_dp))
+               return;
+       pps_lock(intel_dp);
+       vdd = edp_panel_vdd_on(intel_dp);
+       pps_unlock(intel_dp);
+       WARN(!vdd, "eDP VDD already requested on\n");
  }
  
  static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_digital_port *intel_dig_port =
+               dp_to_dig_port(intel_dp);
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
  
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       lockdep_assert_held(&dev_priv->pps_mutex);
  
-       if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
-               struct intel_digital_port *intel_dig_port =
-                                               dp_to_dig_port(intel_dp);
-               struct intel_encoder *intel_encoder = &intel_dig_port->base;
-               enum intel_display_power_domain power_domain;
+       WARN_ON(intel_dp->want_panel_vdd);
  
-               DRM_DEBUG_KMS("Turning eDP VDD off\n");
+       if (!edp_have_panel_vdd(intel_dp))
+               return;
  
-               pp = ironlake_get_pp_control(intel_dp);
-               pp &= ~EDP_FORCE_VDD;
+       DRM_DEBUG_KMS("Turning eDP VDD off\n");
  
-               pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-               pp_stat_reg = _pp_stat_reg(intel_dp);
+       pp = ironlake_get_pp_control(intel_dp);
+       pp &= ~EDP_FORCE_VDD;
  
-               I915_WRITE(pp_ctrl_reg, pp);
-               POSTING_READ(pp_ctrl_reg);
+       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+       pp_stat_reg = _pp_stat_reg(intel_dp);
  
-               /* Make sure sequencer is idle before allowing subsequent activity */
-               DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
-               I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+       I915_WRITE(pp_ctrl_reg, pp);
+       POSTING_READ(pp_ctrl_reg);
  
-               if ((pp & POWER_TARGET_ON) == 0)
-                       intel_dp->last_power_cycle = jiffies;
+       /* Make sure sequencer is idle before allowing subsequent activity */
+       DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+       I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
  
-               power_domain = intel_display_port_power_domain(intel_encoder);
-               intel_display_power_put(dev_priv, power_domain);
-       }
+       if ((pp & POWER_TARGET_ON) == 0)
+               intel_dp->last_power_cycle = jiffies;
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_put(dev_priv, power_domain);
  }
  
  static void edp_panel_vdd_work(struct work_struct *__work)
  {
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
  
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       edp_panel_vdd_off_sync(intel_dp);
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+       pps_lock(intel_dp);
+       if (!intel_dp->want_panel_vdd)
+               edp_panel_vdd_off_sync(intel_dp);
+       pps_unlock(intel_dp);
  }
  
  static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
  }
  
+ /*
+  * Must be paired with edp_panel_vdd_on().
+  * Must hold pps_mutex around the whole on/off sequence.
+  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
+  */
  static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
  {
+       struct drm_i915_private *dev_priv =
+               intel_dp_to_dev(intel_dp)->dev_private;
+       lockdep_assert_held(&dev_priv->pps_mutex);
        if (!is_edp(intel_dp))
                return;
  
                edp_panel_vdd_schedule_off(intel_dp);
  }
  
+ /*
+  * Must be paired with intel_edp_panel_vdd_on().
+  * Nested calls to these functions are not allowed since
+  * we drop the lock. Caller must use some higher level
+  * locking to prevent nested calls from other threads.
+  */
+ static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+ {
+       if (!is_edp(intel_dp))
+               return;
+       pps_lock(intel_dp);
+       edp_panel_vdd_off(intel_dp, sync);
+       pps_unlock(intel_dp);
+ }
  void intel_edp_panel_on(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
  
        DRM_DEBUG_KMS("Turn eDP power on\n");
  
+       pps_lock(intel_dp);
        if (edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
-               return;
+               goto out;
        }
  
        wait_panel_power_cycle(intel_dp);
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
+  out:
+       pps_unlock(intel_dp);
  }
  
  void intel_edp_panel_off(struct intel_dp *intel_dp)
  
        DRM_DEBUG_KMS("Turn eDP power off\n");
  
+       pps_lock(intel_dp);
        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
  
        pp = ironlake_get_pp_control(intel_dp);
        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
+       pps_unlock(intel_dp);
  }
  
- void intel_edp_backlight_on(struct intel_dp *intel_dp)
+ /* Enable backlight in the panel power control. */
+ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        u32 pp;
        u32 pp_ctrl_reg;
  
-       if (!is_edp(intel_dp))
-               return;
-       DRM_DEBUG_KMS("\n");
-       intel_panel_enable_backlight(intel_dp->attached_connector);
        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);
+       pps_lock(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;
  
  
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
+       pps_unlock(intel_dp);
  }
  
- void intel_edp_backlight_off(struct intel_dp *intel_dp)
+ /* Enable backlight PWM and backlight PP control. */
+ void intel_edp_backlight_on(struct intel_dp *intel_dp)
+ {
+       if (!is_edp(intel_dp))
+               return;
+       DRM_DEBUG_KMS("\n");
+       intel_panel_enable_backlight(intel_dp->attached_connector);
+       _intel_edp_backlight_on(intel_dp);
+ }
+ /* Disable backlight in the panel power control. */
+ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        if (!is_edp(intel_dp))
                return;
  
-       DRM_DEBUG_KMS("\n");
+       pps_lock(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;
  
  
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
-       intel_dp->last_backlight_off = jiffies;
  
+       pps_unlock(intel_dp);
+       intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
+ }
  
+ /* Disable backlight PP control and backlight PWM. */
+ void intel_edp_backlight_off(struct intel_dp *intel_dp)
+ {
+       if (!is_edp(intel_dp))
+               return;
+       DRM_DEBUG_KMS("\n");
+       _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
  }
  
+ /*
+  * Hook for controlling the panel power control backlight through the bl_power
+  * sysfs attribute. Take care to handle multiple calls.
+  */
+ static void intel_edp_backlight_power(struct intel_connector *connector,
+                                     bool enable)
+ {
+       struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+       bool is_enabled;
+       pps_lock(intel_dp);
+       is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+       pps_unlock(intel_dp);
+       if (is_enabled == enable)
+               return;
+       DRM_DEBUG_KMS("panel power control backlight %s\n",
+                     enable ? "enable" : "disable");
+       if (enable)
+               _intel_edp_backlight_on(intel_dp);
+       else
+               _intel_edp_backlight_off(intel_dp);
+ }
  static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@@ -1515,8 -1789,6 +1789,6 @@@ void intel_dp_sink_dpms(struct intel_d
        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
-               if (ret != 1)
-                       DRM_DEBUG_DRIVER("failed to write sink power state\n");
        } else {
                /*
                 * When turning on, we need to retry for 1ms to give the sink
                        msleep(1);
                }
        }
+       if (ret != 1)
+               DRM_DEBUG_KMS("failed to %s sink power state\n",
+                             mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
  }
  
  static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                        return true;
                }
  
-               for_each_pipe(i) {
+               for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
@@@ -1631,10 -1907,6 +1907,10 @@@ static void intel_dp_get_config(struct 
  
        pipe_config->adjusted_mode.flags |= flags;
  
 +      if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
 +          tmp & DP_COLOR_RANGE_16_235)
 +              pipe_config->limited_color_range = true;
 +
        pipe_config->has_dp_encoder = true;
  
        intel_dp_get_m_n(crtc, pipe_config);
@@@ -2036,7 -2308,6 +2312,6 @@@ void intel_edp_psr_init(struct drm_devi
  static void intel_disable_dp(struct intel_encoder *encoder)
  {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
  
        /* Make sure the panel is off before trying to change the mode. But also
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);
  
-       /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
-       if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
+       /* disable the port before the pipe on g4x */
+       if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
  }
  
- static void g4x_post_disable_dp(struct intel_encoder *encoder)
+ static void ilk_post_disable_dp(struct intel_encoder *encoder)
  {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
  
-       if (port != PORT_A)
-               return;
        intel_dp_link_down(intel_dp);
-       ironlake_edp_pll_off(intel_dp);
+       if (port == PORT_A)
+               ironlake_edp_pll_off(intel_dp);
  }
  
  static void vlv_post_disable_dp(struct intel_encoder *encoder)
@@@ -2103,7 -2372,105 +2376,105 @@@ static void chv_post_disable_dp(struct 
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
  
-       mutex_unlock(&dev_priv->dpio_lock);
+       mutex_unlock(&dev_priv->dpio_lock);
+ }
+ static void
+ _intel_dp_set_link_train(struct intel_dp *intel_dp,
+                        uint32_t *DP,
+                        uint8_t dp_train_pat)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum port port = intel_dig_port->port;
+       if (HAS_DDI(dev)) {
+               uint32_t temp = I915_READ(DP_TP_CTL(port));
+               if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+                       temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+               else
+                       temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+               case DP_TRAINING_PATTERN_DISABLE:
+                       temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+                       break;
+               case DP_TRAINING_PATTERN_1:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+                       break;
+               case DP_TRAINING_PATTERN_2:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+                       break;
+               case DP_TRAINING_PATTERN_3:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+                       break;
+               }
+               I915_WRITE(DP_TP_CTL(port), temp);
+       } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
+               *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+               case DP_TRAINING_PATTERN_DISABLE:
+                       *DP |= DP_LINK_TRAIN_OFF_CPT;
+                       break;
+               case DP_TRAINING_PATTERN_1:
+                       *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+                       break;
+               case DP_TRAINING_PATTERN_2:
+                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+                       break;
+               case DP_TRAINING_PATTERN_3:
+                       DRM_ERROR("DP training pattern 3 not supported\n");
+                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+                       break;
+               }
+       } else {
+               if (IS_CHERRYVIEW(dev))
+                       *DP &= ~DP_LINK_TRAIN_MASK_CHV;
+               else
+                       *DP &= ~DP_LINK_TRAIN_MASK;
+               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+               case DP_TRAINING_PATTERN_DISABLE:
+                       *DP |= DP_LINK_TRAIN_OFF;
+                       break;
+               case DP_TRAINING_PATTERN_1:
+                       *DP |= DP_LINK_TRAIN_PAT_1;
+                       break;
+               case DP_TRAINING_PATTERN_2:
+                       *DP |= DP_LINK_TRAIN_PAT_2;
+                       break;
+               case DP_TRAINING_PATTERN_3:
+                       if (IS_CHERRYVIEW(dev)) {
+                               *DP |= DP_LINK_TRAIN_PAT_3_CHV;
+                       } else {
+                               DRM_ERROR("DP training pattern 3 not supported\n");
+                               *DP |= DP_LINK_TRAIN_PAT_2;
+                       }
+                       break;
+               }
+       }
+ }
+ static void intel_dp_enable_port(struct intel_dp *intel_dp)
+ {
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       intel_dp->DP |= DP_PORT_EN;
+       /* enable with pattern 1 (as per spec) */
+       _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
+                                DP_TRAINING_PATTERN_1);
+       I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+       POSTING_READ(intel_dp->output_reg);
  }
  
  static void intel_enable_dp(struct intel_encoder *encoder)
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;
  
+       intel_dp_enable_port(intel_dp);
        intel_edp_panel_vdd_on(intel_dp);
+       intel_edp_panel_on(intel_dp);
+       intel_edp_panel_vdd_off(intel_dp, true);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
-       intel_edp_panel_on(intel_dp);
-       edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
  }
@@@ -2154,6 -2522,78 +2526,78 @@@ static void g4x_pre_enable_dp(struct in
        }
  }
  
+ static void vlv_steal_power_sequencer(struct drm_device *dev,
+                                     enum pipe pipe)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_encoder *encoder;
+       lockdep_assert_held(&dev_priv->pps_mutex);
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+                           base.head) {
+               struct intel_dp *intel_dp;
+               enum port port;
+               if (encoder->type != INTEL_OUTPUT_EDP)
+                       continue;
+               intel_dp = enc_to_intel_dp(&encoder->base);
+               port = dp_to_dig_port(intel_dp)->port;
+               if (intel_dp->pps_pipe != pipe)
+                       continue;
+               DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
+                             pipe_name(pipe), port_name(port));
+               /* make sure vdd is off before we steal it */
+               edp_panel_vdd_off_sync(intel_dp);
+               intel_dp->pps_pipe = INVALID_PIPE;
+       }
+ }
+ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &intel_dig_port->base;
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct edp_power_seq power_seq;
+       lockdep_assert_held(&dev_priv->pps_mutex);
+       if (intel_dp->pps_pipe == crtc->pipe)
+               return;
+       /*
+        * If another power sequencer was being used on this
+        * port previously make sure to turn off vdd there while
+        * we still have control of it.
+        */
+       if (intel_dp->pps_pipe != INVALID_PIPE)
+               edp_panel_vdd_off_sync(intel_dp);
+       /*
+        * We may be stealing the power
+        * sequencer from another port.
+        */
+       vlv_steal_power_sequencer(dev, crtc->pipe);
+       /* now it's all ours */
+       intel_dp->pps_pipe = crtc->pipe;
+       DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
+                     pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
+       /* init power sequencer on this pipe and port */
+       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                     &power_seq);
+ }
  static void vlv_pre_enable_dp(struct intel_encoder *encoder)
  {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
-       struct edp_power_seq power_seq;
        u32 val;
  
        mutex_lock(&dev_priv->dpio_lock);
        mutex_unlock(&dev_priv->dpio_lock);
  
        if (is_edp(intel_dp)) {
-               /* init power sequencer on this pipe and port */
-               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                             &power_seq);
+               pps_lock(intel_dp);
+               vlv_init_panel_power_sequencer(intel_dp);
+               pps_unlock(intel_dp);
        }
  
        intel_enable_dp(encoder);
@@@ -2229,7 -2667,6 +2671,6 @@@ static void chv_pre_enable_dp(struct in
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct edp_power_seq power_seq;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        mutex_unlock(&dev_priv->dpio_lock);
  
        if (is_edp(intel_dp)) {
-               /* init power sequencer on this pipe and port */
-               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                             &power_seq);
+               pps_lock(intel_dp);
+               vlv_init_panel_power_sequencer(intel_dp);
+               pps_unlock(intel_dp);
        }
  
        intel_enable_dp(encoder);
@@@ -2297,6 -2733,8 +2737,8 @@@ static void chv_dp_pre_pll_enable(struc
        enum pipe pipe = intel_crtc->pipe;
        u32 val;
  
+       intel_dp_prepare(encoder);
        mutex_lock(&dev_priv->dpio_lock);
  
        /* program left/right clock distribution */
@@@ -2395,13 -2833,13 +2837,13 @@@ intel_dp_voltage_max(struct intel_dp *i
        enum port port = dp_to_dig_port(intel_dp)->port;
  
        if (IS_VALLEYVIEW(dev))
-               return DP_TRAIN_VOLTAGE_SWING_1200;
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (IS_GEN7(dev) && port == PORT_A)
-               return DP_TRAIN_VOLTAGE_SWING_800;
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
        else if (HAS_PCH_CPT(dev) && port != PORT_A)
-               return DP_TRAIN_VOLTAGE_SWING_1200;
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else
-               return DP_TRAIN_VOLTAGE_SWING_800;
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  }
  
  static uint8_t
@@@ -2412,49 -2850,49 +2854,49 @@@ intel_dp_pre_emphasis_max(struct intel_
  
        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
-                       return DP_TRAIN_PRE_EMPHASIS_9_5;
-               case DP_TRAIN_VOLTAGE_SWING_600:
-                       return DP_TRAIN_PRE_EMPHASIS_6;
-               case DP_TRAIN_VOLTAGE_SWING_800:
-                       return DP_TRAIN_PRE_EMPHASIS_3_5;
-               case DP_TRAIN_VOLTAGE_SWING_1200:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
-                       return DP_TRAIN_PRE_EMPHASIS_0;
+                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
-                       return DP_TRAIN_PRE_EMPHASIS_9_5;
-               case DP_TRAIN_VOLTAGE_SWING_600:
-                       return DP_TRAIN_PRE_EMPHASIS_6;
-               case DP_TRAIN_VOLTAGE_SWING_800:
-                       return DP_TRAIN_PRE_EMPHASIS_3_5;
-               case DP_TRAIN_VOLTAGE_SWING_1200:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
-                       return DP_TRAIN_PRE_EMPHASIS_0;
+                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
-                       return DP_TRAIN_PRE_EMPHASIS_6;
-               case DP_TRAIN_VOLTAGE_SWING_600:
-               case DP_TRAIN_VOLTAGE_SWING_800:
-                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
-                       return DP_TRAIN_PRE_EMPHASIS_0;
+                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
-                       return DP_TRAIN_PRE_EMPHASIS_6;
-               case DP_TRAIN_VOLTAGE_SWING_600:
-                       return DP_TRAIN_PRE_EMPHASIS_6;
-               case DP_TRAIN_VOLTAGE_SWING_800:
-                       return DP_TRAIN_PRE_EMPHASIS_3_5;
-               case DP_TRAIN_VOLTAGE_SWING_1200:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
-                       return DP_TRAIN_PRE_EMPHASIS_0;
+                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
  }
@@@ -2473,22 -2911,22 +2915,22 @@@ static uint32_t intel_vlv_signal_levels
        int pipe = intel_crtc->pipe;
  
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
-       case DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_800:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_1200:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                        return 0;
                }
                break;
-       case DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_800:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                        return 0;
                }
                break;
-       case DP_TRAIN_PRE_EMPHASIS_6:
+       case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                        return 0;
                }
                break;
-       case DP_TRAIN_PRE_EMPHASIS_9_5:
+       case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
@@@ -2572,21 -3010,21 +3014,21 @@@ static uint32_t intel_chv_signal_levels
        int i;
  
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
-       case DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_PRE_EMPH_LEVEL_0:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 128;
                        margin_reg_value = 52;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 128;
                        margin_reg_value = 77;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_800:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        deemph_reg_value = 128;
                        margin_reg_value = 102;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_1200:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        deemph_reg_value = 128;
                        margin_reg_value = 154;
                        /* FIXME extra to set for 1200 */
                        return 0;
                }
                break;
-       case DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_PRE_EMPH_LEVEL_1:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 85;
                        margin_reg_value = 78;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 85;
                        margin_reg_value = 116;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_800:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        deemph_reg_value = 85;
                        margin_reg_value = 154;
                        break;
                        return 0;
                }
                break;
-       case DP_TRAIN_PRE_EMPHASIS_6:
+       case DP_TRAIN_PRE_EMPH_LEVEL_2:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 64;
                        margin_reg_value = 104;
                        break;
-               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 64;
                        margin_reg_value = 154;
                        break;
                        return 0;
                }
                break;
-       case DP_TRAIN_PRE_EMPHASIS_9_5:
+       case DP_TRAIN_PRE_EMPH_LEVEL_3:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 43;
                        margin_reg_value = 154;
                        break;
        /* Program swing margin */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-               val &= ~DPIO_SWING_MARGIN_MASK;
-               val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
+               val &= ~DPIO_SWING_MARGIN000_MASK;
+               val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
        }
  
        }
  
        if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
-                       == DP_TRAIN_PRE_EMPHASIS_0) &&
+                       == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
                ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
-                       == DP_TRAIN_VOLTAGE_SWING_1200)) {
+                       == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
  
                /*
                 * The document said it needs to set bit 27 for ch0 and bit 26
@@@ -2757,32 -3195,32 +3199,32 @@@ intel_gen4_signal_levels(uint8_t train_
        uint32_t        signal_levels = 0;
  
        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-       case DP_TRAIN_VOLTAGE_SWING_400:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
        default:
                signal_levels |= DP_VOLTAGE_0_4;
                break;
-       case DP_TRAIN_VOLTAGE_SWING_600:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                signal_levels |= DP_VOLTAGE_0_6;
                break;
-       case DP_TRAIN_VOLTAGE_SWING_800:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                signal_levels |= DP_VOLTAGE_0_8;
                break;
-       case DP_TRAIN_VOLTAGE_SWING_1200:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                signal_levels |= DP_VOLTAGE_1_2;
                break;
        }
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
-       case DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_PRE_EMPH_LEVEL_0:
        default:
                signal_levels |= DP_PRE_EMPHASIS_0;
                break;
-       case DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_PRE_EMPH_LEVEL_1:
                signal_levels |= DP_PRE_EMPHASIS_3_5;
                break;
-       case DP_TRAIN_PRE_EMPHASIS_6:
+       case DP_TRAIN_PRE_EMPH_LEVEL_2:
                signal_levels |= DP_PRE_EMPHASIS_6;
                break;
-       case DP_TRAIN_PRE_EMPHASIS_9_5:
+       case DP_TRAIN_PRE_EMPH_LEVEL_3:
                signal_levels |= DP_PRE_EMPHASIS_9_5;
                break;
        }
@@@ -2796,19 -3234,19 +3238,19 @@@ intel_gen6_edp_signal_levels(uint8_t tr
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        switch (signal_levels) {
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
                return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-       case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
        default:
                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
@@@ -2824,21 -3262,21 +3266,21 @@@ intel_gen7_edp_signal_levels(uint8_t tr
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        switch (signal_levels) {
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_400MV_0DB_IVB;
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
                return EDP_LINK_TRAIN_400MV_6DB_IVB;
  
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_600MV_0DB_IVB;
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
  
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_800MV_0DB_IVB;
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
  
        default:
@@@ -2855,30 -3293,30 +3297,30 @@@ intel_hsw_signal_levels(uint8_t train_s
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        switch (signal_levels) {
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_400MV_0DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_400MV_3_5DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-               return DDI_BUF_EMP_400MV_6DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
-               return DDI_BUF_EMP_400MV_9_5DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_600MV_0DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_600MV_3_5DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
-               return DDI_BUF_EMP_600MV_6DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_800MV_0DB_HSW;
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_800MV_3_5DB_HSW;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               return DDI_BUF_TRANS_SELECT(0);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               return DDI_BUF_TRANS_SELECT(1);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+               return DDI_BUF_TRANS_SELECT(2);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+               return DDI_BUF_TRANS_SELECT(3);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               return DDI_BUF_TRANS_SELECT(4);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               return DDI_BUF_TRANS_SELECT(5);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+               return DDI_BUF_TRANS_SELECT(6);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               return DDI_BUF_TRANS_SELECT(7);
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               return DDI_BUF_TRANS_SELECT(8);
        default:
                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
                              "0x%x\n", signal_levels);
-               return DDI_BUF_EMP_400MV_0DB_HSW;
+               return DDI_BUF_TRANS_SELECT(0);
        }
  }
  
@@@ -2925,74 -3363,10 +3367,10 @@@ intel_dp_set_link_train(struct intel_d
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum port port = intel_dig_port->port;
        uint8_t buf[sizeof(intel_dp->train_set) + 1];
        int ret, len;
  
-       if (HAS_DDI(dev)) {
-               uint32_t temp = I915_READ(DP_TP_CTL(port));
-               if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
-                       temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
-               else
-                       temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
-               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
-               case DP_TRAINING_PATTERN_DISABLE:
-                       temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
-                       break;
-               case DP_TRAINING_PATTERN_1:
-                       temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
-                       break;
-               case DP_TRAINING_PATTERN_2:
-                       temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
-                       break;
-               case DP_TRAINING_PATTERN_3:
-                       temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
-                       break;
-               }
-               I915_WRITE(DP_TP_CTL(port), temp);
-       } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
-               *DP &= ~DP_LINK_TRAIN_MASK_CPT;
-               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
-               case DP_TRAINING_PATTERN_DISABLE:
-                       *DP |= DP_LINK_TRAIN_OFF_CPT;
-                       break;
-               case DP_TRAINING_PATTERN_1:
-                       *DP |= DP_LINK_TRAIN_PAT_1_CPT;
-                       break;
-               case DP_TRAINING_PATTERN_2:
-                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
-                       break;
-               case DP_TRAINING_PATTERN_3:
-                       DRM_ERROR("DP training pattern 3 not supported\n");
-                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
-                       break;
-               }
-       } else {
-               *DP &= ~DP_LINK_TRAIN_MASK;
-               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
-               case DP_TRAINING_PATTERN_DISABLE:
-                       *DP |= DP_LINK_TRAIN_OFF;
-                       break;
-               case DP_TRAINING_PATTERN_1:
-                       *DP |= DP_LINK_TRAIN_PAT_1;
-                       break;
-               case DP_TRAINING_PATTERN_2:
-                       *DP |= DP_LINK_TRAIN_PAT_2;
-                       break;
-               case DP_TRAINING_PATTERN_3:
-                       DRM_ERROR("DP training pattern 3 not supported\n");
-                       *DP |= DP_LINK_TRAIN_PAT_2;
-                       break;
-               }
-       }
+       _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
  
        I915_WRITE(intel_dp->output_reg, *DP);
        POSTING_READ(intel_dp->output_reg);
@@@ -3276,7 -3650,10 +3654,10 @@@ intel_dp_link_down(struct intel_dp *int
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
-               DP &= ~DP_LINK_TRAIN_MASK;
+               if (IS_CHERRYVIEW(dev))
+                       DP &= ~DP_LINK_TRAIN_MASK_CHV;
+               else
+                       DP &= ~DP_LINK_TRAIN_MASK;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
        }
        POSTING_READ(intel_dp->output_reg);
@@@ -3322,15 -3699,11 +3703,11 @@@ intel_dp_get_dpcd(struct intel_dp *inte
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
        if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
                                    sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */
  
-       hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
-                          32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
-       DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+       DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
  
        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
                intel_dp->use_tps3 = true;
-               DRM_DEBUG_KMS("Displayport TPS3 supported");
+               DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
                intel_dp->use_tps3 = false;
  
@@@ -3388,7 -3761,7 +3765,7 @@@ intel_dp_probe_oui(struct intel_dp *int
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
  
-       edp_panel_vdd_off(intel_dp, false);
+       intel_edp_panel_vdd_off(intel_dp, false);
  }
  
  static bool
@@@ -3402,7 -3775,7 +3779,7 @@@ intel_dp_probe_mst(struct intel_dp *int
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
                return false;
  
-       _edp_panel_vdd_on(intel_dp);
+       intel_edp_panel_vdd_on(intel_dp);
        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
                if (buf[0] & DP_MST_CAP) {
                        DRM_DEBUG_KMS("Sink is MST capable\n");
                        intel_dp->is_mst = false;
                }
        }
-       edp_panel_vdd_off(intel_dp, false);
+       intel_edp_panel_vdd_off(intel_dp, false);
  
        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        return intel_dp->is_mst;
@@@ -3427,21 -3800,21 +3804,21 @@@ int intel_dp_sink_crc(struct intel_dp *
        u8 buf[1];
  
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
-               return -EAGAIN;
+               return -EIO;
  
        if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
                return -ENOTTY;
  
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                               DP_TEST_SINK_START) < 0)
-               return -EAGAIN;
+               return -EIO;
  
        /* Wait 2 vblanks to be sure we will have the correct CRC value */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
        intel_wait_for_vblank(dev, intel_crtc->pipe);
  
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
-               return -EAGAIN;
+               return -EIO;
  
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
        return 0;
@@@ -3643,21 -4016,25 +4020,25 @@@ intel_dp_detect_dpcd(struct intel_dp *i
        return connector_status_disconnected;
  }
  
+ static enum drm_connector_status
+ edp_detect(struct intel_dp *intel_dp)
+ {
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       enum drm_connector_status status;
+       status = intel_panel_detect(dev);
+       if (status == connector_status_unknown)
+               status = connector_status_connected;
+       return status;
+ }
  static enum drm_connector_status
  ironlake_dp_detect(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       enum drm_connector_status status;
-       /* Can't disconnect eDP, but you can close the lid... */
-       if (is_edp(intel_dp)) {
-               status = intel_panel_detect(dev);
-               if (status == connector_status_unknown)
-                       status = connector_status_connected;
-               return status;
-       }
  
        if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
                return connector_status_disconnected;
@@@ -3733,9 -4110,9 +4114,9 @@@ g4x_dp_detect(struct intel_dp *intel_dp
  }
  
  static struct edid *
- intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+ intel_dp_get_edid(struct intel_dp *intel_dp)
  {
-       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_connector *intel_connector = intel_dp->attached_connector;
  
        /* use cached edid if we have one */
        if (intel_connector->edid) {
                        return NULL;
  
                return drm_edid_duplicate(intel_connector->edid);
-       }
+       } else
+               return drm_get_edid(&intel_connector->base,
+                                   &intel_dp->aux.ddc);
+ }
+ static void
+ intel_dp_set_edid(struct intel_dp *intel_dp)
+ {
+       struct intel_connector *intel_connector = intel_dp->attached_connector;
+       struct edid *edid;
  
-       return drm_get_edid(connector, adapter);
+       edid = intel_dp_get_edid(intel_dp);
+       intel_connector->detect_edid = edid;
+       if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
+               intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
+       else
+               intel_dp->has_audio = drm_detect_monitor_audio(edid);
  }
  
- static int
- intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
+ static void
+ intel_dp_unset_edid(struct intel_dp *intel_dp)
  {
-       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_connector *intel_connector = intel_dp->attached_connector;
  
-       /* use cached edid if we have one */
-       if (intel_connector->edid) {
-               /* invalid edid */
-               if (IS_ERR(intel_connector->edid))
-                       return 0;
+       kfree(intel_connector->detect_edid);
+       intel_connector->detect_edid = NULL;
  
-               return intel_connector_update_modes(connector,
-                                                   intel_connector->edid);
-       }
+       intel_dp->has_audio = false;
+ }
+ static enum intel_display_power_domain
+ intel_dp_power_get(struct intel_dp *dp)
+ {
+       struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
+       enum intel_display_power_domain power_domain;
+       power_domain = intel_display_port_power_domain(encoder);
+       intel_display_power_get(to_i915(encoder->base.dev), power_domain);
+       return power_domain;
+ }
  
-       return intel_ddc_get_modes(connector, adapter);
+ static void
+ intel_dp_power_put(struct intel_dp *dp,
+                  enum intel_display_power_domain power_domain)
+ {
+       struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
+       intel_display_power_put(to_i915(encoder->base.dev), power_domain);
  }
  
  static enum drm_connector_status
@@@ -3774,33 -4179,30 +4183,30 @@@ intel_dp_detect(struct drm_connector *c
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
-       struct edid *edid = NULL;
        bool ret;
  
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       intel_display_power_get(dev_priv, power_domain);
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
+       intel_dp_unset_edid(intel_dp);
  
        if (intel_dp->is_mst) {
                /* MST devices are disconnected from a monitor POV */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-               status = connector_status_disconnected;
-               goto out;
+               return connector_status_disconnected;
        }
  
-       intel_dp->has_audio = false;
+       power_domain = intel_dp_power_get(intel_dp);
  
-       if (HAS_PCH_SPLIT(dev))
+       /* Can't disconnect eDP, but you can close the lid... */
+       if (is_edp(intel_dp))
+               status = edp_detect(intel_dp);
+       else if (HAS_PCH_SPLIT(dev))
                status = ironlake_dp_detect(intel_dp);
        else
                status = g4x_dp_detect(intel_dp);
        if (status != connector_status_connected)
                goto out;
  
                goto out;
        }
  
-       if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
-               intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
-       } else {
-               edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
-               if (edid) {
-                       intel_dp->has_audio = drm_detect_monitor_audio(edid);
-                       kfree(edid);
-               }
-       }
+       intel_dp_set_edid(intel_dp);
  
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        status = connector_status_connected;
  
  out:
-       intel_display_power_put(dev_priv, power_domain);
+       intel_dp_power_put(intel_dp, power_domain);
        return status;
  }
  
- static int intel_dp_get_modes(struct drm_connector *connector)
+ static void
+ intel_dp_force(struct drm_connector *connector)
  {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct intel_encoder *intel_encoder = &intel_dig_port->base;
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        enum intel_display_power_domain power_domain;
-       int ret;
  
-       /* We should parse the EDID data and find out if it has an audio sink
-        */
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, connector->name);
+       intel_dp_unset_edid(intel_dp);
  
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       intel_display_power_get(dev_priv, power_domain);
+       if (connector->status != connector_status_connected)
+               return;
  
-       ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
-       intel_display_power_put(dev_priv, power_domain);
-       if (ret)
-               return ret;
+       power_domain = intel_dp_power_get(intel_dp);
+       intel_dp_set_edid(intel_dp);
+       intel_dp_power_put(intel_dp, power_domain);
+       if (intel_encoder->type != INTEL_OUTPUT_EDP)
+               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ }
+ static int intel_dp_get_modes(struct drm_connector *connector)
+ {
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct edid *edid;
+       edid = intel_connector->detect_edid;
+       if (edid) {
+               int ret = intel_connector_update_modes(connector, edid);
+               if (ret)
+                       return ret;
+       }
  
        /* if eDP has no EDID, fall back to fixed mode */
-       if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+       if (is_edp(intel_attached_dp(connector)) &&
+           intel_connector->panel.fixed_mode) {
                struct drm_display_mode *mode;
-               mode = drm_mode_duplicate(dev,
+               mode = drm_mode_duplicate(connector->dev,
                                          intel_connector->panel.fixed_mode);
                if (mode) {
                        drm_mode_probed_add(connector, mode);
                        return 1;
                }
        }
        return 0;
  }
  
  static bool
  intel_dp_detect_audio(struct drm_connector *connector)
  {
-       struct intel_dp *intel_dp = intel_attached_dp(connector);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct intel_encoder *intel_encoder = &intel_dig_port->base;
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum intel_display_power_domain power_domain;
-       struct edid *edid;
        bool has_audio = false;
+       struct edid *edid;
  
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       intel_display_power_get(dev_priv, power_domain);
-       edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
-       if (edid) {
+       edid = to_intel_connector(connector)->detect_edid;
+       if (edid)
                has_audio = drm_detect_monitor_audio(edid);
-               kfree(edid);
-       }
-       intel_display_power_put(dev_priv, power_domain);
  
        return has_audio;
  }
@@@ -3989,6 -4387,8 +4391,8 @@@ intel_dp_connector_destroy(struct drm_c
  {
        struct intel_connector *intel_connector = to_intel_connector(connector);
  
+       kfree(intel_connector->detect_edid);
        if (!IS_ERR_OR_NULL(intel_connector->edid))
                kfree(intel_connector->edid);
  
@@@ -4005,16 -4405,20 +4409,20 @@@ void intel_dp_encoder_destroy(struct dr
  {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
  
        drm_dp_aux_unregister(&intel_dp->aux);
        intel_dp_mst_encoder_cleanup(intel_dig_port);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-               drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+               /*
+                * vdd might still be enabled do to the delayed vdd off.
+                * Make sure vdd is actually turned off here.
+                */
+               pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
-               drm_modeset_unlock(&dev->mode_config.connection_mutex);
+               pps_unlock(intel_dp);
                if (intel_dp->edp_notifier.notifier_call) {
                        unregister_reboot_notifier(&intel_dp->edp_notifier);
                        intel_dp->edp_notifier.notifier_call = NULL;
@@@ -4030,7 -4434,13 +4438,13 @@@ static void intel_dp_encoder_suspend(st
        if (!is_edp(intel_dp))
                return;
  
+       /*
+        * vdd might still be enabled do to the delayed vdd off.
+        * Make sure vdd is actually turned off here.
+        */
+       pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
+       pps_unlock(intel_dp);
  }
  
  static void intel_dp_encoder_reset(struct drm_encoder *encoder)
  static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
+       .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .destroy = intel_dp_connector_destroy,
@@@ -4076,7 -4487,8 +4491,8 @@@ intel_dp_hpd_pulse(struct intel_digital
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
  
-       DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+       DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
+                     port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");
  
        power_domain = intel_display_port_power_domain(intel_encoder);
@@@ -4216,6 -4628,8 +4632,8 @@@ intel_dp_init_panel_power_sequencer(str
        u32 pp_on, pp_off, pp_div, pp;
        int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
  
+       lockdep_assert_held(&dev_priv->pps_mutex);
        if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
@@@ -4315,6 -4729,9 +4733,9 @@@ intel_dp_init_panel_power_sequencer_reg
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
+       enum port port = dp_to_dig_port(intel_dp)->port;
+       lockdep_assert_held(&dev_priv->pps_mutex);
  
        if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
-               if (dp_to_dig_port(intel_dp)->port == PORT_B)
-                       port_sel = PANEL_PORT_SELECT_DPB_VLV;
-               else
-                       port_sel = PANEL_PORT_SELECT_DPC_VLV;
+               port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-               if (dp_to_dig_port(intel_dp)->port == PORT_A)
+               if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
@@@ -4438,7 -4852,7 +4856,7 @@@ void intel_dp_set_drrs_state(struct drm
                val = I915_READ(reg);
                if (index > DRRS_HIGH_RR) {
                        val |= PIPECONF_EDP_RR_MODE_SWITCH;
-                       intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
+                       intel_dp_set_m_n(intel_crtc);
                } else {
                        val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
                }
@@@ -4478,7 -4892,7 +4896,7 @@@ intel_dp_drrs_init(struct intel_digital
        }
  
        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
-               DRM_INFO("VBT doesn't support DRRS\n");
+               DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
                return NULL;
        }
  
                                        (dev, fixed_mode, connector);
  
        if (!downclock_mode) {
-               DRM_INFO("DRRS not supported\n");
+               DRM_DEBUG_KMS("DRRS not supported\n");
                return NULL;
        }
  
        intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
  
        intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
-       DRM_INFO("seamless DRRS supported for eDP panel.\n");
+       DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
        return downclock_mode;
  }
  
@@@ -4512,8 -4926,11 +4930,11 @@@ void intel_edp_panel_vdd_sanitize(struc
                return;
  
        intel_dp = enc_to_intel_dp(&intel_encoder->base);
+       pps_lock(intel_dp);
        if (!edp_have_panel_vdd(intel_dp))
-               return;
+               goto out;
        /*
         * The VDD bit needs a power domain reference, so if the bit is
         * already enabled when we boot or resume, grab this reference and
        intel_display_power_get(dev_priv, power_domain);
  
        edp_panel_vdd_schedule_off(intel_dp);
+  out:
+       pps_unlock(intel_dp);
  }
  
  static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        /* Cache DPCD and EDID for edp. */
        intel_edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
-       edp_panel_vdd_off(intel_dp, false);
+       intel_edp_panel_vdd_off(intel_dp, false);
  
        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
        }
  
        /* We now know it's not a ghost, init power sequence regs. */
+       pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
+       pps_unlock(intel_dp);
  
        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        }
  
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+       intel_connector->panel.backlight_power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector);
  
        return true;
@@@ -4628,6 -5050,8 +5054,8 @@@ intel_dp_init_connector(struct intel_di
        struct edp_power_seq power_seq = { 0 };
        int type;
  
+       intel_dp->pps_pipe = INVALID_PIPE;
        /* intel_dp vfuncs */
        if (IS_VALLEYVIEW(dev))
                intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
        }
  
        if (is_edp(intel_dp)) {
-               intel_dp_init_panel_power_timestamps(intel_dp);
-               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+               pps_lock(intel_dp);
+               if (IS_VALLEYVIEW(dev)) {
+                       vlv_initial_power_sequencer_setup(intel_dp);
+               } else {
+                       intel_dp_init_panel_power_timestamps(intel_dp);
+                       intel_dp_init_panel_power_sequencer(dev, intel_dp,
+                                                           &power_seq);
+               }
+               pps_unlock(intel_dp);
        }
  
        intel_dp_aux_init(intel_dp, intel_connector);
        /* init MST on ports that can support it */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                if (port == PORT_B || port == PORT_C || port == PORT_D) {
-                       intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
+                       intel_dp_mst_encoder_init(intel_dig_port,
+                                                 intel_connector->base.base.id);
                }
        }
  
                drm_dp_aux_unregister(&intel_dp->aux);
                if (is_edp(intel_dp)) {
                        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-                       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+                       /*
+                        * vdd might still be enabled do to the delayed vdd off.
+                        * Make sure vdd is actually turned off here.
+                        */
+                       pps_lock(intel_dp);
                        edp_panel_vdd_off_sync(intel_dp);
-                       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+                       pps_unlock(intel_dp);
                }
                drm_connector_unregister(connector);
                drm_connector_cleanup(connector);
@@@ -4781,7 -5217,8 +5221,8 @@@ intel_dp_init(struct drm_device *dev, i
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
-               intel_encoder->post_disable = g4x_post_disable_dp;
+               if (INTEL_INFO(dev)->gen >= 5)
+                       intel_encoder->post_disable = ilk_post_disable_dp;
        }
  
        intel_dig_port->port = port;
index 5a9de21637b7819818e1993f911432e0fdb634de,7fed5bedc10f616b3e9cf85730d30a4d8714bac0..29ec1535992d413fa5faf70c8192c02d6334b02c
@@@ -712,8 -712,7 +712,8 @@@ static void intel_hdmi_get_config(struc
                                  struct intel_crtc_config *pipe_config)
  {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 -      struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 +      struct drm_device *dev = encoder->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp, flags = 0;
        int dotclock;
  
        if (tmp & HDMI_MODE_SELECT_HDMI)
                pipe_config->has_hdmi_sink = true;
  
 -      if (tmp & HDMI_MODE_SELECT_HDMI)
 +      if (tmp & SDVO_AUDIO_ENABLE)
                pipe_config->has_audio = true;
  
 +      if (!HAS_PCH_SPLIT(dev) &&
 +          tmp & HDMI_COLOR_RANGE_16_235)
 +              pipe_config->limited_color_range = true;
 +
        pipe_config->adjusted_mode.flags |= flags;
  
        if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
@@@ -869,10 -864,15 +869,15 @@@ static enum drm_mode_statu
  intel_hdmi_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
  {
-       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
-                                              true))
+       int clock = mode->clock;
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               clock *= 2;
+       if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+                                        true))
                return MODE_CLOCK_HIGH;
-       if (mode->clock < 20000)
+       if (clock < 20000)
                return MODE_CLOCK_LOW;
  
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@@ -890,7 -890,7 +895,7 @@@ static bool hdmi_12bpc_possible(struct 
        if (HAS_GMCH_DISPLAY(dev))
                return false;
  
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+       for_each_intel_encoder(dev, encoder) {
                if (encoder->new_crtc != crtc)
                        continue;
  
@@@ -926,6 -926,10 +931,10 @@@ bool intel_hdmi_compute_config(struct i
                        intel_hdmi->color_range = 0;
        }
  
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
+               pipe_config->pixel_multiplier = 2;
+       }
        if (intel_hdmi->color_range)
                pipe_config->limited_color_range = true;
  
        return true;
  }
  
- static enum drm_connector_status
- intel_hdmi_detect(struct drm_connector *connector, bool force)
+ static void
+ intel_hdmi_unset_edid(struct drm_connector *connector)
  {
-       struct drm_device *dev = connector->dev;
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-       struct intel_digital_port *intel_dig_port =
-               hdmi_to_dig_port(intel_hdmi);
-       struct intel_encoder *intel_encoder = &intel_dig_port->base;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct edid *edid;
-       enum intel_display_power_domain power_domain;
-       enum drm_connector_status status = connector_status_disconnected;
  
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
+       intel_hdmi->has_hdmi_sink = false;
+       intel_hdmi->has_audio = false;
+       intel_hdmi->rgb_quant_range_selectable = false;
+       kfree(to_intel_connector(connector)->detect_edid);
+       to_intel_connector(connector)->detect_edid = NULL;
+ }
+ static bool
+ intel_hdmi_set_edid(struct drm_connector *connector)
+ {
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
+       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct intel_encoder *intel_encoder =
+               &hdmi_to_dig_port(intel_hdmi)->base;
+       enum intel_display_power_domain power_domain;
+       struct edid *edid;
+       bool connected = false;
  
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);
  
-       intel_hdmi->has_hdmi_sink = false;
-       intel_hdmi->has_audio = false;
-       intel_hdmi->rgb_quant_range_selectable = false;
        edid = drm_get_edid(connector,
                            intel_gmbus_get_adapter(dev_priv,
                                                    intel_hdmi->ddc_bus));
  
-       if (edid) {
-               if (edid->input & DRM_EDID_INPUT_DIGITAL) {
-                       status = connector_status_connected;
-                       if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
-                               intel_hdmi->has_hdmi_sink =
-                                               drm_detect_hdmi_monitor(edid);
-                       intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
-                       intel_hdmi->rgb_quant_range_selectable =
-                               drm_rgb_quant_range_selectable(edid);
-               }
-               kfree(edid);
-       }
+       intel_display_power_put(dev_priv, power_domain);
  
-       if (status == connector_status_connected) {
+       to_intel_connector(connector)->detect_edid = edid;
+       if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+               intel_hdmi->rgb_quant_range_selectable =
+                       drm_rgb_quant_range_selectable(edid);
+               intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
                if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
                        intel_hdmi->has_audio =
-                               (intel_hdmi->force_audio == HDMI_AUDIO_ON);
-               intel_encoder->type = INTEL_OUTPUT_HDMI;
+                               intel_hdmi->force_audio == HDMI_AUDIO_ON;
+               if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+                       intel_hdmi->has_hdmi_sink =
+                               drm_detect_hdmi_monitor(edid);
+               connected = true;
        }
  
-       intel_display_power_put(dev_priv, power_domain);
+       return connected;
+ }
+ static enum drm_connector_status
+ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ {
+       enum drm_connector_status status;
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, connector->name);
+       intel_hdmi_unset_edid(connector);
+       if (intel_hdmi_set_edid(connector)) {
+               struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+               hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+               status = connector_status_connected;
+       } else
+               status = connector_status_disconnected;
  
        return status;
  }
  
- static int intel_hdmi_get_modes(struct drm_connector *connector)
+ static void
+ intel_hdmi_force(struct drm_connector *connector)
  {
-       struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       enum intel_display_power_domain power_domain;
-       int ret;
+       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
  
-       /* We should parse the EDID data and find out if it's an HDMI sink so
-        * we can send audio to it.
-        */
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, connector->name);
  
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       intel_display_power_get(dev_priv, power_domain);
+       intel_hdmi_unset_edid(connector);
  
-       ret = intel_ddc_get_modes(connector,
-                                  intel_gmbus_get_adapter(dev_priv,
-                                                          intel_hdmi->ddc_bus));
+       if (connector->status != connector_status_connected)
+               return;
  
-       intel_display_power_put(dev_priv, power_domain);
+       intel_hdmi_set_edid(connector);
+       hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+ }
+ static int intel_hdmi_get_modes(struct drm_connector *connector)
+ {
+       struct edid *edid;
+       edid = to_intel_connector(connector)->detect_edid;
+       if (edid == NULL)
+               return 0;
  
-       return ret;
+       return intel_connector_update_modes(connector, edid);
  }
  
  static bool
  intel_hdmi_detect_audio(struct drm_connector *connector)
  {
-       struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       enum intel_display_power_domain power_domain;
-       struct edid *edid;
        bool has_audio = false;
+       struct edid *edid;
  
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       intel_display_power_get(dev_priv, power_domain);
-       edid = drm_get_edid(connector,
-                           intel_gmbus_get_adapter(dev_priv,
-                                                   intel_hdmi->ddc_bus));
-       if (edid) {
-               if (edid->input & DRM_EDID_INPUT_DIGITAL)
-                       has_audio = drm_detect_monitor_audio(edid);
-               kfree(edid);
-       }
-       intel_display_power_put(dev_priv, power_domain);
+       edid = to_intel_connector(connector)->detect_edid;
+       if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
+               has_audio = drm_detect_monitor_audio(edid);
  
        return has_audio;
  }
@@@ -1265,6 -1282,8 +1287,8 @@@ static void chv_hdmi_pre_pll_enable(str
        enum pipe pipe = intel_crtc->pipe;
        u32 val;
  
+       intel_hdmi_prepare(encoder);
        mutex_lock(&dev_priv->dpio_lock);
  
        /* program left/right clock distribution */
@@@ -1434,8 -1453,8 +1458,8 @@@ static void chv_hdmi_pre_enable(struct 
  
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-               val &= ~DPIO_SWING_MARGIN_MASK;
-               val |= 102 << DPIO_SWING_MARGIN_SHIFT;
+               val &= ~DPIO_SWING_MARGIN000_MASK;
+               val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
        }
  
  
  static void intel_hdmi_destroy(struct drm_connector *connector)
  {
+       kfree(to_intel_connector(connector)->detect_edid);
        drm_connector_cleanup(connector);
        kfree(connector);
  }
  static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_hdmi_detect,
+       .force = intel_hdmi_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_hdmi_set_property,
        .destroy = intel_hdmi_destroy,
index 47a126a0493f811cad30474532477cfe5994734e,6dc981f0671eebb2275018daeb13b4a81ef57436..0a80e419b5894f1c36a1b6eb0f6078f76fe7c0f4
  #include "i915_trace.h"
  #include "intel_drv.h"
  
- /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
-  * but keeps the logic simple. Indeed, the whole purpose of this macro is just
-  * to give some inclination as to some of the magic values used in the various
-  * workarounds!
-  */
- #define CACHELINE_BYTES 64
+ bool
+ intel_ring_initialized(struct intel_engine_cs *ring)
+ {
+       struct drm_device *dev = ring->dev;
+       if (!dev)
+               return false;
+       if (i915.enable_execlists) {
+               struct intel_context *dctx = ring->default_context;
+               struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+               return ringbuf->obj;
+       } else
+               return ring->buffer && ring->buffer->obj;
+ }
  
static inline int __ring_space(int head, int tail, int size)
int __intel_ring_space(int head, int tail, int size)
  {
        int space = head - (tail + I915_RING_FREE_SPACE);
        if (space < 0)
        return space;
  }
  
static inline int ring_space(struct intel_ringbuffer *ringbuf)
int intel_ring_space(struct intel_ringbuffer *ringbuf)
  {
-       return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+       return __intel_ring_space(ringbuf->head & HEAD_ADDR,
+                                 ringbuf->tail, ringbuf->size);
  }
  
static bool intel_ring_stopped(struct intel_engine_cs *ring)
+ bool intel_ring_stopped(struct intel_engine_cs *ring)
  {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
@@@ -433,7 -444,14 +444,14 @@@ gen8_render_ring_flush(struct intel_eng
                        return ret;
        }
  
-       return gen8_emit_pipe_control(ring, flags, scratch_addr);
+       ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
+       if (ret)
+               return ret;
+       if (!invalidate_domains && flush_domains)
+               return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
+       return 0;
  }
  
  static void ring_write_tail(struct intel_engine_cs *ring,
@@@ -476,9 -494,14 +494,14 @@@ static bool stop_ring(struct intel_engi
  
        if (!IS_GEN2(ring->dev)) {
                I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-               if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-                       DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
-                       return false;
+               if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+                       DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+                       /* Sometimes we observe that the idle flag is not
+                        * set even though the ring is empty. So double
+                        * check before giving up.
+                        */
+                       if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+                               return false;
                }
        }
  
@@@ -540,6 -563,14 +563,14 @@@ static int init_ring_common(struct inte
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
        I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+       /* WaClearRingBufHeadRegAtInit:ctg,elk */
+       if (I915_READ_HEAD(ring))
+               DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
+                         ring->name, I915_READ_HEAD(ring));
+       I915_WRITE_HEAD(ring, 0);
+       (void)I915_READ_HEAD(ring);
        I915_WRITE_CTL(ring,
                        ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
        else {
                ringbuf->head = I915_READ_HEAD(ring);
                ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-               ringbuf->space = ring_space(ringbuf);
+               ringbuf->space = intel_ring_space(ringbuf);
                ringbuf->last_retired_head = -1;
        }
  
@@@ -575,8 -606,25 +606,25 @@@ out
        return ret;
  }
  
- static int
- init_pipe_control(struct intel_engine_cs *ring)
+ void
+ intel_fini_pipe_control(struct intel_engine_cs *ring)
+ {
+       struct drm_device *dev = ring->dev;
+       if (ring->scratch.obj == NULL)
+               return;
+       if (INTEL_INFO(dev)->gen >= 5) {
+               kunmap(sg_page(ring->scratch.obj->pages->sgl));
+               i915_gem_object_ggtt_unpin(ring->scratch.obj);
+       }
+       drm_gem_object_unreference(&ring->scratch.obj->base);
+       ring->scratch.obj = NULL;
+ }
+ int
+ intel_init_pipe_control(struct intel_engine_cs *ring)
  {
        int ret;
  
@@@ -617,6 -665,135 +665,135 @@@ err
        return ret;
  }
  
+ static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
+                                      u32 addr, u32 value)
+ {
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
+               return;
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+       intel_ring_emit(ring, addr);
+       intel_ring_emit(ring, value);
+       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
+       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
+       /* value is updated with the status of remaining bits of this
+        * register when it is read from debugfs file
+        */
+       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
+       dev_priv->num_wa_regs++;
+       return;
+ }
+ static int bdw_init_workarounds(struct intel_engine_cs *ring)
+ {
+       int ret;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       /*
+        * workarounds applied in this fn are part of register state context,
+        * they need to be re-initialized followed by gpu reset, suspend/resume,
+        * module reload.
+        */
+       dev_priv->num_wa_regs = 0;
+       memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
+       /*
+        * update the number of dwords required based on the
+        * actual number of workarounds applied
+        */
+       ret = intel_ring_begin(ring, 18);
+       if (ret)
+               return ret;
+       /* WaDisablePartialInstShootdown:bdw */
+       /* WaDisableThreadStallDopClockGating:bdw */
+       /* FIXME: Unclear whether we really need this on production bdw. */
+       intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
+                          _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
+                                            | STALL_DOP_GATING_DISABLE));
+       /* WaDisableDopClockGating:bdw May not be needed for production */
+       intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
+                          _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+       /* Use Force Non-Coherent whenever executing a 3D context. This is a
+        * workaround for for a possible hang in the unlikely event a TLB
+        * invalidation occurs during a PSD flush.
+        */
+       intel_ring_emit_wa(ring, HDC_CHICKEN0,
+                          _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+       /* Wa4x4STCOptimizationDisable:bdw */
+       intel_ring_emit_wa(ring, CACHE_MODE_1,
+                          _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       intel_ring_emit_wa(ring, GEN7_GT_MODE,
+                          GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+       intel_ring_advance(ring);
+       DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
+                        dev_priv->num_wa_regs);
+       return 0;
+ }
+ static int chv_init_workarounds(struct intel_engine_cs *ring)
+ {
+       int ret;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       /*
+        * workarounds applied in this fn are part of register state context,
+        * they need to be re-initialized followed by gpu reset, suspend/resume,
+        * module reload.
+        */
+       dev_priv->num_wa_regs = 0;
+       memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
+       ret = intel_ring_begin(ring, 12);
+       if (ret)
+               return ret;
+       /* WaDisablePartialInstShootdown:chv */
+       intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
+                          _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+       /* WaDisableThreadStallDopClockGating:chv */
+       intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
+                          _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+       /* WaDisableDopClockGating:chv (pre-production hw) */
+       intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
+       intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
+                          _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+       intel_ring_advance(ring);
+       return 0;
+ }
  static int init_render_ring(struct intel_engine_cs *ring)
  {
        struct drm_device *dev = ring->dev;
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  
        if (INTEL_INFO(dev)->gen >= 5) {
-               ret = init_pipe_control(ring);
+               ret = intel_init_pipe_control(ring);
                if (ret)
                        return ret;
        }
@@@ -686,16 -863,7 +863,7 @@@ static void render_ring_cleanup(struct 
                dev_priv->semaphore_obj = NULL;
        }
  
-       if (ring->scratch.obj == NULL)
-               return;
-       if (INTEL_INFO(dev)->gen >= 5) {
-               kunmap(sg_page(ring->scratch.obj->pages->sgl));
-               i915_gem_object_ggtt_unpin(ring->scratch.obj);
-       }
-       drm_gem_object_unreference(&ring->scratch.obj->base);
-       ring->scratch.obj = NULL;
+       intel_fini_pipe_control(ring);
  }
  
  static int gen8_rcs_signal(struct intel_engine_cs *signaller,
@@@ -1400,7 -1568,7 +1568,7 @@@ i830_dispatch_execbuffer(struct intel_e
                 */
                intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
                intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
 -              intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
 +              intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
                intel_ring_emit(ring, cs_offset);
                intel_ring_emit(ring, 4096);
                intel_ring_emit(ring, offset);
@@@ -1526,7 -1694,7 +1694,7 @@@ static int init_phys_status_page(struc
        return 0;
  }
  
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
  {
        if (!ringbuf->obj)
                return;
        ringbuf->obj = NULL;
  }
  
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-                                     struct intel_ringbuffer *ringbuf)
+ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+                              struct intel_ringbuffer *ringbuf)
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
@@@ -1600,7 -1768,9 +1768,9 @@@ static int intel_init_ring_buffer(struc
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->execlist_queue);
        ringbuf->size = 32 * PAGE_SIZE;
+       ringbuf->ring = ring;
        memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
  
        init_waitqueue_head(&ring->irq_queue);
@@@ -1683,13 -1853,14 +1853,14 @@@ static int intel_ring_wait_request(stru
                ringbuf->head = ringbuf->last_retired_head;
                ringbuf->last_retired_head = -1;
  
-               ringbuf->space = ring_space(ringbuf);
+               ringbuf->space = intel_ring_space(ringbuf);
                if (ringbuf->space >= n)
                        return 0;
        }
  
        list_for_each_entry(request, &ring->request_list, list) {
-               if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
+               if (__intel_ring_space(request->tail, ringbuf->tail,
+                                      ringbuf->size) >= n) {
                        seqno = request->seqno;
                        break;
                }
        ringbuf->head = ringbuf->last_retired_head;
        ringbuf->last_retired_head = -1;
  
-       ringbuf->space = ring_space(ringbuf);
+       ringbuf->space = intel_ring_space(ringbuf);
        return 0;
  }
  
@@@ -1735,7 -1906,7 +1906,7 @@@ static int ring_wait_for_space(struct i
        trace_i915_ring_wait_begin(ring);
        do {
                ringbuf->head = I915_READ_HEAD(ring);
-               ringbuf->space = ring_space(ringbuf);
+               ringbuf->space = intel_ring_space(ringbuf);
                if (ringbuf->space >= n) {
                        ret = 0;
                        break;
@@@ -1787,7 -1958,7 +1958,7 @@@ static int intel_wrap_ring_buffer(struc
                iowrite32(MI_NOOP, virt++);
  
        ringbuf->tail = 0;
-       ringbuf->space = ring_space(ringbuf);
+       ringbuf->space = intel_ring_space(ringbuf);
  
        return 0;
  }
@@@ -1992,9 -2163,7 +2163,7 @@@ gen8_ring_dispatch_execbuffer(struct in
                              u64 offset, u32 len,
                              unsigned flags)
  {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
-               !(flags & I915_DISPATCH_SECURE);
+       bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
        int ret;
  
        ret = intel_ring_begin(ring, 4);
@@@ -2023,8 -2192,9 +2192,9 @@@ hsw_ring_dispatch_execbuffer(struct int
                return ret;
  
        intel_ring_emit(ring,
-                       MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
-                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+                       MI_BATCH_BUFFER_START |
+                       (flags & I915_DISPATCH_SECURE ?
+                        0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
@@@ -2123,6 -2293,10 +2293,10 @@@ int intel_init_render_ring_buffer(struc
                                        dev_priv->semaphore_obj = obj;
                        }
                }
+               if (IS_CHERRYVIEW(dev))
+                       ring->init_context = chv_init_workarounds;
+               else
+                       ring->init_context = bdw_init_workarounds;
                ring->add_request = gen6_add_request;
                ring->flush = gen8_render_ring_flush;
                ring->irq_get = gen8_ring_get_irq;
index f8cbb512132fdcd224f6dda223689de4a4bb4350,a7efbff4dc8fb24dbdc4448821809c400c7b8cb1..2df3a937037de7c55d94f345bcdd8f2eeb88576e
@@@ -29,6 -29,7 +29,7 @@@
  #include <core/enum.h>
  #include <nvif/unpack.h>
  #include <nvif/class.h>
+ #include <nvif/event.h>
  
  #include <subdev/bios.h>
  #include <subdev/bios/dcb.h>
@@@ -82,6 -83,71 +83,71 @@@ nv50_disp_chan_destroy(struct nv50_disp
        nouveau_namedb_destroy(&chan->base);
  }
  
+ static void
+ nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+ {
+       struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+       nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
+ }
+ static void
+ nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+ {
+       struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+       nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
+ }
+ void
+ nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
+ {
+       struct nvif_notify_uevent_rep {
+       } rep;
+       nvkm_event_send(&priv->uevent, 1, chid, &rep, sizeof(rep));
+ }
+ int
+ nv50_disp_chan_uevent_ctor(struct nouveau_object *object, void *data, u32 size,
+                          struct nvkm_notify *notify)
+ {
+       struct nv50_disp_dmac *dmac = (void *)object;
+       union {
+               struct nvif_notify_uevent_req none;
+       } *args = data;
+       int ret;
+       if (nvif_unvers(args->none)) {
+               notify->size  = sizeof(struct nvif_notify_uevent_rep);
+               notify->types = 1;
+               notify->index = dmac->base.chid;
+               return 0;
+       }
+       return ret;
+ }
+ const struct nvkm_event_func
+ nv50_disp_chan_uevent = {
+       .ctor = nv50_disp_chan_uevent_ctor,
+       .init = nv50_disp_chan_uevent_init,
+       .fini = nv50_disp_chan_uevent_fini,
+ };
+ int
+ nv50_disp_chan_ntfy(struct nouveau_object *object, u32 type,
+                   struct nvkm_event **pevent)
+ {
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       switch (type) {
+       case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
+               *pevent = &priv->uevent;
+               return 0;
+       default:
+               break;
+       }
+       return -EINVAL;
+ }
  int
  nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size)
  {
@@@ -195,7 -261,7 +261,7 @@@ nv50_disp_dmac_init(struct nouveau_obje
                return ret;
  
        /* enable error reporting */
-       nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+       nv_mask(priv, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
  
        /* initialise channel for dma command submission */
        nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
@@@ -232,7 -298,7 +298,7 @@@ nv50_disp_dmac_fini(struct nouveau_obje
                        return -EBUSY;
        }
  
-       /* disable error reporting */
+       /* disable error reporting and completion notifications */
        nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
  
        return nv50_disp_chan_fini(&dmac->base, suspend);
@@@ -454,7 -520,7 +520,7 @@@ nv50_disp_mast_init(struct nouveau_obje
                return ret;
  
        /* enable error reporting */
-       nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+       nv_mask(priv, 0x610028, 0x00010000, 0x00010000);
  
        /* attempt to unstick channel from some unknown state */
        if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
@@@ -494,7 -560,7 +560,7 @@@ nv50_disp_mast_fini(struct nouveau_obje
                        return -EBUSY;
        }
  
-       /* disable error reporting */
+       /* disable error reporting and completion notifications */
        nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
  
        return nv50_disp_chan_fini(&mast->base, suspend);
@@@ -507,6 -573,7 +573,7 @@@ nv50_disp_mast_ofuncs = 
        .base.init = nv50_disp_mast_init,
        .base.fini = nv50_disp_mast_fini,
        .base.map  = nv50_disp_chan_map,
+       .base.ntfy = nv50_disp_chan_ntfy,
        .base.rd32 = nv50_disp_chan_rd32,
        .base.wr32 = nv50_disp_chan_wr32,
        .chid = 0,
@@@ -607,6 -674,7 +674,7 @@@ nv50_disp_sync_ofuncs = 
        .base.dtor = nv50_disp_dmac_dtor,
        .base.init = nv50_disp_dmac_init,
        .base.fini = nv50_disp_dmac_fini,
+       .base.ntfy = nv50_disp_chan_ntfy,
        .base.map  = nv50_disp_chan_map,
        .base.rd32 = nv50_disp_chan_rd32,
        .base.wr32 = nv50_disp_chan_wr32,
@@@ -696,6 -764,7 +764,7 @@@ nv50_disp_ovly_ofuncs = 
        .base.dtor = nv50_disp_dmac_dtor,
        .base.init = nv50_disp_dmac_init,
        .base.fini = nv50_disp_dmac_fini,
+       .base.ntfy = nv50_disp_chan_ntfy,
        .base.map  = nv50_disp_chan_map,
        .base.rd32 = nv50_disp_chan_rd32,
        .base.wr32 = nv50_disp_chan_wr32,
@@@ -813,6 -882,7 +882,7 @@@ nv50_disp_oimm_ofuncs = 
        .base.dtor = nv50_disp_pioc_dtor,
        .base.init = nv50_disp_pioc_init,
        .base.fini = nv50_disp_pioc_fini,
+       .base.ntfy = nv50_disp_chan_ntfy,
        .base.map  = nv50_disp_chan_map,
        .base.rd32 = nv50_disp_chan_rd32,
        .base.wr32 = nv50_disp_chan_wr32,
@@@ -860,6 -930,7 +930,7 @@@ nv50_disp_curs_ofuncs = 
        .base.dtor = nv50_disp_pioc_dtor,
        .base.init = nv50_disp_pioc_init,
        .base.fini = nv50_disp_pioc_fini,
+       .base.ntfy = nv50_disp_chan_ntfy,
        .base.map  = nv50_disp_chan_map,
        .base.rd32 = nv50_disp_chan_rd32,
        .base.wr32 = nv50_disp_chan_wr32,
@@@ -1559,7 -1630,7 +1630,7 @@@ nv50_disp_intr_unk20_1(struct nv50_disp
  }
  
  static void
- nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
+ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
                          struct dcb_output *outp, u32 pclk)
  {
        const int link = !(outp->sorconf.link & 1);
        const u32 loff = (link * 0x080) + soff;
        const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
        const u32 symbol = 100000;
-       u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000;
+       const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff;
+       const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff;
+       const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff;
+       u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
        u32 clksor = nv_rd32(priv, 0x614300 + soff);
        int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
        int TU, VTUi, VTUf, VTUa;
        u64 link_data_rate, link_ratio, unk;
        u32 best_diff = 64 * symbol;
        u32 link_nr, link_bw, bits;
-       /* calculate packed data rate for each lane */
-       if      (dpctrl > 0x00030000) link_nr = 4;
-       else if (dpctrl > 0x00010000) link_nr = 2;
-       else                          link_nr = 1;
-       if (clksor & 0x000c0000)
-               link_bw = 270000;
-       else
-               link_bw = 162000;
+       u64 value;
+       link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
+       link_nr = hweight32(dpctrl & 0x000f0000);
+       /* symbols/hblank - algorithm taken from comments in tegra driver */
+       value = vblanke + vactive - vblanks - 7;
+       value = value * link_bw;
+       do_div(value, pclk);
+       value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
+       nv_mask(priv, 0x61c1e8 + soff, 0x0000ffff, value);
+       /* symbols/vblank - algorithm taken from comments in tegra driver */
+       value = vblanks - vblanke - 25;
+       value = value * link_bw;
+       do_div(value, pclk);
+       value = value - ((36 / link_nr) + 3) - 1;
+       nv_mask(priv, 0x61c1ec + soff, 0x00ffffff, value);
+       /* watermark / activesym */
        if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
        else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
        else                                  bits = 18;
@@@ -1731,7 -1814,7 +1814,7 @@@ nv50_disp_intr_unk20_2(struct nv50_disp
        } else
        if (!outp->info.location) {
                if (outp->info.type == DCB_OUTPUT_DP)
-                       nv50_disp_intr_unk20_2_dp(priv, &outp->info, pclk);
+                       nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk);
                oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
                oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
                hval = 0x00000000;
@@@ -1763,10 -1846,9 +1846,10 @@@ nv50_disp_intr_unk40_0_tmds(struct nv50
        const int   or = ffs(outp->or) - 1;
        const u32 loff = (or * 0x800) + (link * 0x80);
        const u16 mask = (outp->sorconf.link << 6) | outp->or;
 +      struct dcb_output match;
        u8  ver, hdr;
  
 -      if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
 +      if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
                nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
  }
  
@@@ -1847,6 -1929,12 +1930,12 @@@ nv50_disp_intr(struct nouveau_subdev *s
                intr0 &= ~(0x00010000 << chid);
        }
  
+       while (intr0 & 0x0000001f) {
+               u32 chid = __ffs(intr0 & 0x0000001f);
+               nv50_disp_chan_uevent_send(priv, chid);
+               intr0 &= ~(0x00000001 << chid);
+       }
        if (intr1 & 0x00000004) {
                nouveau_disp_vblank(&priv->base, 0);
                nv_wr32(priv, 0x610024, 0x00000004);
@@@ -1881,6 -1969,10 +1970,10 @@@ nv50_disp_ctor(struct nouveau_object *p
        if (ret)
                return ret;
  
+       ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
+       if (ret)
+               return ret;
        nv_engine(priv)->sclass = nv50_disp_base_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
index d5d65285efe59188072f90fc020939d14c3f1dec,e7b7872481ef2c5e33c0f1064c66bb314ff7e86e..2db0977284f80dfb484f34969809491daf9cbbf7
@@@ -62,16 -62,38 +62,38 @@@ gf100_ltc_zbc_clear_depth(struct nvkm_l
        nv_wr32(priv, 0x17ea58, depth);
  }
  
+ static const struct nouveau_bitfield
+ gf100_ltc_lts_intr_name[] = {
+       { 0x00000001, "IDLE_ERROR_IQ" },
+       { 0x00000002, "IDLE_ERROR_CBC" },
+       { 0x00000004, "IDLE_ERROR_TSTG" },
+       { 0x00000008, "IDLE_ERROR_DSTG" },
+       { 0x00000010, "EVICTED_CB" },
+       { 0x00000020, "ILLEGAL_COMPSTAT" },
+       { 0x00000040, "BLOCKLINEAR_CB" },
+       { 0x00000100, "ECC_SEC_ERROR" },
+       { 0x00000200, "ECC_DED_ERROR" },
+       { 0x00000400, "DEBUG" },
+       { 0x00000800, "ATOMIC_TO_Z" },
+       { 0x00001000, "ILLEGAL_ATOMIC" },
+       { 0x00002000, "BLKACTIVITY_ERR" },
+       {}
+ };
  static void
- gf100_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+ gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
  {
        u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
-       u32 stat = nv_rd32(priv, base + 0x020);
+       u32 intr = nv_rd32(priv, base + 0x020);
+       u32 stat = intr & 0x0000ffff;
  
        if (stat) {
-               nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
-               nv_wr32(priv, base + 0x020, stat);
+               nv_info(priv, "LTC%d_LTS%d:", ltc, lts);
+               nouveau_bitfield_print(gf100_ltc_lts_intr_name, stat);
+               pr_cont("\n");
        }
+       nv_wr32(priv, base + 0x020, intr);
  }
  
  void
@@@ -84,21 -106,15 +106,16 @@@ gf100_ltc_intr(struct nouveau_subdev *s
        while (mask) {
                u32 lts, ltc = __ffs(mask);
                for (lts = 0; lts < priv->lts_nr; lts++)
-                       gf100_ltc_lts_isr(priv, ltc, lts);
+                       gf100_ltc_lts_intr(priv, ltc, lts);
                mask &= ~(1 << ltc);
        }
-       /* we do something horribly wrong and upset PMFB a lot, so mask off
-        * interrupts from it after the first one until it's fixed
-        */
-       nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
  }
  
  static int
  gf100_ltc_init(struct nouveau_object *object)
  {
        struct nvkm_ltc_priv *priv = (void *)object;
 +      u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
  
        ret = nvkm_ltc_init(priv);
        nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
        nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
        nv_wr32(priv, 0x17e8d4, priv->tag_base);
 +      nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
  }
  
@@@ -153,7 -168,7 +170,7 @@@ gf100_ltc_init_tag_ram(struct nouveau_f
        tag_size += tag_align;
        tag_size  = (tag_size + 0xfff) >> 12; /* round up */
  
-       ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
+       ret = nouveau_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
                              &priv->tag_ram);
        if (ret) {
                priv->num_tags = 0;
index a4de64289762fc9cfe82ab40e424b4fba67fe0f0,a26bed86f3841436b0f5b2f39cbf8b6686b495ae..89fc4238f50c4afea16fe2d5cb3501f5eb954216
@@@ -87,18 -87,12 +87,13 @@@ gm107_ltc_intr(struct nouveau_subdev *s
                        gm107_ltc_lts_isr(priv, ltc, lts);
                mask &= ~(1 << ltc);
        }
-       /* we do something horribly wrong and upset PMFB a lot, so mask off
-        * interrupts from it after the first one until it's fixed
-        */
-       nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
  }
  
  static int
  gm107_ltc_init(struct nouveau_object *object)
  {
        struct nvkm_ltc_priv *priv = (void *)object;
 +      u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
        int ret;
  
        ret = nvkm_ltc_init(priv);
  
        nv_wr32(priv, 0x17e27c, priv->ltc_nr);
        nv_wr32(priv, 0x17e278, priv->tag_base);
 +      nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
        return 0;
  }
  
index 3440fc999f2f3290b3557ea495a26afb65fda874,977fb8f15d977bc033f210ef23588e77fa27909c..589dbb582da200c543b2ee04e9296d58e0d70e1f
@@@ -36,7 -36,7 +36,7 @@@
  #include "nouveau_abi16.h"
  
  MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
static int nouveau_vram_pushbuf;
+ int nouveau_vram_pushbuf;
  module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
  
  int
@@@ -106,7 -106,7 +106,7 @@@ nouveau_channel_prep(struct nouveau_dr
        if (nouveau_vram_pushbuf)
                target = TTM_PL_FLAG_VRAM;
  
-       ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
+       ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
                            &chan->push.buffer);
        if (ret == 0) {
                ret = nouveau_bo_pin(chan->push.buffer, target);
@@@ -285,7 -285,6 +285,7 @@@ nouveau_channel_init(struct nouveau_cha
        struct nouveau_software_chan *swch;
        struct nv_dma_v0 args = {};
        int ret, i;
 +      bool save;
  
        nvif_object_map(chan->object);
  
        }
  
        /* initialise synchronisation */
 -      return nouveau_fence(chan->drm)->context_new(chan);
 +      save = cli->base.super;
 +      cli->base.super = true; /* hack until fencenv50 fixed */
 +      ret = nouveau_fence(chan->drm)->context_new(chan);
 +      cli->base.super = save;
 +      return ret;
  }
  
  int
index 4a21b2b06ce29beece2bcd102d30f005fcbc1307,334db3c6e40cd423c69ed33f10b4c0fe5ab9d8c2..a88e6927f5713e29a0db95a769dd16401f9f920f
@@@ -126,7 -126,7 +126,7 @@@ nouveau_display_scanoutpos_head(struct 
        if (etime) *etime = ns_to_ktime(args.scan.time[1]);
  
        if (*vpos < 0)
-               ret |= DRM_SCANOUTPOS_INVBL;
+               ret |= DRM_SCANOUTPOS_IN_VBLANK;
        return ret;
  }
  
@@@ -550,12 -550,14 +550,12 @@@ nouveau_display_destroy(struct drm_devi
  }
  
  int
 -nouveau_display_suspend(struct drm_device *dev)
 +nouveau_display_suspend(struct drm_device *dev, bool runtime)
  {
 -      struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_crtc *crtc;
  
        nouveau_display_fini(dev);
  
 -      NV_INFO(drm, "unpinning framebuffer(s)...\n");
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
  
  }
  
  void
 -nouveau_display_repin(struct drm_device *dev)
 +nouveau_display_resume(struct drm_device *dev, bool runtime)
  {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_crtc *crtc;
 -      int ret;
 +      int ret, head;
  
 +      /* re-pin fb/cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
  
                if (ret)
                        NV_ERROR(drm, "Could not pin/map cursor.\n");
        }
 -}
 -
 -void
 -nouveau_display_resume(struct drm_device *dev)
 -{
 -      struct drm_crtc *crtc;
 -      int head;
  
        nouveau_display_init(dev);
  
        for (head = 0; head < dev->mode_config.num_crtc; head++)
                drm_vblank_on(dev, head);
  
 +      /* This should ensure we don't hit a locking problem when someone
 +       * wakes us up via a connector.  We should never go into suspend
 +       * while the display is on anyways.
 +       */
 +      if (runtime)
 +              return;
 +
        drm_helper_resume_force_mode(dev);
  
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@@ -657,7 -658,7 +657,7 @@@ nouveau_page_flip_emit(struct nouveau_c
        spin_unlock_irqrestore(&dev->event_lock, flags);
  
        /* Synchronize with the old framebuffer */
-       ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+       ret = nouveau_fence_sync(old_bo, chan, false, false);
        if (ret)
                goto fail;
  
@@@ -716,19 -717,24 +716,24 @@@ nouveau_crtc_page_flip(struct drm_crtc 
        }
  
        mutex_lock(&cli->mutex);
-       /* synchronise rendering channel with the kernel's channel */
-       spin_lock(&new_bo->bo.bdev->fence_lock);
-       fence = nouveau_fence_ref(new_bo->bo.sync_obj);
-       spin_unlock(&new_bo->bo.bdev->fence_lock);
-       ret = nouveau_fence_sync(fence, chan);
-       nouveau_fence_unref(&fence);
+       ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
        if (ret)
                goto fail_unpin;
  
-       ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
-       if (ret)
+       /* synchronise rendering channel with the kernel's channel */
+       ret = nouveau_fence_sync(new_bo, chan, false, true);
+       if (ret) {
+               ttm_bo_unreserve(&new_bo->bo);
                goto fail_unpin;
+       }
+       if (new_bo != old_bo) {
+               ttm_bo_unreserve(&new_bo->bo);
+               ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
+               if (ret)
+                       goto fail_unpin;
+       }
  
        /* Initialize a page flip struct */
        *s = (struct nouveau_page_flip_state)
        /* Update the crtc struct and cleanup */
        crtc->primary->fb = fb;
  
-       nouveau_bo_fence(old_bo, fence);
+       nouveau_bo_fence(old_bo, fence, false);
        ttm_bo_unreserve(&old_bo->bo);
        if (old_bo != new_bo)
                nouveau_bo_unpin(old_bo);
index 3ed32dd9030364cd2a366726f9e6af09b05f21e3,244d78fc0cb534a1f6701ef36fe60a745e7f6648..57238076049f6fe16717ba6626bca954b1910aeb
@@@ -51,6 -51,7 +51,7 @@@
  #include "nouveau_fence.h"
  #include "nouveau_debugfs.h"
  #include "nouveau_usif.h"
+ #include "nouveau_connector.h"
  
  MODULE_PARM_DESC(config, "option string to pass to driver core");
  static char *nouveau_config;
@@@ -73,7 -74,9 +74,9 @@@ MODULE_PARM_DESC(runpm, "disable (0), f
  int nouveau_runtime_pm = -1;
  module_param_named(runpm, nouveau_runtime_pm, int, 0400);
  
- static struct drm_driver driver;
+ static struct drm_driver driver_stub;
+ static struct drm_driver driver_pci;
+ static struct drm_driver driver_platform;
  
  static u64
  nouveau_pci_name(struct pci_dev *pdev)
@@@ -322,7 -325,7 +325,7 @@@ static int nouveau_drm_probe(struct pci
  
        pci_set_master(pdev);
  
-       ret = drm_get_pci_dev(pdev, pent, &driver);
+       ret = drm_get_pci_dev(pdev, pent, &driver_pci);
        if (ret) {
                nouveau_object_ref(NULL, (struct nouveau_object **)&device);
                return ret;
@@@ -547,11 -550,9 +550,11 @@@ nouveau_do_suspend(struct drm_device *d
        struct nouveau_cli *cli;
        int ret;
  
 -      if (dev->mode_config.num_crtc && !runtime) {
 +      if (dev->mode_config.num_crtc) {
 +              NV_INFO(drm, "suspending console...\n");
 +              nouveau_fbcon_set_suspend(dev, 1);
                NV_INFO(drm, "suspending display...\n");
 -              ret = nouveau_display_suspend(dev);
 +              ret = nouveau_display_suspend(dev, runtime);
                if (ret)
                        return ret;
        }
@@@ -605,7 -606,7 +608,7 @@@ fail_client
  fail_display:
        if (dev->mode_config.num_crtc) {
                NV_INFO(drm, "resuming display...\n");
 -              nouveau_display_resume(dev);
 +              nouveau_display_resume(dev, runtime);
        }
        return ret;
  }
@@@ -620,19 -621,21 +623,19 @@@ int nouveau_pmops_suspend(struct devic
            drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
                return 0;
  
 -      if (drm_dev->mode_config.num_crtc)
 -              nouveau_fbcon_set_suspend(drm_dev, 1);
 -
        ret = nouveau_do_suspend(drm_dev, false);
        if (ret)
                return ret;
  
        pci_save_state(pdev);
        pci_disable_device(pdev);
 +      pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
  }
  
  static int
 -nouveau_do_resume(struct drm_device *dev)
 +nouveau_do_resume(struct drm_device *dev, bool runtime)
  {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli;
  
        if (dev->mode_config.num_crtc) {
                NV_INFO(drm, "resuming display...\n");
 -              nouveau_display_repin(dev);
 +              nouveau_display_resume(dev, runtime);
 +              NV_INFO(drm, "resuming console...\n");
 +              nouveau_fbcon_set_suspend(dev, 0);
        }
  
        return 0;
@@@ -682,21 -683,47 +685,21 @@@ int nouveau_pmops_resume(struct device 
                return ret;
        pci_set_master(pdev);
  
 -      ret = nouveau_do_resume(drm_dev);
 -      if (ret)
 -              return ret;
 -
 -      if (drm_dev->mode_config.num_crtc) {
 -              nouveau_display_resume(drm_dev);
 -              nouveau_fbcon_set_suspend(drm_dev, 0);
 -      }
 -
 -      return 0;
 +      return nouveau_do_resume(drm_dev, false);
  }
  
  static int nouveau_pmops_freeze(struct device *dev)
  {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
 -      int ret;
 -
 -      if (drm_dev->mode_config.num_crtc)
 -              nouveau_fbcon_set_suspend(drm_dev, 1);
 -
 -      ret = nouveau_do_suspend(drm_dev, false);
 -      return ret;
 +      return nouveau_do_suspend(drm_dev, false);
  }
  
  static int nouveau_pmops_thaw(struct device *dev)
  {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
 -      int ret;
 -
 -      ret = nouveau_do_resume(drm_dev);
 -      if (ret)
 -              return ret;
 -
 -      if (drm_dev->mode_config.num_crtc) {
 -              nouveau_display_resume(drm_dev);
 -              nouveau_fbcon_set_suspend(drm_dev, 0);
 -      }
 -
 -      return 0;
 +      return nouveau_do_resume(drm_dev, false);
  }
  
  
@@@ -831,7 -858,7 +834,7 @@@ nouveau_driver_fops = 
  };
  
  static struct drm_driver
- driver = {
+ driver_stub = {
        .driver_features =
                DRIVER_USE_AGP |
                DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
@@@ -952,7 -979,7 +955,7 @@@ static int nouveau_pmops_runtime_resume
                return ret;
        pci_set_master(pdev);
  
 -      ret = nouveau_do_resume(drm_dev);
 +      ret = nouveau_do_resume(drm_dev, true);
        drm_kms_helper_poll_enable(drm_dev);
        /* do magic */
        nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
@@@ -1002,6 -1029,23 +1005,23 @@@ static int nouveau_pmops_runtime_idle(s
        return 1;
  }
  
+ static void nouveau_display_options(void)
+ {
+       DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
+       DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
+       DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
+       DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
+       DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
+       DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
+       DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
+       DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
+       DRM_DEBUG_DRIVER("... modeset      : %d\n", nouveau_modeset);
+       DRM_DEBUG_DRIVER("... runpm        : %d\n", nouveau_runtime_pm);
+       DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
+       DRM_DEBUG_DRIVER("... pstate       : %d\n", nouveau_pstate);
+ }
  static const struct dev_pm_ops nouveau_pm_ops = {
        .suspend = nouveau_pmops_suspend,
        .resume = nouveau_pmops_resume,
@@@ -1037,7 -1081,7 +1057,7 @@@ nouveau_platform_device_create_(struct 
        if (err)
                return ERR_PTR(err);
  
-       drm = drm_dev_alloc(&driver, &pdev->dev);
+       drm = drm_dev_alloc(&driver_platform, &pdev->dev);
        if (!drm) {
                err = -ENOMEM;
                goto err_free;
@@@ -1062,6 -1106,13 +1082,13 @@@ EXPORT_SYMBOL(nouveau_platform_device_c
  static int __init
  nouveau_drm_init(void)
  {
+       driver_pci = driver_stub;
+       driver_pci.set_busid = drm_pci_set_busid;
+       driver_platform = driver_stub;
+       driver_platform.set_busid = drm_platform_set_busid;
+       nouveau_display_options();
        if (nouveau_modeset == -1) {
  #ifdef CONFIG_VGA_CONSOLE
                if (vgacon_text_force())
                return 0;
  
        nouveau_register_dsm_handler();
-       return drm_pci_init(&driver, &nouveau_drm_pci_driver);
+       return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
  }
  
  static void __exit
@@@ -1082,7 -1133,7 +1109,7 @@@ nouveau_drm_exit(void
        if (!nouveau_modeset)
                return;
  
-       drm_pci_exit(&driver, &nouveau_drm_pci_driver);
+       drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
        nouveau_unregister_dsm_handler();
  }
  
index 49fe6075cc7c50c79d47d2ac24c273e536ac3159,dc1753c368e3096a46929ea6dda35a46ed0b9e2e..593ef8a2a069e16b4fdf96e9c95b67ca8ebd1b95
@@@ -52,7 -52,7 +52,7 @@@
  #include "nouveau_crtc.h"
  
  MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
static int nouveau_nofbaccel = 0;
+ int nouveau_nofbaccel = 0;
  module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
  
  static void
@@@ -308,7 -308,8 +308,8 @@@ static in
  nouveau_fbcon_create(struct drm_fb_helper *helper,
                     struct drm_fb_helper_surface_size *sizes)
  {
-       struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
+       struct nouveau_fbdev *fbcon =
+               container_of(helper, struct nouveau_fbdev, helper);
        struct drm_device *dev = fbcon->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_device *device = &drm->device;
@@@ -486,16 -487,6 +487,16 @@@ static const struct drm_fb_helper_func
        .fb_probe = nouveau_fbcon_create,
  };
  
 +static void
 +nouveau_fbcon_set_suspend_work(struct work_struct *work)
 +{
 +      struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
 +      console_lock();
 +      nouveau_fbcon_accel_restore(fbcon->dev);
 +      nouveau_fbcon_zfill(fbcon->dev, fbcon);
 +      fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
 +      console_unlock();
 +}
  
  int
  nouveau_fbcon_init(struct drm_device *dev)
        if (!fbcon)
                return -ENOMEM;
  
 +      INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
        fbcon->dev = dev;
        drm->fbcon = fbcon;
  
@@@ -562,14 -552,14 +563,14 @@@ nouveau_fbcon_set_suspend(struct drm_de
  {
        struct nouveau_drm *drm = nouveau_drm(dev);
        if (drm->fbcon) {
 -              console_lock();
 -              if (state == 0) {
 -                      nouveau_fbcon_accel_restore(dev);
 -                      nouveau_fbcon_zfill(dev, drm->fbcon);
 +              if (state == FBINFO_STATE_RUNNING) {
 +                      schedule_work(&drm->fbcon->work);
 +                      return;
                }
 +              flush_work(&drm->fbcon->work);
 +              console_lock();
                fb_set_suspend(drm->fbcon->helper.fbdev, state);
 -              if (state == 1)
 -                      nouveau_fbcon_accel_save_disable(dev);
 +              nouveau_fbcon_accel_save_disable(dev);
                console_unlock();
        }
  }
index 0b465c7d3907cb1a07667b667032dbe3b052c71b,1e2e9e27a03bbdfdbf215d14d22378f9607c4f76..6208e70e4a1cfdd16a949db884a605466d0ced38
@@@ -36,7 -36,6 +36,7 @@@ struct nouveau_fbdev 
        struct nouveau_framebuffer nouveau_fb;
        struct list_head fbdev_list;
        struct drm_device *dev;
 +      struct work_struct work;
        unsigned int saved_flags;
        struct nvif_object surf2d;
        struct nvif_object clip;
@@@ -73,5 -72,8 +73,8 @@@ void nouveau_fbcon_accel_save_disable(s
  void nouveau_fbcon_accel_restore(struct drm_device *dev);
  
  void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+ extern int nouveau_nofbaccel;
  #endif /* __NV50_FBCON_H__ */
  
index 3d546c606b43b6fd366a1257c751b16b5aaa2df5,d48a539b038a10adb24e5e8d878e43f606bfc2a0..377afa504d2bd045cfdc5f2eed06599610c30920
@@@ -3959,18 -3959,19 +3959,19 @@@ bool cik_semaphore_ring_emit(struct rad
   * @src_offset: src GPU address
   * @dst_offset: dst GPU address
   * @num_gpu_pages: number of GPU pages to xfer
-  * @fence: radeon fence object
+  * @resv: reservation object to sync to
   *
   * Copy GPU paging using the CP DMA engine (CIK+).
   * Used by the radeon ttm implementation to move pages if
   * registered as the asic copy callback.
   */
int cik_copy_cpdma(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_gpu_pages,
-                  struct radeon_fence **fence)
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+                                   uint64_t src_offset, uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv)
  {
        struct radeon_semaphore *sem = NULL;
+       struct radeon_fence *fence;
        int ring_index = rdev->asic->copy.blit_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes, control;
        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return r;
+               return ERR_PTR(r);
        }
  
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
-       radeon_semaphore_sync_to(sem, *fence);
+       radeon_semaphore_sync_resv(rdev, sem, resv, false);
        radeon_semaphore_sync_rings(rdev, sem, ring->idx);
  
        for (i = 0; i < num_loops; i++) {
                dst_offset += cur_size_in_bytes;
        }
  
-       r = radeon_fence_emit(rdev, fence, ring->idx);
+       r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, *fence);
+       radeon_semaphore_free(rdev, &sem, fence);
  
-       return r;
+       return fence;
  }
  
  /*
@@@ -4234,7 -4235,7 +4235,7 @@@ static int cik_cp_gfx_load_microcode(st
                WREG32(CP_PFP_UCODE_ADDR, 0);
                for (i = 0; i < fw_size; i++)
                        WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
-               WREG32(CP_PFP_UCODE_ADDR, 0);
+               WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version));
  
                /* CE */
                fw_data = (const __le32 *)
                WREG32(CP_CE_UCODE_ADDR, 0);
                for (i = 0; i < fw_size; i++)
                        WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
-               WREG32(CP_CE_UCODE_ADDR, 0);
+               WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version));
  
                /* ME */
                fw_data = (const __be32 *)
                WREG32(CP_ME_RAM_WADDR, 0);
                for (i = 0; i < fw_size; i++)
                        WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
-               WREG32(CP_ME_RAM_WADDR, 0);
+               WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version));
+               WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version));
        } else {
                const __be32 *fw_data;
  
                WREG32(CP_ME_RAM_WADDR, 0);
        }
  
-       WREG32(CP_PFP_UCODE_ADDR, 0);
-       WREG32(CP_CE_UCODE_ADDR, 0);
-       WREG32(CP_ME_RAM_WADDR, 0);
-       WREG32(CP_ME_RAM_RADDR, 0);
        return 0;
  }
  
@@@ -4563,7 -4561,7 +4561,7 @@@ static int cik_cp_compute_load_microcod
                WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
                for (i = 0; i < fw_size; i++)
                        WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
-               WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+               WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version));
  
                /* MEC2 */
                if (rdev->family == CHIP_KAVERI) {
                        WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
                        for (i = 0; i < fw_size; i++)
                                WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
-                       WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+                       WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version));
                }
        } else {
                const __be32 *fw_data;
@@@ -4689,7 -4687,7 +4687,7 @@@ static int cik_mec_init(struct radeon_d
                r = radeon_bo_create(rdev,
                                     rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
                                     PAGE_SIZE, true,
-                                    RADEON_GEM_DOMAIN_GTT, 0, NULL,
+                                    RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
                                     &rdev->mec.hpd_eop_obj);
                if (r) {
                        dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@@ -4803,7 -4801,7 +4801,7 @@@ struct bonaire_mq
   */
  static int cik_cp_compute_resume(struct radeon_device *rdev)
  {
 -      int r, i, idx;
 +      int r, i, j, idx;
        u32 tmp;
        bool use_doorbell = true;
        u64 hqd_gpu_addr;
                                             sizeof(struct bonaire_mqd),
                                             PAGE_SIZE, true,
                                             RADEON_GEM_DOMAIN_GTT, 0, NULL,
-                                            &rdev->ring[idx].mqd_obj);
+                                            NULL, &rdev->ring[idx].mqd_obj);
                        if (r) {
                                dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
                                return r;
                mqd->queue_state.cp_hqd_pq_wptr= 0;
                if (RREG32(CP_HQD_ACTIVE) & 1) {
                        WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
 -                      for (i = 0; i < rdev->usec_timeout; i++) {
 +                      for (j = 0; j < rdev->usec_timeout; j++) {
                                if (!(RREG32(CP_HQD_ACTIVE) & 1))
                                        break;
                                udelay(1);
@@@ -6226,7 -6224,7 +6224,7 @@@ static int cik_rlc_resume(struct radeon
                WREG32(RLC_GPM_UCODE_ADDR, 0);
                for (i = 0; i < size; i++)
                        WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
-               WREG32(RLC_GPM_UCODE_ADDR, 0);
+               WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version));
        } else {
                const __be32 *fw_data;
  
@@@ -7751,17 -7749,17 +7749,17 @@@ static inline u32 cik_get_ih_wptr(struc
                wptr = RREG32(IH_RB_WPTR);
  
        if (wptr & RB_OVERFLOW) {
 +              wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
 -              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
 -                      wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
 +              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
 +                       wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
 -              wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
  }
@@@ -8251,15 -8249,17 +8249,17 @@@ restart_ih
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
 +              WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
-       if (queue_reset)
-               schedule_work(&rdev->reset_work);
+       if (queue_reset) {
+               rdev->needs_reset = true;
+               wake_up_all(&rdev->fence_queue);
+       }
        if (queue_thermal)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
 -      WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
  
        /* make sure wptr hasn't changed while processing */
index c4ffa54b1e3df503dc5dd9645765b2f0927e87b2,c473c9125295137002efa5fe72eaad254e165ecd..c77dad1a45769b151526377fac7e601129ee9221
@@@ -489,6 -489,13 +489,6 @@@ int cik_sdma_resume(struct radeon_devic
  {
        int r;
  
 -      /* Reset dma */
 -      WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
 -      RREG32(SRBM_SOFT_RESET);
 -      udelay(50);
 -      WREG32(SRBM_SOFT_RESET, 0);
 -      RREG32(SRBM_SOFT_RESET);
 -
        r = cik_sdma_load_microcode(rdev);
        if (r)
                return r;
@@@ -530,18 -537,19 +530,19 @@@ void cik_sdma_fini(struct radeon_devic
   * @src_offset: src GPU address
   * @dst_offset: dst GPU address
   * @num_gpu_pages: number of GPU pages to xfer
-  * @fence: radeon fence object
+  * @resv: reservation object to sync to
   *
   * Copy GPU paging using the DMA engine (CIK).
   * Used by the radeon ttm implementation to move pages if
   * registered as the asic copy callback.
   */
int cik_copy_dma(struct radeon_device *rdev,
-                uint64_t src_offset, uint64_t dst_offset,
-                unsigned num_gpu_pages,
-                struct radeon_fence **fence)
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+                                 uint64_t src_offset, uint64_t dst_offset,
+                                 unsigned num_gpu_pages,
+                                 struct reservation_object *resv)
  {
        struct radeon_semaphore *sem = NULL;
+       struct radeon_fence *fence;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return r;
+               return ERR_PTR(r);
        }
  
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
-       radeon_semaphore_sync_to(sem, *fence);
+       radeon_semaphore_sync_resv(rdev, sem, resv, false);
        radeon_semaphore_sync_rings(rdev, sem, ring->idx);
  
        for (i = 0; i < num_loops; i++) {
                dst_offset += cur_size_in_bytes;
        }
  
-       r = radeon_fence_emit(rdev, fence, ring->idx);
+       r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, *fence);
+       radeon_semaphore_free(rdev, &sem, fence);
  
-       return r;
+       return fence;
  }
  
  /**
index e50807c29f696a6b8eb23cf1e5dd9ab7f58412ca,8fe9f870fb5a318a44dbb90983fdafe01342ed3f..a31f1ca40c6a7e81d06309e82bba86d3192a3562
@@@ -22,7 -22,6 +22,6 @@@
   * Authors: Alex Deucher
   */
  #include <linux/firmware.h>
- #include <linux/platform_device.h>
  #include <linux/slab.h>
  #include <drm/drmP.h>
  #include "radeon.h"
@@@ -4023,7 -4022,7 +4022,7 @@@ int sumo_rlc_init(struct radeon_device 
                if (rdev->rlc.save_restore_obj == NULL) {
                        r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
                                             RADEON_GEM_DOMAIN_VRAM, 0, NULL,
-                                            &rdev->rlc.save_restore_obj);
+                                            NULL, &rdev->rlc.save_restore_obj);
                        if (r) {
                                dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
                                return r;
                if (rdev->rlc.clear_state_obj == NULL) {
                        r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
                                             RADEON_GEM_DOMAIN_VRAM, 0, NULL,
-                                            &rdev->rlc.clear_state_obj);
+                                            NULL, &rdev->rlc.clear_state_obj);
                        if (r) {
                                dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
                                sumo_rlc_fini(rdev);
                        r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
                                             PAGE_SIZE, true,
                                             RADEON_GEM_DOMAIN_VRAM, 0, NULL,
-                                            &rdev->rlc.cp_table_obj);
+                                            NULL, &rdev->rlc.cp_table_obj);
                        if (r) {
                                dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
                                sumo_rlc_fini(rdev);
@@@ -4749,17 -4748,17 +4748,17 @@@ static u32 evergreen_get_ih_wptr(struc
                wptr = RREG32(IH_RB_WPTR);
  
        if (wptr & RB_OVERFLOW) {
 +              wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
 -              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
 -                      wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
 +              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
 +                       wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
 -              wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
  }
@@@ -5137,7 -5136,6 +5136,7 @@@ restart_ih
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
 +              WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
 -      WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
  
        /* make sure wptr hasn't changed while processing */
index 67cb472d188ce05ec36c5a812ae5751fbb618254,7b129d2b44be14cc9e746d4a09566c02baa70d9f..1dd976f447faccd700fcfeac3ebc14cebbdd1320
@@@ -33,8 -33,6 +33,8 @@@
  #define KV_MINIMUM_ENGINE_CLOCK         800
  #define SMC_RAM_END                     0x40000
  
 +static int kv_enable_nb_dpm(struct radeon_device *rdev,
 +                          bool enable);
  static void kv_init_graphics_levels(struct radeon_device *rdev);
  static int kv_calculate_ds_divider(struct radeon_device *rdev);
  static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@@ -1297,9 -1295,6 +1297,9 @@@ void kv_dpm_disable(struct radeon_devic
  {
        kv_smc_bapm_enable(rdev, false);
  
 +      if (rdev->family == CHIP_MULLINS)
 +              kv_enable_nb_dpm(rdev, false);
 +
        /* powerup blocks */
        kv_dpm_powergate_acp(rdev, false);
        kv_dpm_powergate_samu(rdev, false);
@@@ -1774,24 -1769,15 +1774,24 @@@ static int kv_update_dfs_bypass_setting
        return ret;
  }
  
 -static int kv_enable_nb_dpm(struct radeon_device *rdev)
 +static int kv_enable_nb_dpm(struct radeon_device *rdev,
 +                          bool enable)
  {
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret = 0;
  
 -      if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
 -              ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
 -              if (ret == 0)
 -                      pi->nb_dpm_enabled = true;
 +      if (enable) {
 +              if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
 +                      ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
 +                      if (ret == 0)
 +                              pi->nb_dpm_enabled = true;
 +              }
 +      } else {
 +              if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
 +                      ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
 +                      if (ret == 0)
 +                              pi->nb_dpm_enabled = false;
 +              }
        }
  
        return ret;
@@@ -1878,7 -1864,7 +1878,7 @@@ int kv_dpm_set_power_state(struct radeo
                        }
                        kv_update_sclk_t(rdev);
                        if (rdev->family == CHIP_MULLINS)
 -                              kv_enable_nb_dpm(rdev);
 +                              kv_enable_nb_dpm(rdev, true);
                }
        } else {
                if (pi->enable_dpm) {
                        }
                        kv_update_acp_boot_level(rdev);
                        kv_update_sclk_t(rdev);
 -                      kv_enable_nb_dpm(rdev);
 +                      kv_enable_nb_dpm(rdev, true);
                }
        }
  
@@@ -2787,6 -2773,8 +2787,8 @@@ void kv_dpm_debugfs_print_current_perfo
                tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
                        SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
                vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
+               seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+               seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
                seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
                           current_index, sclk, vddc);
        }
index b0098e792e62337a6b4d511ca62baa7f37bc4eba,c6b486f888d515ab7b68a4bac403d5755212d137..10f8be0ee1736394acaf9bac24eb516707917fd2
@@@ -821,20 -821,6 +821,20 @@@ u32 r100_get_vblank_counter(struct rade
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
  }
  
 +/**
 + * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 + * rdev: radeon device structure
 + * ring: ring buffer struct for emitting packets
 + */
 +static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
 +{
 +      radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 +      radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
 +                              RADEON_HDP_READ_BUFFER_INVALIDATE);
 +      radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 +      radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
 +}
 +
  /* Who ever call radeon_fence_emit should call ring_lock and ask
   * for enough space (today caller are ib schedule and buffer move) */
  void r100_fence_ring_emit(struct radeon_device *rdev,
@@@ -869,13 -855,14 +869,14 @@@ bool r100_semaphore_ring_emit(struct ra
        return false;
  }
  
int r100_copy_blit(struct radeon_device *rdev,
-                  uint64_t src_offset,
-                  uint64_t dst_offset,
-                  unsigned num_gpu_pages,
-                  struct radeon_fence **fence)
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+                                   uint64_t src_offset,
+                                   uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv)
  {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       struct radeon_fence *fence;
        uint32_t cur_pages;
        uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
        r = radeon_ring_lock(rdev, ring, ndw);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
        while (num_gpu_pages > 0) {
                cur_pages = num_gpu_pages;
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_HOST_IDLECLEAN |
                          RADEON_WAIT_DMA_GUI_IDLE);
-       if (fence) {
-               r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+       r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return ERR_PTR(r);
        }
        radeon_ring_unlock_commit(rdev, ring, false);
-       return r;
+       return fence;
  }
  
  static int r100_cp_wait_for_idle(struct radeon_device *rdev)
@@@ -1070,6 -1059,20 +1073,6 @@@ void r100_gfx_set_wptr(struct radeon_de
        (void)RREG32(RADEON_CP_RB_WPTR);
  }
  
 -/**
 - * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 - * rdev: radeon device structure
 - * ring: ring buffer struct for emitting packets
 - */
 -void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
 -{
 -      radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 -      radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
 -                              RADEON_HDP_READ_BUFFER_INVALIDATE);
 -      radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 -      radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
 -}
 -
  static void r100_cp_load_microcode(struct radeon_device *rdev)
  {
        const __be32 *fw_data;
index ea5c9af722ef9f5d322d61a9d0ff628f8daa151f,85414283fcccd9f9abdae2620f760a78282c45b8..56b02927cd3de5901da653bbea13cfcc7e9858ab
@@@ -122,6 -122,94 +122,94 @@@ u32 r600_get_xclk(struct radeon_device 
  
  int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
  {
+       unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
+       int r;
+       /* bypass vclk and dclk with bclk */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+                VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+                ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+       /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
+                UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
+       if (rdev->family >= CHIP_RS780)
+               WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
+                        ~UPLL_BYPASS_CNTL);
+       if (!vclk || !dclk) {
+               /* keep the Bypass mode, put PLL to sleep */
+               WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+               return 0;
+       }
+       if (rdev->clock.spll.reference_freq == 10000)
+               ref_div = 34;
+       else
+               ref_div = 4;
+       r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+                                         ref_div + 1, 0xFFF, 2, 30, ~0,
+                                         &fb_div, &vclk_div, &dclk_div);
+       if (r)
+               return r;
+       if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
+               fb_div >>= 1;
+       else
+               fb_div |= 1;
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+         if (r)
+                 return r;
+       /* assert PLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+       /* For RS780 we have to choose ref clk */
+       if (rdev->family >= CHIP_RS780)
+               WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
+                        ~UPLL_REFCLK_SRC_SEL_MASK);
+       /* set the required fb, ref and post divder values */
+       WREG32_P(CG_UPLL_FUNC_CNTL,
+                UPLL_FB_DIV(fb_div) |
+                UPLL_REF_DIV(ref_div),
+                ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+                UPLL_SW_HILEN(vclk_div >> 1) |
+                UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+                UPLL_SW_HILEN2(dclk_div >> 1) |
+                UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
+                UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
+                ~UPLL_SW_MASK);
+       /* give the PLL some time to settle */
+       mdelay(15);
+       /* deassert PLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+       mdelay(15);
+       /* deassert BYPASS EN */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+       if (rdev->family >= CHIP_RS780)
+               WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+       if (r)
+               return r;
+       /* switch VCLK and DCLK selection */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+                VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+                ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+       mdelay(100);
        return 0;
  }
  
@@@ -992,6 -1080,8 +1080,8 @@@ static int r600_pcie_gart_enable(struc
        WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@@ -1042,6 -1132,8 +1132,8 @@@ static void r600_pcie_gart_disable(stru
        WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
        radeon_gart_table_vram_unpin(rdev);
  }
  
@@@ -1338,7 -1430,7 +1430,7 @@@ int r600_vram_scratch_init(struct radeo
        if (rdev->vram_scratch.robj == NULL) {
                r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
                                     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-                                    0, NULL, &rdev->vram_scratch.robj);
+                                    0, NULL, NULL, &rdev->vram_scratch.robj);
                if (r) {
                        return r;
                }
@@@ -2792,12 -2884,13 +2884,13 @@@ bool r600_semaphore_ring_emit(struct ra
   * Used by the radeon ttm implementation to move pages if
   * registered as the asic copy callback.
   */
int r600_copy_cpdma(struct radeon_device *rdev,
-                   uint64_t src_offset, uint64_t dst_offset,
-                   unsigned num_gpu_pages,
-                   struct radeon_fence **fence)
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+                                    uint64_t src_offset, uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv)
  {
        struct radeon_semaphore *sem = NULL;
+       struct radeon_fence *fence;
        int ring_index = rdev->asic->copy.blit_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes, tmp;
        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return r;
+               return ERR_PTR(r);
        }
  
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
-       radeon_semaphore_sync_to(sem, *fence);
+       radeon_semaphore_sync_resv(rdev, sem, resv, false);
        radeon_semaphore_sync_rings(rdev, sem, ring->idx);
  
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
        radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
  
-       r = radeon_fence_emit(rdev, fence, ring->idx);
+       r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, *fence);
+       radeon_semaphore_free(rdev, &sem, fence);
  
-       return r;
+       return fence;
  }
  
  int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@@ -2907,6 -3000,18 +3000,18 @@@ static int r600_startup(struct radeon_d
                return r;
        }
  
+       if (rdev->has_uvd) {
+               r = uvd_v1_0_resume(rdev);
+               if (!r) {
+                       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+                       if (r) {
+                               dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+                       }
+               }
+               if (r)
+                       rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+       }
        /* Enable IRQ */
        if (!rdev->irq.installed) {
                r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
  
+       if (rdev->has_uvd) {
+               ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+               if (ring->ring_size) {
+                       r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+                                            RADEON_CP_PACKET2);
+                       if (!r)
+                               r = uvd_v1_0_init(rdev);
+                       if (r)
+                               DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+               }
+       }
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@@ -2994,6 -3111,10 +3111,10 @@@ int r600_suspend(struct radeon_device *
        radeon_pm_suspend(rdev);
        r600_audio_fini(rdev);
        r600_cp_stop(rdev);
+       if (rdev->has_uvd) {
+               uvd_v1_0_fini(rdev);
+               radeon_uvd_suspend(rdev);
+       }
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
@@@ -3073,6 -3194,14 +3194,14 @@@ int r600_init(struct radeon_device *rde
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
  
+       if (rdev->has_uvd) {
+               r = radeon_uvd_init(rdev);
+               if (!r) {
+                       rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+                       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+               }
+       }
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
  
@@@ -3102,6 -3231,10 +3231,10 @@@ void r600_fini(struct radeon_device *rd
        r600_audio_fini(rdev);
        r600_cp_fini(rdev);
        r600_irq_fini(rdev);
+       if (rdev->has_uvd) {
+               uvd_v1_0_fini(rdev);
+               radeon_uvd_fini(rdev);
+       }
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
@@@ -3235,7 -3368,7 +3368,7 @@@ int r600_ih_ring_alloc(struct radeon_de
                r = radeon_bo_create(rdev, rdev->ih.ring_size,
                                     PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, 0,
-                                    NULL, &rdev->ih.ring_obj);
+                                    NULL, NULL, &rdev->ih.ring_obj);
                if (r) {
                        DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
                        return r;
@@@ -3792,17 -3925,17 +3925,17 @@@ static u32 r600_get_ih_wptr(struct rade
                wptr = RREG32(IH_RB_WPTR);
  
        if (wptr & RB_OVERFLOW) {
 +              wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
 -              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
 -                      wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
 +              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
 +                       wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
 -              wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
  }
@@@ -4048,7 -4181,6 +4181,7 @@@ restart_ih
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
 +              WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
 -      WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
  
        /* make sure wptr hasn't changed while processing */
index a908daa006d23980bd51fe50d93e94ea3464aac5,a49db830a47fdb0ab968438f34db39e88673fdf8..100189ec5fa85133b840851d4def58bbbae6ffc0
@@@ -124,6 -124,15 +124,6 @@@ int r600_dma_resume(struct radeon_devic
        u32 rb_bufsz;
        int r;
  
 -      /* Reset dma */
 -      if (rdev->family >= CHIP_RV770)
 -              WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
 -      else
 -              WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
 -      RREG32(SRBM_SOFT_RESET);
 -      udelay(50);
 -      WREG32(SRBM_SOFT_RESET, 0);
 -
        WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
        WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
  
@@@ -427,18 -436,19 +427,19 @@@ void r600_dma_ring_ib_execute(struct ra
   * @src_offset: src GPU address
   * @dst_offset: dst GPU address
   * @num_gpu_pages: number of GPU pages to xfer
-  * @fence: radeon fence object
+  * @resv: reservation object to sync to
   *
   * Copy GPU paging using the DMA engine (r6xx).
   * Used by the radeon ttm implementation to move pages if
   * registered as the asic copy callback.
   */
int r600_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset, uint64_t dst_offset,
-                 unsigned num_gpu_pages,
-                 struct radeon_fence **fence)
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+                                  uint64_t src_offset, uint64_t dst_offset,
+                                  unsigned num_gpu_pages,
+                                  struct reservation_object *resv)
  {
        struct radeon_semaphore *sem = NULL;
+       struct radeon_fence *fence;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return r;
+               return ERR_PTR(r);
        }
  
        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
-       radeon_semaphore_sync_to(sem, *fence);
+       radeon_semaphore_sync_resv(rdev, sem, resv, false);
        radeon_semaphore_sync_rings(rdev, sem, ring->idx);
  
        for (i = 0; i < num_loops; i++) {
                dst_offset += cur_size_in_dw * 4;
        }
  
-       r = radeon_fence_emit(rdev, fence, ring->idx);
+       r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               return ERR_PTR(r);
        }
  
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, *fence);
+       radeon_semaphore_free(rdev, &sem, fence);
  
-       return r;
+       return fence;
  }
index 31e1052ad3e3780cec522ab8a2e04bb62698c573,ebf68fa6d1f16bce540120e38ebc965fd9d2a470..1e8495cca41e9f5f611dfc3b84726104427969c7
  #define R6XX_MAX_PIPES                                8
  #define R6XX_MAX_PIPES_MASK                   0xff
  
 -/* PTE flags */
 -#define PTE_VALID                             (1 << 0)
 -#define PTE_SYSTEM                            (1 << 1)
 -#define PTE_SNOOPED                           (1 << 2)
 -#define PTE_READABLE                          (1 << 5)
 -#define PTE_WRITEABLE                         (1 << 6)
 -
  /* tiling bits */
  #define     ARRAY_LINEAR_GENERAL              0x00000000
  #define     ARRAY_LINEAR_ALIGNED              0x00000001
  #define       HDP_TILING_CONFIG                               0x2F3C
  #define HDP_DEBUG1                                      0x2F34
  
+ #define MC_CONFIG                                     0x2000
  #define MC_VM_AGP_TOP                                 0x2184
  #define MC_VM_AGP_BOT                                 0x2188
  #define       MC_VM_AGP_BASE                                  0x218C
  #define MC_VM_FB_LOCATION                             0x2180
- #define MC_VM_L1_TLB_MCD_RD_A_CNTL                    0x219C
+ #define MC_VM_L1_TLB_MCB_RD_UVD_CNTL                  0x2124
  #define       ENABLE_L1_TLB                                   (1 << 0)
  #define               ENABLE_L1_FRAGMENT_PROCESSING                   (1 << 1)
  #define               ENABLE_L1_STRICT_ORDERING                       (1 << 2)
  #define               EFFECTIVE_L1_QUEUE_SIZE(x)                      (((x) & 7) << 15)
  #define               EFFECTIVE_L1_QUEUE_SIZE_MASK                    0x00038000
  #define               EFFECTIVE_L1_QUEUE_SIZE_SHIFT                   15
+ #define MC_VM_L1_TLB_MCD_RD_A_CNTL                    0x219C
  #define MC_VM_L1_TLB_MCD_RD_B_CNTL                    0x21A0
  #define MC_VM_L1_TLB_MCB_RD_GFX_CNTL                  0x21FC
  #define MC_VM_L1_TLB_MCB_RD_HDP_CNTL                  0x2204
  #define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL                 0x2208
  #define MC_VM_L1_TLB_MCB_RD_SEM_CNTL                  0x220C
  #define       MC_VM_L1_TLB_MCB_RD_SYS_CNTL                    0x2200
+ #define MC_VM_L1_TLB_MCB_WR_UVD_CNTL                  0x212c
  #define MC_VM_L1_TLB_MCD_WR_A_CNTL                    0x21A4
  #define MC_VM_L1_TLB_MCD_WR_B_CNTL                    0x21A8
  #define MC_VM_L1_TLB_MCB_WR_GFX_CNTL                  0x2210
  #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR                       0x2194
  #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR            0x2198
  
+ #define RS_DQ_RD_RET_CONF                             0x2348
  #define       PA_CL_ENHANCE                                   0x8A14
  #define               CLIP_VTX_REORDER_ENA                            (1 << 0)
  #define               NUM_CLIP_SEQ(x)                                 ((x) << 1)
  #       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
  #       define SELECTABLE_DEEMPHASIS                      (1 << 6)
  
+ /* Audio */
+ #define AZ_HOT_PLUG_CONTROL               0x7300
+ #       define AZ_FORCE_CODEC_WAKE        (1 << 0)
+ #       define JACK_DETECTION_ENABLE      (1 << 4)
+ #       define UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+ #       define CODEC_HOT_PLUG_ENABLE      (1 << 12)
+ #       define AUDIO_ENABLED              (1 << 31)
+ /* DCE3 adds */
+ #       define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+ #       define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+ #       define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+ #       define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+ #       define PIN0_AUDIO_ENABLED         (1 << 24)
+ #       define PIN1_AUDIO_ENABLED         (1 << 25)
+ #       define PIN2_AUDIO_ENABLED         (1 << 26)
+ #       define PIN3_AUDIO_ENABLED         (1 << 27)
  /* Audio clocks DCE 2.0/3.0 */
  #define AUDIO_DTO                         0x7340
  #       define AUDIO_DTO_PHASE(x)         (((x) & 0xffff) << 0)
  #define UVD_CGC_GATE                                  0xf4a8
  #define UVD_LMI_CTRL2                                 0xf4f4
  #define UVD_MASTINT_EN                                        0xf500
+ #define UVD_FW_START                                  0xf51C
  #define UVD_LMI_ADDR_EXT                              0xf594
  #define UVD_LMI_CTRL                                  0xf598
  #define UVD_LMI_SWAP_CNTL                             0xf5b4
  #define UVD_MPC_SET_MUX                                       0xf5f4
  #define UVD_MPC_SET_ALU                                       0xf5f8
  
+ #define UVD_VCPU_CACHE_OFFSET0                                0xf608
+ #define UVD_VCPU_CACHE_SIZE0                          0xf60c
+ #define UVD_VCPU_CACHE_OFFSET1                                0xf610
+ #define UVD_VCPU_CACHE_SIZE1                          0xf614
+ #define UVD_VCPU_CACHE_OFFSET2                                0xf618
+ #define UVD_VCPU_CACHE_SIZE2                          0xf61c
  #define UVD_VCPU_CNTL                                 0xf660
  #define UVD_SOFT_RESET                                        0xf680
  #define               RBC_SOFT_RESET                                  (1<<0)
  
  #define UVD_CONTEXT_ID                                        0xf6f4
  
+ /* rs780 only */
+ #define       GFX_MACRO_BYPASS_CNTL                           0x30c0
+ #define               SPLL_BYPASS_CNTL                        (1 << 0)
+ #define               UPLL_BYPASS_CNTL                        (1 << 1)
+ #define CG_UPLL_FUNC_CNTL                             0x7e0
+ #     define UPLL_RESET_MASK                          0x00000001
+ #     define UPLL_SLEEP_MASK                          0x00000002
+ #     define UPLL_BYPASS_EN_MASK                      0x00000004
  #     define UPLL_CTLREQ_MASK                         0x00000008
+ #     define UPLL_FB_DIV(x)                           ((x) << 4)
+ #     define UPLL_FB_DIV_MASK                         0x0000FFF0
+ #     define UPLL_REF_DIV(x)                          ((x) << 16)
+ #     define UPLL_REF_DIV_MASK                        0x003F0000
+ #     define UPLL_REFCLK_SRC_SEL_MASK                 0x20000000
  #     define UPLL_CTLACK_MASK                         0x40000000
  #     define UPLL_CTLACK2_MASK                        0x80000000
+ #define CG_UPLL_FUNC_CNTL_2                           0x7e4
+ #     define UPLL_SW_HILEN(x)                         ((x) << 0)
+ #     define UPLL_SW_LOLEN(x)                         ((x) << 4)
+ #     define UPLL_SW_HILEN2(x)                        ((x) << 8)
+ #     define UPLL_SW_LOLEN2(x)                        ((x) << 12)
+ #     define UPLL_DIVEN_MASK                          0x00010000
+ #     define UPLL_DIVEN2_MASK                         0x00020000
+ #     define UPLL_SW_MASK                             0x0003FFFF
+ #     define VCLK_SRC_SEL(x)                          ((x) << 20)
+ #     define VCLK_SRC_SEL_MASK                        0x01F00000
+ #     define DCLK_SRC_SEL(x)                          ((x) << 25)
+ #     define DCLK_SRC_SEL_MASK                        0x3E000000
  
  /*
   * PM4
index 3247bfd144106d54fa24161f898f24ae538f9a3c,e01424fe284877a9b697dbc1b0234a42b62c385e..f7c4b226a284a162fcd088210d85b7b3ff57a838
@@@ -65,6 -65,8 +65,8 @@@
  #include <linux/list.h>
  #include <linux/kref.h>
  #include <linux/interval_tree.h>
+ #include <linux/hashtable.h>
+ #include <linux/fence.h>
  
  #include <ttm/ttm_bo_api.h>
  #include <ttm/ttm_bo_driver.h>
@@@ -72,6 -74,8 +74,8 @@@
  #include <ttm/ttm_module.h>
  #include <ttm/ttm_execbuf_util.h>
  
+ #include <drm/drm_gem.h>
  #include "radeon_family.h"
  #include "radeon_mode.h"
  #include "radeon_reg.h"
@@@ -106,7 -110,6 +110,7 @@@ extern int radeon_vm_block_size
  extern int radeon_deep_color;
  extern int radeon_use_pflipirq;
  extern int radeon_bapm;
 +extern int radeon_backlight;
  
  /*
   * Copy from radeon_drv.h so we don't have to include both and have conflicting
  #define RADEONFB_CONN_LIMIT                   4
  #define RADEON_BIOS_NUM_SCRATCH                       8
  
- /* fence seq are set to this number when signaled */
- #define RADEON_FENCE_SIGNALED_SEQ             0LL
  /* internal ring indices */
  /* r1xx+ has gfx CP ring */
  #define RADEON_RING_TYPE_GFX_INDEX            0
@@@ -350,28 -350,32 +351,32 @@@ extern void evergreen_tiling_fields(uns
   * Fences.
   */
  struct radeon_fence_driver {
+       struct radeon_device            *rdev;
        uint32_t                        scratch_reg;
        uint64_t                        gpu_addr;
        volatile uint32_t               *cpu_addr;
        /* sync_seq is protected by ring emission lock */
        uint64_t                        sync_seq[RADEON_NUM_RINGS];
        atomic64_t                      last_seq;
-       bool                            initialized;
+       bool                            initialized, delayed_irq;
+       struct delayed_work             lockup_work;
  };
  
  struct radeon_fence {
+       struct fence base;
        struct radeon_device            *rdev;
-       struct kref                     kref;
-       /* protected by radeon_fence.lock */
        uint64_t                        seq;
        /* RB, DMA, etc. */
        unsigned                        ring;
+       wait_queue_t                    fence_wake;
  };
  
  int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
  int radeon_fence_driver_init(struct radeon_device *rdev);
  void radeon_fence_driver_fini(struct radeon_device *rdev);
- void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+ void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
  int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
  void radeon_fence_process(struct radeon_device *rdev, int ring);
  bool radeon_fence_signaled(struct radeon_fence *fence);
@@@ -469,7 -473,7 +474,7 @@@ struct radeon_bo 
        struct list_head                list;
        /* Protected by tbo.reserved */
        u32                             initial_domain;
-       u32                             placements[3];
+       struct ttm_place                placements[3];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
  
        struct ttm_bo_kmap_obj          dma_buf_vmap;
        pid_t                           pid;
+       struct radeon_mn                *mn;
+       struct interval_tree_node       mn_it;
  };
  #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
  
@@@ -580,8 -587,12 +588,12 @@@ bool radeon_semaphore_emit_signal(struc
                                  struct radeon_semaphore *semaphore);
  bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
                                struct radeon_semaphore *semaphore);
- void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
-                             struct radeon_fence *fence);
+ void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
+                                struct radeon_fence *fence);
+ int radeon_semaphore_sync_resv(struct radeon_device *rdev,
+                              struct radeon_semaphore *semaphore,
+                              struct reservation_object *resv,
+                              bool shared);
  int radeon_semaphore_sync_rings(struct radeon_device *rdev,
                                struct radeon_semaphore *semaphore,
                                int waiting_ring);
@@@ -702,7 -713,7 +714,7 @@@ struct radeon_flip_work 
        uint64_t                        base;
        struct drm_pending_vblank_event *event;
        struct radeon_bo                *old_rbo;
-       struct radeon_fence             *fence;
+       struct fence                    *fence;
  };
  
  struct r500_irq_stat_regs {
@@@ -780,6 -791,7 +792,7 @@@ struct radeon_irq 
  int radeon_irq_kms_init(struct radeon_device *rdev);
  void radeon_irq_kms_fini(struct radeon_device *rdev);
  void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+ bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
  void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
  void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
  void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
@@@ -1642,7 -1654,8 +1655,8 @@@ int radeon_uvd_get_create_msg(struct ra
                              uint32_t handle, struct radeon_fence **fence);
  int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence);
- void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+                                      uint32_t allowed_domains);
  void radeon_uvd_free_handles(struct radeon_device *rdev,
                             struct drm_file *filp);
  int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
@@@ -1731,6 -1744,11 +1745,11 @@@ void radeon_test_ring_sync(struct radeo
                           struct radeon_ring *cpB);
  void radeon_test_syncing(struct radeon_device *rdev);
  
+ /*
+  * MMU Notifier
+  */
+ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
+ void radeon_mn_unregister(struct radeon_bo *bo);
  
  /*
   * Debugfs
@@@ -1845,24 -1863,24 +1864,24 @@@ struct radeon_asic 
        } display;
        /* copy functions for bo handling */
        struct {
-               int (*blit)(struct radeon_device *rdev,
-                           uint64_t src_offset,
-                           uint64_t dst_offset,
-                           unsigned num_gpu_pages,
-                           struct radeon_fence **fence);
+               struct radeon_fence *(*blit)(struct radeon_device *rdev,
+                                            uint64_t src_offset,
+                                            uint64_t dst_offset,
+                                            unsigned num_gpu_pages,
+                                            struct reservation_object *resv);
                u32 blit_ring_index;
-               int (*dma)(struct radeon_device *rdev,
-                          uint64_t src_offset,
-                          uint64_t dst_offset,
-                          unsigned num_gpu_pages,
-                          struct radeon_fence **fence);
+               struct radeon_fence *(*dma)(struct radeon_device *rdev,
+                                           uint64_t src_offset,
+                                           uint64_t dst_offset,
+                                           unsigned num_gpu_pages,
+                                           struct reservation_object *resv);
                u32 dma_ring_index;
                /* method used for bo copy */
-               int (*copy)(struct radeon_device *rdev,
-                           uint64_t src_offset,
-                           uint64_t dst_offset,
-                           unsigned num_gpu_pages,
-                           struct radeon_fence **fence);
+               struct radeon_fence *(*copy)(struct radeon_device *rdev,
+                                            uint64_t src_offset,
+                                            uint64_t dst_offset,
+                                            unsigned num_gpu_pages,
+                                            struct reservation_object *resv);
                /* ring used for bo copies */
                u32 copy_ring_index;
        } copy;
@@@ -2144,6 -2162,8 +2163,8 @@@ int radeon_gem_info_ioctl(struct drm_de
                          struct drm_file *filp);
  int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp);
+ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *filp);
  int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
  int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@@ -2300,6 -2320,7 +2321,7 @@@ struct radeon_device 
        struct radeon_mman              mman;
        struct radeon_fence_driver      fence_drv[RADEON_NUM_RINGS];
        wait_queue_head_t               fence_queue;
+       unsigned                        fence_context;
        struct mutex                    ring_lock;
        struct radeon_ring              ring[RADEON_NUM_RINGS];
        bool                            ib_pool_ready;
        bool                            need_dma32;
        bool                            accel_working;
        bool                            fastfb_working; /* IGP feature*/
-       bool                            needs_reset;
+       bool                            needs_reset, in_reset;
        struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
        const struct firmware *me_fw;   /* all family ME firmware */
        const struct firmware *pfp_fw;  /* r6/700 PFP firmware */
        struct radeon_mec mec;
        struct work_struct hotplug_work;
        struct work_struct audio_work;
-       struct work_struct reset_work;
        int num_crtc; /* number of crtcs */
        struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
        bool has_uvd;
        /* tracking pinned memory */
        u64 vram_pin_size;
        u64 gart_pin_size;
+       struct mutex    mn_lock;
+       DECLARE_HASHTABLE(mn_hash, 7);
  };
  
  bool radeon_is_px(struct drm_device *dev);
@@@ -2431,7 -2454,17 +2455,17 @@@ void cik_mm_wdoorbell(struct radeon_dev
  /*
   * Cast helper
   */
- #define to_radeon_fence(p) ((struct radeon_fence *)(p))
+ extern const struct fence_ops radeon_fence_ops;
+ static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+ {
+       struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
+       if (__f->base.ops == &radeon_fence_ops)
+               return __f;
+       return NULL;
+ }
  
  /*
   * Registers read & write functions.
@@@ -2751,18 -2784,25 +2785,25 @@@ void radeon_atombios_fini(struct radeon
  /*
   * RING helpers.
   */
- #if DRM_DEBUG_CODE == 0
+ /**
+  * radeon_ring_write - write a value to the ring
+  *
+  * @ring: radeon_ring structure holding ring information
+  * @v: dword (dw) value to write
+  *
+  * Write a value to the requested ring buffer (all asics).
+  */
  static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
  {
+       if (ring->count_dw <= 0)
+               DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
        ring->ring_free_dw--;
  }
- #else
- /* With debugging this is just too big to inline */
- void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
- #endif
  
  /*
   * ASICs macro.
  #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
  #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
  #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
- #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
- #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
- #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
+ #define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
+ #define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
+ #define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
  #define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
  #define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
  #define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
@@@ -2877,6 -2917,10 +2918,10 @@@ extern void radeon_legacy_set_clock_gat
  extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
  extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
  extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+ extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+                                    uint32_t flags);
+ extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
+ extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
  extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
  extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
  extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
@@@ -2934,10 -2978,10 +2979,10 @@@ struct r600_audio_pin *r600_audio_get_p
  struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
  void r600_audio_enable(struct radeon_device *rdev,
                       struct r600_audio_pin *pin,
-                      bool enable);
+                      u8 enable_mask);
  void dce6_audio_enable(struct radeon_device *rdev,
                       struct r600_audio_pin *pin,
-                      bool enable);
+                      u8 enable_mask);
  
  /*
   * R600 vram scratch functions
index 2dd5847f9b98e8a428e63b9ff9563f52b94d2709,d91f965e82190375a9dd13c4a3bf22f9bcd28878..850de57069bec0effcadd4a55c6b1abead242314
@@@ -185,6 -185,7 +185,6 @@@ static struct radeon_asic_ring r100_gfx
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
 -      .hdp_flush = &r100_ring_hdp_flush,
  };
  
  static struct radeon_asic r100_asic = {
@@@ -331,6 -332,7 +331,6 @@@ static struct radeon_asic_ring r300_gfx
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
 -      .hdp_flush = &r100_ring_hdp_flush,
  };
  
  static struct radeon_asic r300_asic = {
@@@ -963,6 -965,19 +963,19 @@@ static struct radeon_asic r600_asic = 
        },
  };
  
+ static struct radeon_asic_ring rv6xx_uvd_ring = {
+       .ib_execute = &uvd_v1_0_ib_execute,
+       .emit_fence = &uvd_v1_0_fence_emit,
+       .emit_semaphore = &uvd_v1_0_semaphore_emit,
+       .cs_parse = &radeon_uvd_cs_parse,
+       .ring_test = &uvd_v1_0_ring_test,
+       .ib_test = &uvd_v1_0_ib_test,
+       .is_lockup = &radeon_ring_test_lockup,
+       .get_rptr = &uvd_v1_0_get_rptr,
+       .get_wptr = &uvd_v1_0_get_wptr,
+       .set_wptr = &uvd_v1_0_set_wptr,
+ };
  static struct radeon_asic rv6xx_asic = {
        .init = &r600_init,
        .fini = &r600_fini,
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
                [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+               [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
        },
        .irq = {
                .set = &r600_irq_set,
@@@ -1072,6 -1088,7 +1086,7 @@@ static struct radeon_asic rs780_asic = 
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
                [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+               [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
        },
        .irq = {
                .set = &r600_irq_set,
@@@ -2296,7 -2313,15 +2311,15 @@@ int radeon_asic_init(struct radeon_devi
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->asic = &rs780_asic;
-               rdev->has_uvd = true;
+               /* 760G/780V/880V don't have UVD */
+               if ((rdev->pdev->device == 0x9616)||
+                   (rdev->pdev->device == 0x9611)||
+                   (rdev->pdev->device == 0x9613)||
+                   (rdev->pdev->device == 0x9711)||
+                   (rdev->pdev->device == 0x9713))
+                       rdev->has_uvd = false;
+               else
+                       rdev->has_uvd = true;
                break;
        case CHIP_RV770:
        case CHIP_RV730:
index 7756bc1e1cd3ef088ac4bb96fa04a9ab632ad088,c41363f4fc1a8739b1b8a1fd534d6b8530ffe115..d8ace5b28a5b2e1455b5776129022300b3d1be3e
@@@ -81,11 -81,11 +81,11 @@@ bool r100_semaphore_ring_emit(struct ra
  int r100_cs_parse(struct radeon_cs_parser *p);
  void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
  uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
-                  uint64_t src_offset,
-                  uint64_t dst_offset,
-                  unsigned num_gpu_pages,
-                  struct radeon_fence **fence);
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+                                   uint64_t src_offset,
+                                   uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv);
  int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
@@@ -148,15 -148,16 +148,15 @@@ u32 r100_gfx_get_wptr(struct radeon_dev
                      struct radeon_ring *ring);
  void r100_gfx_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring);
 -void r100_ring_hdp_flush(struct radeon_device *rdev,
 -                       struct radeon_ring *ring);
 +
  /*
   * r200,rv250,rs300,rv280
   */
extern int r200_copy_dma(struct radeon_device *rdev,
-                        uint64_t src_offset,
-                        uint64_t dst_offset,
-                        unsigned num_gpu_pages,
-                        struct radeon_fence **fence);
struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
+                                  uint64_t src_offset,
+                                  uint64_t dst_offset,
+                                  unsigned num_gpu_pages,
+                                  struct reservation_object *resv);
  void r200_set_safe_registers(struct radeon_device *rdev);
  
  /*
@@@ -340,12 -341,14 +340,14 @@@ int r600_dma_ib_test(struct radeon_devi
  void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
  int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
  int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
- int r600_copy_cpdma(struct radeon_device *rdev,
-                   uint64_t src_offset, uint64_t dst_offset,
-                   unsigned num_gpu_pages, struct radeon_fence **fence);
- int r600_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset, uint64_t dst_offset,
-                 unsigned num_gpu_pages, struct radeon_fence **fence);
+ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+                                    uint64_t src_offset, uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv);
+ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+                                  uint64_t src_offset, uint64_t dst_offset,
+                                  unsigned num_gpu_pages,
+                                  struct reservation_object *resv);
  void r600_hpd_init(struct radeon_device *rdev);
  void r600_hpd_fini(struct radeon_device *rdev);
  bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@@ -389,7 -392,6 +391,6 @@@ void r600_disable_interrupts(struct rad
  void r600_rlc_stop(struct radeon_device *rdev);
  /* r600 audio */
  int r600_audio_init(struct radeon_device *rdev);
- struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
  void r600_audio_fini(struct radeon_device *rdev);
  void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
  void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
@@@ -461,10 -463,10 +462,10 @@@ bool rv770_page_flip_pending(struct rad
  void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
  void r700_cp_stop(struct radeon_device *rdev);
  void r700_cp_fini(struct radeon_device *rdev);
int rv770_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset, uint64_t dst_offset,
-                 unsigned num_gpu_pages,
-                  struct radeon_fence **fence);
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
+                                   uint64_t src_offset, uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv);
  u32 rv770_get_xclk(struct radeon_device *rdev);
  int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
  int rv770_get_temp(struct radeon_device *rdev);
@@@ -535,10 -537,10 +536,10 @@@ void evergreen_dma_fence_ring_emit(stru
                                   struct radeon_fence *fence);
  void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
                                   struct radeon_ib *ib);
int evergreen_copy_dma(struct radeon_device *rdev,
-                      uint64_t src_offset, uint64_t dst_offset,
-                      unsigned num_gpu_pages,
-                      struct radeon_fence **fence);
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+                                       uint64_t src_offset, uint64_t dst_offset,
+                                       unsigned num_gpu_pages,
+                                       struct reservation_object *resv);
  void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
  void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
  int evergreen_get_temp(struct radeon_device *rdev);
@@@ -700,10 -702,10 +701,10 @@@ int si_vm_init(struct radeon_device *rd
  void si_vm_fini(struct radeon_device *rdev);
  void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
  int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int si_copy_dma(struct radeon_device *rdev,
-               uint64_t src_offset, uint64_t dst_offset,
-               unsigned num_gpu_pages,
-               struct radeon_fence **fence);
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
+                                uint64_t src_offset, uint64_t dst_offset,
+                                unsigned num_gpu_pages,
+                                struct reservation_object *resv);
  
  void si_dma_vm_copy_pages(struct radeon_device *rdev,
                          struct radeon_ib *ib,
@@@ -759,14 -761,14 +760,14 @@@ bool cik_sdma_semaphore_ring_emit(struc
                                  struct radeon_semaphore *semaphore,
                                  bool emit_wait);
  void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cik_copy_dma(struct radeon_device *rdev,
-                uint64_t src_offset, uint64_t dst_offset,
-                unsigned num_gpu_pages,
-                struct radeon_fence **fence);
int cik_copy_cpdma(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_gpu_pages,
-                  struct radeon_fence **fence);
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+                                 uint64_t src_offset, uint64_t dst_offset,
+                                 unsigned num_gpu_pages,
+                                 struct reservation_object *resv);
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+                                   uint64_t src_offset, uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv);
  int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
  int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
  bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@@ -882,6 -884,7 +883,7 @@@ uint32_t uvd_v1_0_get_wptr(struct radeo
                             struct radeon_ring *ring);
  void uvd_v1_0_set_wptr(struct radeon_device *rdev,
                         struct radeon_ring *ring);
+ int uvd_v1_0_resume(struct radeon_device *rdev);
  
  int uvd_v1_0_init(struct radeon_device *rdev);
  void uvd_v1_0_fini(struct radeon_device *rdev);
@@@ -889,6 -892,8 +891,8 @@@ int uvd_v1_0_start(struct radeon_devic
  void uvd_v1_0_stop(struct radeon_device *rdev);
  
  int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+ void uvd_v1_0_fence_emit(struct radeon_device *rdev,
+                        struct radeon_fence *fence);
  int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
  bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
                             struct radeon_ring *ring,
index 12c8329644c4af4e8fe5224fbbad5d888d580f28,6fbab15821128b44bc88d36a14f2e3894df857d7..f41cc1538e4851fcafcec22e29ffcfb07d7b0c98
@@@ -123,10 -123,6 +123,10 @@@ static struct radeon_px_quirk radeon_px
         * https://bugzilla.kernel.org/show_bug.cgi?id=51381
         */
        { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
 +      /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
 +       * https://bugzilla.kernel.org/show_bug.cgi?id=51381
 +       */
 +      { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
        /* macbook pro 8.2 */
        { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
        { 0, 0, 0, 0, 0 },
@@@ -434,7 -430,7 +434,7 @@@ int radeon_wb_init(struct radeon_devic
  
        if (rdev->wb.wb_obj == NULL) {
                r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-                                    RADEON_GEM_DOMAIN_GTT, 0, NULL,
+                                    RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
                                     &rdev->wb.wb_obj);
                if (r) {
                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@@ -1257,6 -1253,7 +1257,7 @@@ int radeon_device_init(struct radeon_de
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
                rdev->ring[i].idx = i;
        }
+       rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
  
        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
                radeon_family_name[rdev->family], pdev->vendor, pdev->device,
        init_rwsem(&rdev->pm.mclk_lock);
        init_rwsem(&rdev->exclusive_lock);
        init_waitqueue_head(&rdev->irq.vblank_queue);
+       mutex_init(&rdev->mn_lock);
+       hash_init(rdev->mn_hash);
        r = radeon_gem_init(rdev);
        if (r)
                return r;
  
        r = radeon_init(rdev);
        if (r)
 -              return r;
 +              goto failed;
  
-       r = radeon_ib_ring_tests(rdev);
-       if (r)
-               DRM_ERROR("ib ring test failed (%d).\n", r);
        r = radeon_gem_debugfs_init(rdev);
        if (r) {
                DRM_ERROR("registering gem debugfs failed (%d).\n", r);
                radeon_agp_disable(rdev);
                r = radeon_init(rdev);
                if (r)
 -                      return r;
 +                      goto failed;
        }
  
+       r = radeon_ib_ring_tests(rdev);
+       if (r)
+               DRM_ERROR("ib ring test failed (%d).\n", r);
        if ((radeon_testing & 1)) {
                if (rdev->accel_working)
                        radeon_test_moves(rdev);
                        DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
        }
        return 0;
 +
 +failed:
 +      if (runtime)
 +              vga_switcheroo_fini_domain_pm_ops(rdev->dev);
 +      return r;
  }
  
  static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@@ -1464,8 -1458,6 +1467,8 @@@ void radeon_device_fini(struct radeon_d
        radeon_bo_evict_vram(rdev);
        radeon_fini(rdev);
        vga_switcheroo_unregister_client(rdev->pdev);
 +      if (rdev->flags & RADEON_IS_PX)
 +              vga_switcheroo_fini_domain_pm_ops(rdev->dev);
        vga_client_register(rdev->pdev, NULL, NULL, NULL);
        if (rdev->rio_mem)
                pci_iounmap(rdev->pdev, rdev->rio_mem);
@@@ -1497,7 -1489,6 +1500,6 @@@ int radeon_suspend_kms(struct drm_devic
        struct drm_crtc *crtc;
        struct drm_connector *connector;
        int i, r;
-       bool force_completion = false;
  
        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
                r = radeon_fence_wait_empty(rdev, i);
                if (r) {
                        /* delay GPU reset to resume */
-                       force_completion = true;
+                       radeon_fence_driver_force_completion(rdev, i);
                }
        }
-       if (force_completion) {
-               radeon_fence_driver_force_completion(rdev);
-       }
  
        radeon_save_bios_scratch_regs(rdev);
  
@@@ -1686,8 -1674,6 +1685,6 @@@ int radeon_gpu_reset(struct radeon_devi
                return 0;
        }
  
-       rdev->needs_reset = false;
        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
                }
        }
  
- retry:
        r = radeon_asic_reset(rdev);
        if (!r) {
                dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
  
        radeon_restore_bios_scratch_regs(rdev);
  
-       if (!r) {
-               for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               if (!r && ring_data[i]) {
                        radeon_ring_restore(rdev, &rdev->ring[i],
                                            ring_sizes[i], ring_data[i]);
-                       ring_sizes[i] = 0;
-                       ring_data[i] = NULL;
-               }
-               r = radeon_ib_ring_tests(rdev);
-               if (r) {
-                       dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
-                       if (saved) {
-                               saved = false;
-                               radeon_suspend(rdev);
-                               goto retry;
-                       }
-               }
-       } else {
-               radeon_fence_driver_force_completion(rdev);
-               for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               } else {
+                       radeon_fence_driver_force_completion(rdev, i);
                        kfree(ring_data[i]);
                }
        }
        /* reset hpd state */
        radeon_hpd_init(rdev);
  
+       ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+       rdev->in_reset = true;
+       rdev->needs_reset = false;
+       downgrade_write(&rdev->exclusive_lock);
        drm_helper_resume_force_mode(rdev->ddev);
  
        /* set the power state here in case we are a PX system or headless */
        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
                radeon_pm_compute_clocks(rdev);
  
-       ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-       if (r) {
+       if (!r) {
+               r = radeon_ib_ring_tests(rdev);
+               if (r && saved)
+                       r = -EAGAIN;
+       } else {
                /* bad news, how to tell it to userspace ? */
                dev_info(rdev->dev, "GPU reset failed\n");
        }
  
-       up_write(&rdev->exclusive_lock);
+       rdev->needs_reset = r == -EAGAIN;
+       rdev->in_reset = false;
+       up_read(&rdev->exclusive_lock);
        return r;
  }
  
index f9d17b29b343443de2f986b6008a9991085ff518,69c6a835bcd5598e46dea006b83891336eddb67a..dcffa30ee2db3d2b990abc4089967c204984be5d
@@@ -38,6 -38,8 +38,8 @@@
  #include <linux/module.h>
  #include <linux/pm_runtime.h>
  #include <linux/vga_switcheroo.h>
+ #include <drm/drm_gem.h>
  #include "drm_crtc_helper.h"
  /*
   * KMS wrapper.
@@@ -83,7 -85,7 +85,7 @@@
   *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
   *   2.39.0 - Add INFO query for number of active CUs
   *   2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
 - *            CS to GPU
 + *            CS to GPU on >= r600
   */
  #define KMS_DRIVER_MAJOR      2
  #define KMS_DRIVER_MINOR      40
@@@ -114,6 -116,9 +116,9 @@@ int radeon_gem_object_open(struct drm_g
                                struct drm_file *file_priv);
  void radeon_gem_object_close(struct drm_gem_object *obj,
                                struct drm_file *file_priv);
+ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+                                       struct drm_gem_object *gobj,
+                                       int flags);
  extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
                                      unsigned int flags,
                                      int *vpos, int *hpos, ktime_t *stime,
@@@ -130,7 -135,7 +135,7 @@@ int radeon_mode_dumb_create(struct drm_
                            struct drm_mode_create_dumb *args);
  struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
  struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
-                                                       size_t size,
+                                                       struct dma_buf_attachment *,
                                                        struct sg_table *sg);
  int radeon_gem_prime_pin(struct drm_gem_object *obj);
  void radeon_gem_prime_unpin(struct drm_gem_object *obj);
@@@ -181,7 -186,6 +186,7 @@@ int radeon_vm_block_size = -1
  int radeon_deep_color = 0;
  int radeon_use_pflipirq = 2;
  int radeon_bapm = -1;
 +int radeon_backlight = -1;
  
  MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
  module_param_named(no_wb, radeon_no_wb, int, 0444);
@@@ -264,9 -268,6 +269,9 @@@ module_param_named(use_pflipirq, radeon
  MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
  module_param_named(bapm, radeon_bapm, int, 0444);
  
 +MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
 +module_param_named(backlight, radeon_backlight, int, 0444);
 +
  static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
  };
@@@ -309,7 -310,7 +314,7 @@@ static const struct file_operations rad
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
-       .mmap = drm_mmap,
+       .mmap = drm_legacy_mmap,
        .poll = drm_poll,
        .read = drm_read,
  #ifdef CONFIG_COMPAT
@@@ -329,6 -330,7 +334,7 @@@ static struct drm_driver driver_old = 
        .preclose = radeon_driver_preclose,
        .postclose = radeon_driver_postclose,
        .lastclose = radeon_driver_lastclose,
+       .set_busid = drm_pci_set_busid,
        .unload = radeon_driver_unload,
        .suspend = radeon_suspend,
        .resume = radeon_resume,
@@@ -444,7 -446,6 +450,7 @@@ static int radeon_pmops_runtime_suspend
        ret = radeon_suspend_kms(drm_dev, false, false);
        pci_save_state(pdev);
        pci_disable_device(pdev);
 +      pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3cold);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
  
@@@ -553,6 -554,7 +559,7 @@@ static struct drm_driver kms_driver = 
        .preclose = radeon_driver_preclose_kms,
        .postclose = radeon_driver_postclose_kms,
        .lastclose = radeon_driver_lastclose_kms,
+       .set_busid = drm_pci_set_busid,
        .unload = radeon_driver_unload_kms,
        .get_vblank_counter = radeon_get_vblank_counter_kms,
        .enable_vblank = radeon_enable_vblank_kms,
  
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = drm_gem_prime_export,
+       .gem_prime_export = radeon_gem_prime_export,
        .gem_prime_import = drm_gem_prime_import,
        .gem_prime_pin = radeon_gem_prime_pin,
        .gem_prime_unpin = radeon_gem_prime_unpin,
index 15edf23b465c2ba90cca2f2ffb7c242971bda2bf,109843dab5e596710ebeb3cf931bbc8523ab7c96..9a19e52cc655bf8b3c697a7116e52e81b8060096
@@@ -158,43 -158,10 +158,43 @@@ radeon_get_encoder_enum(struct drm_devi
        return ret;
  }
  
 +static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
 +                                       struct drm_connector *connector)
 +{
 +      struct drm_device *dev = radeon_encoder->base.dev;
 +      struct radeon_device *rdev = dev->dev_private;
 +      bool use_bl = false;
 +
 +      if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
 +              return;
 +
 +      if (radeon_backlight == 0) {
 +              return;
 +      } else if (radeon_backlight == 1) {
 +              use_bl = true;
 +      } else if (radeon_backlight == -1) {
 +              /* Quirks */
 +              /* Amilo Xi 2550 only works with acpi bl */
 +              if ((rdev->pdev->device == 0x9583) &&
 +                  (rdev->pdev->subsystem_vendor == 0x1734) &&
 +                  (rdev->pdev->subsystem_device == 0x1107))
 +                      use_bl = false;
 +              else
 +                      use_bl = true;
 +      }
 +
 +      if (use_bl) {
 +              if (rdev->is_atom_bios)
 +                      radeon_atom_backlight_init(radeon_encoder, connector);
 +              else
 +                      radeon_legacy_backlight_init(radeon_encoder, connector);
 +              rdev->mode_info.bl_encoder = radeon_encoder;
 +      }
 +}
 +
  void
  radeon_link_encoder_connector(struct drm_device *dev)
  {
 -      struct radeon_device *rdev = dev->dev_private;
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        struct drm_encoder *encoder;
                        radeon_encoder = to_radeon_encoder(encoder);
                        if (radeon_encoder->devices & radeon_connector->devices) {
                                drm_mode_connector_attach_encoder(connector, encoder);
 -                              if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 -                                      if (rdev->is_atom_bios)
 -                                              radeon_atom_backlight_init(radeon_encoder, connector);
 -                                      else
 -                                              radeon_legacy_backlight_init(radeon_encoder, connector);
 -                                      rdev->mode_info.bl_encoder = radeon_encoder;
 -                              }
 +                              if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
 +                                      radeon_encoder_add_backlight(radeon_encoder, connector);
                        }
                }
        }
@@@ -410,3 -382,24 +410,24 @@@ bool radeon_dig_monitor_is_duallink(str
        }
  }
  
+ bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+ {
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+       case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_DDI:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               return true;
+       default:
+               return false;
+       }
+ }
index 3a0b973e8a96ef9f9b9aefb6e39a0459fd03ceff,423a8cd052aafb4ab8b23dce1b949b3f75f5c637..eeea5b6a1775ee002f36682b7d092ab3b449d913
@@@ -4684,7 -4684,7 +4684,7 @@@ static int si_vm_packet3_compute_check(
  int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
  {
        int ret = 0;
-       u32 idx = 0;
+       u32 idx = 0, i;
        struct radeon_cs_packet pkt;
  
        do {
                switch (pkt.type) {
                case RADEON_PACKET_TYPE0:
                        dev_err(rdev->dev, "Packet0 not allowed!\n");
+                       for (i = 0; i < ib->length_dw; i++) {
+                               if (i == idx)
+                                       printk("\t0x%08x <---\n", ib->ptr[i]);
+                               else
+                                       printk("\t0x%08x\n", ib->ptr[i]);
+                       }
                        ret = -EINVAL;
                        break;
                case RADEON_PACKET_TYPE2:
@@@ -6316,17 -6322,17 +6322,17 @@@ static inline u32 si_get_ih_wptr(struc
                wptr = RREG32(IH_RB_WPTR);
  
        if (wptr & RB_OVERFLOW) {
 +              wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
 -              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
 -                      wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
 +              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
 +                       wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
 -              wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
  }
@@@ -6664,13 -6670,13 +6670,13 @@@ restart_ih
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
 +              WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
 -      WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
  
        /* make sure wptr hasn't changed while processing */
index b188478277598e8c9079e3731f3168c14f04de97,16392b674d7950a1b631888ddf25c71a35670a90..9cb222e2996f5e5391f7832ce9649bff580fd0ce
@@@ -87,8 -87,6 +87,8 @@@ static int imx_drm_driver_unload(struc
        drm_vblank_cleanup(drm);
        drm_mode_config_cleanup(drm);
  
 +      platform_set_drvdata(drm->platformdev, NULL);
 +
        return 0;
  }
  
@@@ -429,7 -427,6 +429,7 @@@ static uint32_t imx_drm_find_crtc_mask(
  
        for (i = 0; i < MAX_CRTC; i++) {
                struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[i];
 +
                if (imx_drm_crtc && imx_drm_crtc->port == port)
                        return drm_crtc_mask(imx_drm_crtc->crtc);
        }
@@@ -441,7 -438,6 +441,7 @@@ static struct device_node *imx_drm_of_g
                const struct device_node *parent, struct device_node *prev)
  {
        struct device_node *node = of_graph_get_next_endpoint(parent, prev);
 +
        of_node_put(prev);
        return node;
  }
@@@ -475,7 -471,8 +475,7 @@@ int imx_drm_encoder_parse_of(struct drm
                crtc_mask |= mask;
        }
  
 -      if (ep)
 -              of_node_put(ep);
 +      of_node_put(ep);
        if (i == 0)
                return -ENOENT;
  
@@@ -531,6 -528,7 +531,7 @@@ static struct drm_driver imx_drm_drive
        .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
        .preclose               = imx_drm_driver_preclose,
+       .set_busid              = drm_platform_set_busid,
        .gem_free_object        = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
@@@ -650,36 -648,6 +651,36 @@@ static int imx_drm_platform_remove(stru
        return 0;
  }
  
 +#ifdef CONFIG_PM_SLEEP
 +static int imx_drm_suspend(struct device *dev)
 +{
 +      struct drm_device *drm_dev = dev_get_drvdata(dev);
 +
 +      /* The drm_dev is NULL before .load hook is called */
 +      if (drm_dev == NULL)
 +              return 0;
 +
 +      drm_kms_helper_poll_disable(drm_dev);
 +
 +      return 0;
 +}
 +
 +static int imx_drm_resume(struct device *dev)
 +{
 +      struct drm_device *drm_dev = dev_get_drvdata(dev);
 +
 +      if (drm_dev == NULL)
 +              return 0;
 +
 +      drm_helper_resume_force_mode(drm_dev);
 +      drm_kms_helper_poll_enable(drm_dev);
 +
 +      return 0;
 +}
 +#endif
 +
 +static SIMPLE_DEV_PM_OPS(imx_drm_pm_ops, imx_drm_suspend, imx_drm_resume);
 +
  static const struct of_device_id imx_drm_dt_ids[] = {
        { .compatible = "fsl,imx-display-subsystem", },
        { /* sentinel */ },
@@@ -692,7 -660,6 +693,7 @@@ static struct platform_driver imx_drm_p
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "imx-drm",
 +              .pm     = &imx_drm_pm_ops,
                .of_match_table = imx_drm_dt_ids,
        },
  };