Some nouveau regression fixes.
* 'linux-4.12' of git://github.com/skeggsb/linux:
drm/nouveau/fb/gf100-: Fix 32 bit wraparound in new ram detection
drm/nouveau/secboot/gm20b: fix the error return code in gm20b_secboot_tegra_read_wpr()
drm/nouveau/kms: Increase max retries in scanout position queries.
drm/nouveau/bios/bitP: check that table is long enough for optional pointers
drm/nouveau/fifo/nv40: no ctxsw for pre-nv44 mpeg engine
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Matthieu CASTET <castet.matthieu@free.fr>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
kernel and module base offset ASLR (Address Space
Layout Randomization).
+ kasan_multi_shot
+ [KNL] Force KASAN (Kernel Address Sanitizer) to print
+ a report on every invalid memory access. Without this
+ parameter, KASAN prints a report only for the first
+ invalid access.
+
keepinitrd [HW,ARM]
kernelcore= [KNL,X86,IA-64,PPC]
--- /dev/null
+Amlogic-specific extensions to the Synopsys DesignWare HDMI Controller
+======================================================================
+
+The Amlogic Meson Synopsys DesignWare integration is composed of:
+- A Synopsys DesignWare HDMI Controller IP
+- A TOP control block controlling the clocks and PHY
+- A custom HDMI PHY converting video to a TMDS signal
+ ___________________________________
+| HDMI TOP |<= HPD
+|___________________________________|
+| | |
+| Synopsys HDMI | HDMI PHY |=> TMDS
+| Controller |________________|
+|___________________________________|<=> DDC
+
+The HDMI TOP block only supports HPD sensing.
+The Synopsys HDMI Controller interrupt is routed through the
+TOP Block interrupt.
+Communication with the TOP block and the Synopsys HDMI Controller is done
+via a dedicated pair of address and read/write data registers.
+The HDMI PHY is configured by registers in the HHI register block.
+
+Pixel data arrives in 4:4:4 format from the VENC block, and the VPU HDMI mux
+selects either the ENCI encoder for the 576i or 480i formats or the ENCP
+encoder for all other formats, including interlaced HD formats.
+
+The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate
+DVI timings for the HDMI controller.
+
+The Amlogic Meson GXBB, GXL and GXM SoC families embed the Synopsys DesignWare
+HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
+audio source interfaces.
+
+Required properties:
+- compatible: value should be different for each SoC family, as follows:
+ - GXBB (S905) : "amlogic,meson-gxbb-dw-hdmi"
+ - GXL (S905X, S905D) : "amlogic,meson-gxl-dw-hdmi"
+ - GXM (S912) : "amlogic,meson-gxm-dw-hdmi"
+ followed by the common "amlogic,meson-gx-dw-hdmi"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The HDMI interrupt number
+- clocks, clock-names: must have the phandles to the HDMI iahb and isfr clocks,
+ and the Amlogic Meson venci clocks as described in
+ Documentation/devicetree/bindings/clock/clock-bindings.txt,
+ the clocks are SoC specific, the clock-names should be "iahb", "isfr", "venci"
+- resets, reset-names: must have the phandles to the HDMI apb, glue and phy
+ resets as described in:
+ Documentation/devicetree/bindings/reset/reset.txt,
+ the reset-names should be "hdmitx_apb", "hdmitx", "hdmitx_phy"
+
+Required nodes:
+
+The connections to the HDMI ports are modeled using the OF graph
+bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+The following table lists for each supported model the port number
+corresponding to each HDMI output and input.
+
+ Port 0 Port 1
+-----------------------------------------
+ S905 (GXBB) VENC Input TMDS Output
+ S905X (GXL) VENC Input TMDS Output
+ S905D (GXL) VENC Input TMDS Output
+ S912 (GXM) VENC Input TMDS Output
+
+Example:
+
+hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+};
+
+hdmi_tx: hdmi-tx@c883a000 {
+ compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+ reg = <0x0 0xc883a000 0x0 0x1c>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
+ resets = <&reset RESET_HDMITX_CAPB3>,
+ <&reset RESET_HDMI_SYSTEM_RESET>,
+ <&reset RESET_HDMI_TX>;
+ reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+ clocks = <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_CLK81>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+ clock-names = "isfr", "iahb", "venci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* VPU VENC Input */
+ hdmi_tx_venc_port: port@0 {
+ reg = <0>;
+
+ hdmi_tx_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+
+ /* TMDS Output */
+ hdmi_tx_tmds_port: port@1 {
+ reg = <1>;
+
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+};
"mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
"mediatek,<chip>-disp-mutex" - display mutex
"mediatek,<chip>-disp-od" - overdrive
+ the supported chips are mt2701 and mt8173.
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
merge and split function blocks).
"mediatek,<chip>-disp-ovl"
"mediatek,<chip>-disp-rdma"
"mediatek,<chip>-disp-wdma"
+ the supported chips are mt2701 and mt8173.
- larb: Should contain a phandle pointing to the local arbiter device as defined
in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
- iommus: Should point to the respective IOMMU block with master port as
Required properties:
- compatible: "mediatek,<chip>-dsi"
+ the supported chips are mt2701 and mt8173.
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks
Required properties:
- compatible: "mediatek,<chip>-mipi-tx"
+ the supported chips are mt2701 and mt8173.
- reg: Physical base address and length of the controller's registers
- clocks: PLL reference clock
- clock-output-names: name of the output clock line to the DSI encoder
--- /dev/null
+Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "ampire,am-480272h3tmqw-t01h"
+
+Optional properties:
+- power-supply: regulator to provide the supply voltage
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for RGB input.
+
+Example:
+ panel_rgb: panel-rgb {
+ compatible = "ampire,am-480272h3tmqw-t01h";
+ enable-gpios = <&gpioa 8 1>;
+ port {
+ panel_in_rgb: endpoint {
+ remote-endpoint = <&controller_out_rgb>;
+ };
+ };
+ };
--- /dev/null
+Samsung S6E3HA2 5.7" 1440x2560 AMOLED panel
+
+Required properties:
+ - compatible: "samsung,s6e3ha2"
+ - reg: the virtual channel number of a DSI peripheral
+ - vdd3-supply: I/O voltage supply
+ - vci-supply: voltage supply for analog circuits
+ - reset-gpios: a GPIO spec for the reset pin (active low)
+ - enable-gpios: a GPIO spec for the panel enable pin (active high)
+
+Optional properties:
+ - te-gpios: a GPIO spec for the tearing effect synchronization signal
+ gpio pin (active high)
+
+Example:
+&dsi {
+ ...
+
+ panel@0 {
+ compatible = "samsung,s6e3ha2";
+ reg = <0>;
+ vdd3-supply = <&ldo27_reg>;
+ vci-supply = <&ldo28_reg>;
+ reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
+ enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
+ te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
+ };
+};
--- /dev/null
+Sitronix ST7789V RGB panel with SPI control bus
+
+Required properties:
+ - compatible: "sitronix,st7789v"
+ - reg: Chip select of the panel on the SPI bus
+ - reset-gpios: a GPIO phandle for the reset pin
+ - power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+ - backlight: phandle to the backlight used
+
+The generic bindings for SPI slaves documented in [1] also apply.
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in [2]. This
+node should describe the panel's video bus.
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+[2]: Documentation/devicetree/bindings/graph.txt
+
+Example:
+
+panel@0 {
+ compatible = "sitronix,st7789v";
+ reg = <0>;
+ reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
+ backlight = <&pwm_bl>;
+ spi-max-frequency = <100000>;
+ spi-cpol;
+ spi-cpha;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&tcon0_out_panel>;
+ };
+ };
+};
--- /dev/null
+Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
+
+Required properties:
+- compatible: should be "winstar,wf35ltiacd"
+- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Example:
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 31 63 95 127 159 191 223 255>;
+ default-brightness-level = <191>;
+ power-supply = <&bl_reg>;
+ };
+
+ bl_reg: backlight_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "backlight-power-supply";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ panel: panel {
+ compatible = "winstar,wf35ltiacd", "simple-panel";
+ backlight = <&backlight>;
+ power-supply = <&panel_reg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel_input: endpoint {
+ remote-endpoint = <&hlcdc_panel_output>;
+ };
+ };
+ };
+
+ panel_reg: panel_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "panel-power-supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
Optional properties:
- power-domains: a phandle to mipi dsi power domain node.
+- resets: list of phandle + reset specifier pairs, as described in [3].
+- reset-names: string reset name, must be "apb".
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/media/video-interfaces.txt
+[3] Documentation/devicetree/bindings/reset/reset.txt
Example:
mipi_dsi: mipi@ff960000 {
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>;
clock-names = "ref", "pclk";
+ resets = <&cru SRST_MIPIDSI0>;
+ reset-names = "apb";
rockchip,grf = <&grf>;
status = "okay";
- reg : Offset and length of the register set for the module
- interrupts : the interrupt number for the RNG module.
Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
-- clocks: the trng clock source
+- clocks: the trng clock source. Only mandatory for the
+ "inside-secure,safexcel-eip76" compatible.
Example:
/* AM335x */
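+
+For the "inside-secure,safexcel-eip76" case, where the clock is mandatory,
+a node could look as sketched below (the address, interrupt and clock
+specifier values are purely illustrative, not taken from a real SoC):
+
+	rng@760000 {
+		compatible = "inside-secure,safexcel-eip76";
+		reg = <0x760000 0x7d>;
+		interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&syscon_clk 1 25>;
+	};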
wetek WeTek Electronics, limited.
wexler Wexler
winbond Winbond Electronics corp.
+winstar Winstar Display Corp.
wlf Wolfson Microelectronics
wm Wondermedia Technologies, Inc.
x-powers X-Powers
int (*permission) (struct inode *, int, unsigned int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct dentry *, struct kstat *,
- u32, unsigned int);
+ int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
void (*update_time)(struct inode *, struct timespec *, int);
[recommended]
->readlink is optional for symlinks. Don't set, unless filesystem needs
to fake something for readlink(2).
+--
+[mandatory]
+ ->getattr() is now passed a struct path rather than a vfsmount and
+ dentry separately, and it now has request_mask and query_flags arguments
+ to specify the fields and sync type requested by statx. Filesystems not
+ supporting any statx-specific features may ignore the new arguments.
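+
+	A minimal sketch of an instance using the new signature (the
+	"myfs" names are hypothetical, not from an in-tree filesystem):
+
+		static int myfs_getattr(const struct path *path,
+					struct kstat *stat,
+					u32 request_mask,
+					unsigned int query_flags)
+		{
+			/* the inode is now derived from the path */
+			struct inode *inode = d_inode(path->dentry);
+
+			/* fill in the common fields; request_mask and
+			   query_flags may be ignored when no statx-specific
+			   features are supported */
+			generic_fillattr(inode, stat);
+			return 0;
+		}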
int (*permission) (struct inode *, int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct dentry *, struct kstat *,
- u32, unsigned int);
+ int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,
--- /dev/null
+=======================================================
+ drm/bridge/dw-hdmi Synopsys DesignWare HDMI Controller
+=======================================================
+
+Synopsys DesignWare HDMI Controller
+===================================
+
+This section covers everything related to the Synopsys DesignWare HDMI
+Controller implemented as a DRM bridge.
+
+Supported Input Formats and Encodings
+-------------------------------------
+
+.. kernel-doc:: include/drm/bridge/dw_hdmi.h
+ :doc: Supported input formats and encodings
.. kernel-doc:: drivers/gpu/drm/drm_file.c
:export:
-IOCTLs
-------
-
-struct drm_ioctl_desc \*ioctls; int num_ioctls;
- Driver-specific ioctls descriptors table.
-
-Driver-specific ioctls numbers start at DRM_COMMAND_BASE. The ioctls
-descriptors table is indexed by the ioctl number offset from the base
-value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize
-the table entries.
-
-::
-
- DRM_IOCTL_DEF_DRV(ioctl, func, flags)
-
-``ioctl`` is the ioctl name. Drivers must define the DRM_##ioctl and
-DRM_IOCTL_##ioctl macros to the ioctl number offset from
-DRM_COMMAND_BASE and the ioctl number respectively. The first macro is
-private to the device while the second must be exposed to userspace in a
-public header.
-
-``func`` is a pointer to the ioctl handler function compatible with the
-``drm_ioctl_t`` type.
-
-::
-
- typedef int drm_ioctl_t(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-``flags`` is a bitmask combination of the following values. It restricts
-how the ioctl is allowed to be called.
-
-- DRM_AUTH - Only authenticated callers allowed
-
-- DRM_MASTER - The ioctl can only be called on the master file handle
-
-- DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
-
-- DRM_CONTROL_ALLOW - The ioctl can only be called on a control
- device
-
-- DRM_UNLOCKED - The ioctl handler will be called without locking the
- DRM global mutex. This is the enforced default for kms drivers (i.e.
- using the DRIVER_MODESET flag) and hence shouldn't be used any more
- for new drivers.
-
-.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
- :export:
-
-
Misc Utilities
==============
visible to user-space and accessible beyond open-file boundaries, they
cannot support render nodes.
+IOCTL Support on Device Nodes
+=============================
+
+.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
+ :doc: driver specific ioctls
+
+.. kernel-doc:: include/drm/drm_ioctl.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_ioc32.c
+ :export:
Testing and validation
======================
.. kernel-doc:: drivers/gpu/drm/drm_debugfs.c
:export:
+Sysfs Support
+=============
+
+.. kernel-doc:: drivers/gpu/drm/drm_sysfs.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_sysfs.c
+ :export:
+
+
VBlank event handling
=====================
drm-kms-helpers
drm-uapi
i915
+ meson
tinydrm
vc4
vga-switcheroo
vgaarbiter
+ bridge/dw-hdmi
todo
.. only:: subproject and html
--- /dev/null
+=============================================
+drm/meson AmLogic Meson Video Processing Unit
+=============================================
+
+.. kernel-doc:: drivers/gpu/drm/meson/meson_drv.c
+ :doc: Video Processing Unit
+
+Video Processing Unit
+=====================
+
+The Amlogic Meson Display controller is composed of several components
+that are documented below:
+
+.. code::
+
+ DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
+ | vd1 _______ _____________ _________________ | |
+ D |-------| |----| | | | | HDMI PLL |
+ D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK |
+ R |-------| |----| Processing | | | | |
+ | osd2 | | | |---| Enci ----------|----|-----VDAC------|
+ R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----|
+ A | osd1 | | | Blenders | | Encl ----------|----|---------------|
+ M |-------|______|----|____________| |________________| | |
+ ___|__________________________________________________________|_______________|
+
+Video Input Unit
+================
+
+.. kernel-doc:: drivers/gpu/drm/meson/meson_viu.c
+ :doc: Video Input Unit
+
+Video Post Processing
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/meson/meson_vpp.c
+ :doc: Video Post Processing
+
+Video Encoder
+=============
+
+.. kernel-doc:: drivers/gpu/drm/meson/meson_venc.c
+ :doc: Video Encoder
+
+Video Canvas Management
+=======================
+
+.. kernel-doc:: drivers/gpu/drm/meson/meson_canvas.c
+ :doc: Canvas
+
+Video Clocks
+============
+
+.. kernel-doc:: drivers/gpu/drm/meson/meson_vclk.c
+ :doc: Video Clocks
+
+HDMI Video Output
+=================
+
+.. kernel-doc:: drivers/gpu/drm/meson/meson_dw_hdmi.c
+ :doc: HDMI Output
With the recent ``drm_bus`` cleanup patches for 3.17 it is no longer required
to have a ``drm_bus`` structure set up. Drivers can directly set up the
``drm_device`` structure instead of relying on bus methods in ``drm_usb.c``
-and ``drm_platform.c``. The goal is to get rid of the driver's ``->load`` /
+and ``drm_pci.c``. The goal is to get rid of the driver's ``->load`` /
``->unload`` callbacks and open-code the load/unload sequence properly, using
the new two-stage ``drm_device`` setup/teardown.
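+
+A minimal sketch of the two-stage setup, with error handling and hardware
+init elided (the ``foo`` names are placeholders)::
+
+    struct drm_device *drm = drm_dev_alloc(&foo_driver, parent);
+
+    /* ... set up hardware, modeset, etc. ... */
+
+    drm_dev_register(drm, 0);
+    /* teardown: drm_dev_unregister() followed by drm_dev_unref() */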
following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and
``udl``.
-Contact: Daniel Vetter
+Contact: Daniel Vetter, respective driver maintainers
Switch to drm_connector_list_iter for any connector_list walking
----------------------------------------------------------------
the kerneldoc better. This should also allow more fine-grained ``#include``
directives.
+In the end no .c file should need to include ``drmP.h`` anymore.
+
Contact: Daniel Vetter
Add missing kerneldoc for exported functions
prevent security issues in those legacy IOCTLs from being exploited on modern
drivers. This has multiple possible subtasks:
-* Make sure legacy IOCTLs can't be used on modern drivers.
* Extract support code for legacy features into a ``drm-legacy.ko`` kernel
module and compile it only when one of the legacy drivers is enabled.
-* Extract legacy functions into their own headers and remove it that from the
- monolithic ``drmP.h`` header.
-* Remove any lingering cruft from the OS abstraction layer from modern
- drivers.
This is mostly done, the only thing left is to split up ``drm_irq.c`` into
legacy cruft and the parts needed by modern KMS drivers.
Outside DRM
===========
-
-Better kerneldoc
-----------------
-
-This is pretty much done, but there's some advanced topics:
-
-Come up with a way to hyperlink to struct members. Currently you can hyperlink
-to the struct using ``#struct_name``, but not to a member within. Would need
-buy-in from kerneldoc maintainers, and the big question is how to make it work
-without totally unsightly
-``drm_foo_bar_really_long_structure->even_longer_memeber`` all over the text
-which breaks text flow.
-
-Figure out how to integrate the asciidoc support for ascii-diagrams. We have a
-few of those (e.g. to describe mode timings), and asciidoc supports converting
-some ascii-art dialect into pngs. Would be really pretty to make that work.
-
-Contact: Daniel Vetter, Jani Nikula
-
-Jani is working on this already, hopefully lands in 4.8.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RGB101010-1X30:
+
+ - MEDIA_BUS_FMT_RGB101010_1X30
+ - 0x1018
+ -
+ - 0
+ - 0
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
.. raw:: latex
\endgroup
-On LVDS buses, usually each sample is transferred serialized in seven
-time slots per pixel clock, on three (18-bit) or four (24-bit)
-differential data pairs at the same time. The remaining bits are used
-for control signals as defined by SPWG/PSWG/VESA or JEIDA standards. The
-24-bit RGB format serialized in seven time slots on four lanes using
-JEIDA defined bit mapping will be named
-``MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA``, for example.
-.. raw:: latex
+The following table lists existing packed 36bit wide RGB formats.
- \begin{adjustbox}{width=\columnwidth}
+.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
-.. _v4l2-mbus-pixelcode-rgb-lvds:
+.. _v4l2-mbus-pixelcode-rgb-36:
-.. flat-table:: LVDS RGB formats
+.. raw:: latex
+
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
+
+.. flat-table:: 36bit RGB formats
:header-rows: 2
:stub-columns: 0
+ :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
* - Identifier
- Code
-
- -
- - :cspan:`3` Data organization
+ - :cspan:`35` Data organization
* -
-
- - Timeslot
- - Lane
+ - Bit
+ - 35
+ - 34
+ - 33
+ - 32
+ - 31
+ - 30
+ - 29
+ - 28
+ - 27
+ - 26
+ - 25
+ - 24
+ - 23
+ - 22
+ - 21
+ - 20
+ - 19
+ - 18
+ - 17
+ - 16
+ - 15
+ - 14
+ - 13
+ - 12
+ - 11
+ - 10
+ - 9
+ - 8
+ - 7
+ - 6
+ - 5
+ - 4
- 3
- 2
- 1
- 0
- * .. _MEDIA-BUS-FMT-RGB666-1X7X3-SPWG:
+ * .. _MEDIA-BUS-FMT-RGB121212-1X36:
- - MEDIA_BUS_FMT_RGB666_1X7X3_SPWG
- - 0x1010
- - 0
+ - MEDIA_BUS_FMT_RGB121212_1X36
+ - 0x1019
-
- -
- - d
- - b\ :sub:`1`
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
- g\ :sub:`0`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+
+.. raw:: latex
+
+ \endgroup
+
+
+The following table lists existing packed 48bit wide RGB formats.
+
+.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
+
+.. _v4l2-mbus-pixelcode-rgb-48:
+
+.. raw:: latex
+
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
+
+.. flat-table:: 48bit RGB formats
+ :header-rows: 3
+ :stub-columns: 0
+ :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
+
+ * - Identifier
+ - Code
+ -
+ - :cspan:`31` Data organization
* -
-
- - 1
+ - Bit
-
-
- - d
- - b\ :sub:`0`
- - r\ :sub:`5`
- * -
-
- - 2
-
-
- - d
- - g\ :sub:`5`
- - r\ :sub:`4`
- * -
-
- - 3
-
-
- - b\ :sub:`5`
- - g\ :sub:`4`
- - r\ :sub:`3`
- * -
-
- - 4
-
-
- - b\ :sub:`4`
- - g\ :sub:`3`
- - r\ :sub:`2`
- * -
-
- - 5
-
-
- - b\ :sub:`3`
- - g\ :sub:`2`
- - r\ :sub:`1`
+ -
+ -
+ - 47
+ - 46
+ - 45
+ - 44
+ - 43
+ - 42
+ - 41
+ - 40
+ - 39
+ - 38
+ - 37
+ - 36
+ - 35
+ - 34
+ - 33
+ - 32
* -
-
+ -
+ - 31
+ - 30
+ - 29
+ - 28
+ - 27
+ - 26
+ - 25
+ - 24
+ - 23
+ - 22
+ - 21
+ - 20
+ - 19
+ - 18
+ - 17
+ - 16
+ - 15
+ - 14
+ - 13
+ - 12
+ - 11
+ - 10
+ - 9
+ - 8
+ - 7
- 6
+ - 5
+ - 4
+ - 3
+ - 2
+ - 1
+ - 0
+ * .. _MEDIA-BUS-FMT-RGB161616-1X48:
+
+ - MEDIA_BUS_FMT_RGB161616_1X48
+ - 0x101a
-
-
- - b\ :sub:`2`
- - g\ :sub:`1`
- - r\ :sub:`0`
- * .. _MEDIA-BUS-FMT-RGB888-1X7X4-SPWG:
-
- - MEDIA_BUS_FMT_RGB888_1X7X4_SPWG
- - 0x1011
- - 0
-
- - d
- - d
- - b\ :sub:`1`
- - g\ :sub:`0`
- * -
-
- - 1
-
- - b\ :sub:`7`
- - d
- - b\ :sub:`0`
- - r\ :sub:`5`
- * -
-
- - 2
-
- - b\ :sub:`6`
- - d
- - g\ :sub:`5`
- - r\ :sub:`4`
- * -
-
- - 3
-
- - g\ :sub:`7`
- - b\ :sub:`5`
- - g\ :sub:`4`
- - r\ :sub:`3`
- * -
-
- - 4
-
- - g\ :sub:`6`
- - b\ :sub:`4`
- - g\ :sub:`3`
- - r\ :sub:`2`
- * -
-
- - 5
-
- - r\ :sub:`7`
- - b\ :sub:`3`
- - g\ :sub:`2`
- - r\ :sub:`1`
- * -
-
- - 6
-
- - r\ :sub:`6`
- - b\ :sub:`2`
- - g\ :sub:`1`
+ -
+ -
+ - r\ :sub:`15`
+ - r\ :sub:`14`
+ - r\ :sub:`13`
+ - r\ :sub:`12`
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ * -
+ -
+ -
+ - g\ :sub:`15`
+ - g\ :sub:`14`
+ - g\ :sub:`13`
+ - g\ :sub:`12`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - b\ :sub:`15`
+ - b\ :sub:`14`
+ - b\ :sub:`13`
+ - b\ :sub:`12`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+
+.. raw:: latex
+
+ \endgroup
+
+On LVDS buses, usually each sample is transferred serialized in seven
+time slots per pixel clock, on three (18-bit) or four (24-bit)
+differential data pairs at the same time. The remaining bits are used
+for control signals as defined by SPWG/PSWG/VESA or JEIDA standards. The
+24-bit RGB format serialized in seven time slots on four lanes using
+JEIDA defined bit mapping will be named
+``MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA``, for example.
+
+.. raw:: latex
+
+ \begin{adjustbox}{width=\columnwidth}
+
+.. _v4l2-mbus-pixelcode-rgb-lvds:
+
+.. flat-table:: LVDS RGB formats
+ :header-rows: 2
+ :stub-columns: 0
+
+ * - Identifier
+ - Code
+ -
+ -
+ - :cspan:`3` Data organization
+ * -
+ -
+ - Timeslot
+ - Lane
+ - 3
+ - 2
+ - 1
+ - 0
+ * .. _MEDIA-BUS-FMT-RGB666-1X7X3-SPWG:
+
+ - MEDIA_BUS_FMT_RGB666_1X7X3_SPWG
+ - 0x1010
+ - 0
+ -
+ -
+ - d
+ - b\ :sub:`1`
+ - g\ :sub:`0`
+ * -
+ -
+ - 1
+ -
+ -
+ - d
+ - b\ :sub:`0`
+ - r\ :sub:`5`
+ * -
+ -
+ - 2
+ -
+ -
+ - d
+ - g\ :sub:`5`
+ - r\ :sub:`4`
+ * -
+ -
+ - 3
+ -
+ -
+ - b\ :sub:`5`
+ - g\ :sub:`4`
+ - r\ :sub:`3`
+ * -
+ -
+ - 4
+ -
+ -
+ - b\ :sub:`4`
+ - g\ :sub:`3`
+ - r\ :sub:`2`
+ * -
+ -
+ - 5
+ -
+ -
+ - b\ :sub:`3`
+ - g\ :sub:`2`
+ - r\ :sub:`1`
+ * -
+ -
+ - 6
+ -
+ -
+ - b\ :sub:`2`
+ - g\ :sub:`1`
+ - r\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RGB888-1X7X4-SPWG:
+
+ - MEDIA_BUS_FMT_RGB888_1X7X4_SPWG
+ - 0x1011
+ - 0
+ -
+ - d
+ - d
+ - b\ :sub:`1`
+ - g\ :sub:`0`
+ * -
+ -
+ - 1
+ -
+ - b\ :sub:`7`
+ - d
+ - b\ :sub:`0`
+ - r\ :sub:`5`
+ * -
+ -
+ - 2
+ -
+ - b\ :sub:`6`
+ - d
+ - g\ :sub:`5`
+ - r\ :sub:`4`
+ * -
+ -
+ - 3
+ -
+ - g\ :sub:`7`
+ - b\ :sub:`5`
+ - g\ :sub:`4`
+ - r\ :sub:`3`
+ * -
+ -
+ - 4
+ -
+ - g\ :sub:`6`
+ - b\ :sub:`4`
+ - g\ :sub:`3`
+ - r\ :sub:`2`
+ * -
+ -
+ - 5
+ -
+ - r\ :sub:`7`
+ - b\ :sub:`3`
+ - g\ :sub:`2`
+ - r\ :sub:`1`
+ * -
+ -
+ - 6
+ -
+ - r\ :sub:`6`
+ - b\ :sub:`2`
+ - g\ :sub:`1`
- r\ :sub:`0`
* .. _MEDIA-BUS-FMT-RGB888-1X7X4-JEIDA:
- The number of bus samples per pixel. Pixels that are wider than the
bus width must be transferred in multiple samples. Common values are
- 1, 1.5 (encoded as 1_5) and 2.
+ 0.5 (encoded as 0_5; in this case two pixels are transferred per bus
+ sample), 1, 1.5 (encoded as 1_5) and 2.
- The bus width. When the bus width is larger than the number of bits
per pixel component, several components are packed in a single bus
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UV8-1X8:
-
- - MEDIA_BUS_FMT_UV8_1X8
- - 0x2015
+ * .. _MEDIA-BUS-FMT-UV8-1X8:
+
+ - MEDIA_BUS_FMT_UV8_1X8
+ - 0x2015
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYVY8-1_5X8:
+
+ - MEDIA_BUS_FMT_UYVY8_1_5X8
+ - 0x2002
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYUY8-1_5X8:
+
+ - MEDIA_BUS_FMT_VYUY8_1_5X8
+ - 0x2003
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
-
-
-
-
-
-
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UYVY8-1_5X8:
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YUYV8-1_5X8:
- - MEDIA_BUS_FMT_UYVY8_1_5X8
- - 0x2002
+ - MEDIA_BUS_FMT_YUYV8_1_5X8
+ - 0x2004
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
-
-
-
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * -
- -
+ * .. _MEDIA-BUS-FMT-YVYU8-1_5X8:
+
+ - MEDIA_BUS_FMT_YVYU8_1_5X8
+ - 0x2005
-
-
-
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VYUY8-1_5X8:
-
- - MEDIA_BUS_FMT_VYUY8_1_5X8
- - 0x2003
+ * -
+ -
-
-
-
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYVY8-2X8:
+
+ - MEDIA_BUS_FMT_UYVY8_2X8
+ - 0x2006
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
* -
-
-
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
* -
-
-
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUYV8-1_5X8:
+ * .. _MEDIA-BUS-FMT-VYUY8-2X8:
- - MEDIA_BUS_FMT_YUYV8_1_5X8
- - 0x2004
+ - MEDIA_BUS_FMT_VYUY8_2X8
+ - 0x2007
-
-
-
-
-
-
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
* -
-
-
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * -
- -
+ * .. _MEDIA-BUS-FMT-YUYV8-2X8:
+
+ - MEDIA_BUS_FMT_YUYV8_2X8
+ - 0x2008
-
-
-
-
-
-
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YVYU8-1_5X8:
-
- - MEDIA_BUS_FMT_YVYU8_1_5X8
- - 0x2005
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ * -
+ -
-
-
-
-
-
-
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YVYU8-2X8:
+
+ - MEDIA_BUS_FMT_YVYU8_2X8
+ - 0x2009
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
-
-
-
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- * -
- -
- -
- -
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-Y10-1X10:
+
+ - MEDIA_BUS_FMT_Y10_1X10
+ - 0x200a
-
-
-
-
-
-
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UYVY8-2X8:
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYVY10-2X10:
- - MEDIA_BUS_FMT_UYVY8_2X8
- - 0x2006
- -
- -
+ - MEDIA_BUS_FMT_UYVY10_2X10
+ - 0x2018
-
-
-
-
-
-
+ - u\ :sub:`9`
+ - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
-
-
-
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
-
-
-
- -
- -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
-
-
-
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VYUY8-2X8:
+ * .. _MEDIA-BUS-FMT-VYUY10-2X10:
- - MEDIA_BUS_FMT_VYUY8_2X8
- - 0x2007
- -
- -
+ - MEDIA_BUS_FMT_VYUY10_2X10
+ - 0x2019
-
-
-
-
-
-
+ - v\ :sub:`9`
+ - v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
-
-
-
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
-
-
-
- -
- -
+ - u\ :sub:`9`
+ - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
-
-
-
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUYV8-2X8:
+ * .. _MEDIA-BUS-FMT-YUYV10-2X10:
- - MEDIA_BUS_FMT_YUYV8_2X8
- - 0x2008
- -
- -
+ - MEDIA_BUS_FMT_YUYV10_2X10
+ - 0x200b
-
-
-
-
-
-
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
-
-
-
- -
- -
+ - u\ :sub:`9`
+ - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
-
-
-
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
-
-
-
- -
- -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YVYU8-2X8:
+ * .. _MEDIA-BUS-FMT-YVYU10-2X10:
- - MEDIA_BUS_FMT_YVYU8_2X8
- - 0x2009
- -
- -
+ - MEDIA_BUS_FMT_YVYU10_2X10
+ - 0x200c
-
-
-
-
-
-
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
-
-
-
- -
- -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
-
-
-
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
-
-
-
- -
- -
+ - u\ :sub:`9`
+ - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- * .. _MEDIA-BUS-FMT-Y10-1X10:
+ * .. _MEDIA-BUS-FMT-Y12-1X12:
- - MEDIA_BUS_FMT_Y10_1X10
- - 0x200a
- -
- -
+ - MEDIA_BUS_FMT_Y12_1X12
+ - 0x2013
-
-
-
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UYVY10-2X10:
+ * .. _MEDIA-BUS-FMT-UYVY12-2X12:
- - MEDIA_BUS_FMT_UYVY10_2X10
- - 0x2018
- -
- -
+ - MEDIA_BUS_FMT_UYVY12_2X12
+ - 0x201c
-
-
-
-
-
-
+ - u\ :sub:`11`
+ - u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`8`
- u\ :sub:`7`
-
-
-
- -
- -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`8`
- v\ :sub:`7`
-
-
-
- -
- -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VYUY10-2X10:
+ * .. _MEDIA-BUS-FMT-VYUY12-2X12:
- - MEDIA_BUS_FMT_VYUY10_2X10
- - 0x2019
- -
- -
+ - MEDIA_BUS_FMT_VYUY12_2X12
+ - 0x201d
-
-
-
-
-
-
+ - v\ :sub:`11`
+ - v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`8`
- v\ :sub:`7`
-
-
-
- -
- -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- * -
- -
- -
+ * -
-
-
-
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUYV10-2X10:
+ * .. _MEDIA-BUS-FMT-YUYV12-2X12:
- - MEDIA_BUS_FMT_YUYV10_2X10
- - 0x200b
- -
- -
+ - MEDIA_BUS_FMT_YUYV12_2X12
+ - 0x201e
-
-
-
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`8`
- u\ :sub:`7`
-
-
-
- -
- -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YVYU10-2X10:
+ * .. _MEDIA-BUS-FMT-YVYU12-2X12:
- - MEDIA_BUS_FMT_YVYU10_2X10
- - 0x200c
- -
- -
+ - MEDIA_BUS_FMT_YVYU12_2X12
+ - 0x201f
-
-
-
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`8`
- v\ :sub:`7`
-
-
-
- -
- -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- * .. _MEDIA-BUS-FMT-Y12-1X12:
-
- - MEDIA_BUS_FMT_Y12_1X12
- - 0x2013
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UYVY12-2X12:
+ * .. _MEDIA-BUS-FMT-UYVY8-1X16:
- - MEDIA_BUS_FMT_UYVY12_2X12
- - 0x201c
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_UYVY8_1X16
+ - 0x200f
-
-
-
-
-
-
- - u\ :sub:`11`
- - u\ :sub:`10`
- - u\ :sub:`9`
- - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* -
-
-
-
-
-
- -
- -
- -
- -
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * -
- -
- -
- -
- -
- -
+ * .. _MEDIA-BUS-FMT-VYUY8-1X16:
+
+ - MEDIA_BUS_FMT_VYUY8_1X16
+ - 0x2010
-
-
-
-
-
-
- - v\ :sub:`11`
- - v\ :sub:`10`
- - v\ :sub:`9`
- - v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* -
-
-
-
-
-
- -
- -
- -
- -
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VYUY12-2X12:
+ * .. _MEDIA-BUS-FMT-YUYV8-1X16:
- - MEDIA_BUS_FMT_VYUY12_2X12
- - 0x201d
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_YUYV8_1X16
+ - 0x2011
-
-
-
-
-
-
- - v\ :sub:`11`
- - v\ :sub:`10`
- - v\ :sub:`9`
- - v\ :sub:`8`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
* -
-
-
-
-
-
- -
- -
- -
- -
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * -
- -
- -
- -
- -
- -
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YVYU8-1X16:
+
+ - MEDIA_BUS_FMT_YVYU8_1X16
+ - 0x2012
-
-
-
-
-
-
- - u\ :sub:`11`
- - u\ :sub:`10`
- - u\ :sub:`9`
- - u\ :sub:`8`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
* -
-
-
-
-
-
- -
- -
- -
- -
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUYV12-2X12:
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YDYUYDYV8-1X16:
- - MEDIA_BUS_FMT_YUYV12_2X12
- - 0x201e
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_YDYUYDYV8_1X16
+ - 0x2014
-
-
-
-
-
-
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - d
+ - d
+ - d
+ - d
+ - d
+ - d
+ - d
+ - d
* -
-
-
-
-
-
- -
- -
- -
- -
- - u\ :sub:`11`
- - u\ :sub:`10`
- - u\ :sub:`9`
- - u\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
-
-
-
- -
- -
- -
- -
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - d
+ - d
+ - d
+ - d
+ - d
+ - d
+ - d
+ - d
* -
-
-
-
-
-
- -
- -
- -
- -
- - v\ :sub:`11`
- - v\ :sub:`10`
- - v\ :sub:`9`
- - v\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YVYU12-2X12:
+ * .. _MEDIA-BUS-FMT-UYVY10-1X20:
- - MEDIA_BUS_FMT_YVYU12_2X12
- - 0x201f
- -
- -
- -
- -
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_UYVY10_1X20
+ - 0x201a
-
-
-
-
-
-
- - y\ :sub:`11`
- - y\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
- -
- -
- -
- -
- -
- -
- - v\ :sub:`11`
- - v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * -
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYUY10-1X20:
+
+ - MEDIA_BUS_FMT_VYUY10_1X20
+ - 0x201b
-
-
-
-
-
-
- - y\ :sub:`11`
- - y\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
-
-
-
- -
- -
- -
- -
- -
- -
- -
- -
- - u\ :sub:`11`
- - u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UYVY8-1X16:
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YUYV10-1X20:
- - MEDIA_BUS_FMT_UYVY8_1X16
- - 0x200f
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_YUYV10_1X20
+ - 0x200d
-
-
-
-
-
-
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
* -
-
-
-
-
-
- -
- -
- -
- -
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VYUY8-1X16:
-
- - MEDIA_BUS_FMT_VYUY8_1X16
- - 0x2010
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- * -
- -
- -
- -
- -
- -
+ * .. _MEDIA-BUS-FMT-YVYU10-1X20:
+
+ - MEDIA_BUS_FMT_YVYU10_1X20
+ - 0x200e
-
-
-
-
-
-
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUYV8-1X16:
-
- - MEDIA_BUS_FMT_YUYV8_1X16
- - 0x2011
- -
- -
- -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * -
-
-
-
-
-
-
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- * -
+ * .. _MEDIA-BUS-FMT-VUY8-1X24:
+
+ - MEDIA_BUS_FMT_VUY8_1X24
+ - 0x2024
-
-
-
-
-
-
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YUV8-1X24:
+
+ - MEDIA_BUS_FMT_YUV8_1X24
+ - 0x2025
-
-
-
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YVYU8-1X16:
+ * .. _MEDIA-BUS-FMT-UYYVYY8-0-5X24:
- - MEDIA_BUS_FMT_YVYU8_1X16
- - 0x2012
- -
- -
- -
- -
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_UYYVYY8_0_5X24
+ - 0x2026
-
-
-
-
-
-
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
* -
-
-
-
-
-
- -
- -
- -
- -
- -
- -
- -
- -
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYVY12-1X24:
+
+ - MEDIA_BUS_FMT_UYVY12_1X24
+ - 0x2020
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YDYUYDYV8-1X16:
-
- - MEDIA_BUS_FMT_YDYUYDYV8_1X16
- - 0x2014
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
-
-
-
-
-
-
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYUY12-1X24:
+
+ - MEDIA_BUS_FMT_VYUY12_1X24
+ - 0x2021
+ -
-
-
-
-
-
-
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- - d
- - d
- - d
- - d
- - d
- - d
- - d
- - d
* -
-
-
-
-
-
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YUYV12-1X24:
+
+ - MEDIA_BUS_FMT_YUYV12_1X24
+ - 0x2022
+ -
-
-
-
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`6`
- u\ :sub:`5`
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YVYU12-1X24:
+
+ - MEDIA_BUS_FMT_YVYU12_1X24
+ - 0x2023
+ -
-
-
-
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- - d
- - d
- - d
- - d
- - d
- - d
- - d
- - d
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
* -
-
-
-
-
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-YUV10-1X30:
+
+ - MEDIA_BUS_FMT_YUV10_1X30
+ - 0x2016
-
-
-
- -
- -
- -
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UYVY10-1X20:
+ * .. _MEDIA-BUS-FMT-UYYVYY10-0-5X30:
- - MEDIA_BUS_FMT_UYVY10_1X20
- - 0x201a
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_UYYVYY10_0_5X30
+ - 0x2027
-
-
-
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
-
-
-
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VYUY10-1X20:
-
- - MEDIA_BUS_FMT_VYUY10_1X20
- - 0x201b
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-AYUV8-1X32:
+
+ - MEDIA_BUS_FMT_AYUV8_1X32
+ - 0x2017
+ -
+ - a\ :sub:`7`
+ - a\ :sub:`6`
+ - a\ :sub:`5`
+ - a\ :sub:`4`
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+
+
+.. raw:: latex
+
+ \endgroup
+
+
+The following table lists existing packed 36bit wide YUV formats.
+
+.. raw:: latex
+
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
+
+.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
+
+.. _v4l2-mbus-pixelcode-yuv8-36bit:
+
+.. flat-table:: 36bit YUV Formats
+ :header-rows: 2
+ :stub-columns: 0
+ :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
+
+ * - Identifier
+ - Code
+ -
+ - :cspan:`35` Data organization
+ * -
+ -
+ - Bit
+ - 35
+ - 34
+ - 33
+ - 32
+ - 31
+ - 30
+ - 29
+ - 28
+ - 27
+ - 26
+ - 25
+ - 24
+ - 23
+ - 22
+ - 21
+ - 20
+ - 19
+ - 18
+ - 17
+ - 16
+ - 15
+ - 14
+ - 13
+ - 12
+ - 11
+ - 10
+ - 9
+ - 8
+ - 7
+ - 6
+ - 5
+ - 4
+ - 3
+ - 2
+ - 1
+ - 0
+ * .. _MEDIA-BUS-FMT-UYYVYY12-0-5X36:
+
+ - MEDIA_BUS_FMT_UYYVYY12_0_5X36
+ - 0x2028
+ -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
-
-
+ - v\ :sub:`11`
+ - v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- - u\ :sub:`9`
- - u\ :sub:`8`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUYV10-1X20:
+ * .. _MEDIA-BUS-FMT-YUV12-1X36:
- - MEDIA_BUS_FMT_YUYV10_1X20
- - 0x200d
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ - MEDIA_BUS_FMT_YUV12_1X36
+ - 0x2029
-
+ - y\ :sub:`11`
+ - y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`8`
- u\ :sub:`7`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- * -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`8`
- v\ :sub:`7`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YVYU10-1X20:
- - MEDIA_BUS_FMT_YVYU10_1X20
- - 0x200e
- -
- -
- -
- -
- -
- -
- -
+
+.. raw:: latex
+
+ \endgroup
+
+
+The following table lists existing packed 48bit wide YUV formats.
+
+.. raw:: latex
+
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
+
+.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
+
+.. _v4l2-mbus-pixelcode-yuv8-48bit:
+
+.. flat-table:: 48bit YUV Formats
+ :header-rows: 3
+ :stub-columns: 0
+ :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
+
+ * - Identifier
+ - Code
-
+ - :cspan:`31` Data organization
+ * -
-
+ - Bit
-
-
-
-
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- - v\ :sub:`9`
- - v\ :sub:`8`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- * -
-
-
-
-
-
-
+ - 47
+ - 46
+ - 45
+ - 44
+ - 43
+ - 42
+ - 41
+ - 40
+ - 39
+ - 38
+ - 37
+ - 36
+ - 35
+ - 34
+ - 33
+ - 32
+ * -
-
-
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- - u\ :sub:`9`
- - u\ :sub:`8`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VUY8-1X24:
+ - 31
+ - 30
+ - 29
+ - 28
+ - 27
+ - 26
+ - 25
+ - 24
+ - 23
+ - 22
+ - 21
+ - 20
+ - 19
+ - 18
+ - 17
+ - 16
+ - 15
+ - 14
+ - 13
+ - 12
+ - 11
+ - 10
+ - 9
+ - 8
+ - 7
+ - 6
+ - 5
+ - 4
+ - 3
+ - 2
+ - 1
+ - 0
+ * .. _MEDIA-BUS-FMT-YUV16-1X48:
- - MEDIA_BUS_FMT_VUY8_1X24
- - 0x201a
- -
+ - MEDIA_BUS_FMT_YUV16_1X48
+ - 0x202a
-
-
-
-
-
-
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUV8-1X24:
-
- - MEDIA_BUS_FMT_YUV8_1X24
- - 0x2025
-
-
-
-
-
-
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`5`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-UYVY12-1X24:
-
- - MEDIA_BUS_FMT_UYVY12_1X24
- - 0x2020
- -
- -
- -
- -
- -
- -
- -
+ * -
-
-
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
- u\ :sub:`11`
- u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- * -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
- v\ :sub:`11`
- v\ :sub:`10`
- v\ :sub:`9`
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-VYUY12-1X24:
+ * .. _MEDIA-BUS-FMT-UYYVYY16-0-5X48:
- - MEDIA_BUS_FMT_VYUY12_1X24
- - 0x2021
- -
- -
+ - MEDIA_BUS_FMT_UYYVYY16_0_5X48
+ - 0x202b
-
-
-
-
-
-
- - v\ :sub:`11`
- - v\ :sub:`10`
- - v\ :sub:`9`
- - v\ :sub:`8`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- * -
-
-
-
-
-
-
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
- u\ :sub:`11`
- u\ :sub:`10`
- u\ :sub:`9`
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ * -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
- y\ :sub:`11`
- y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUYV12-1X24:
-
- - MEDIA_BUS_FMT_YUYV12_1X24
- - 0x2022
- -
- -
- -
- -
- -
- -
- -
- -
- -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
- y\ :sub:`11`
- y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- - u\ :sub:`11`
- - u\ :sub:`10`
- - u\ :sub:`9`
- - u\ :sub:`8`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
* -
-
-
-
-
-
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- - v\ :sub:`11`
- - v\ :sub:`10`
- - v\ :sub:`9`
- - v\ :sub:`8`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YVYU12-1X24:
-
- - MEDIA_BUS_FMT_YVYU12_1X24
- - 0x2023
- -
-
-
-
-
-
-
- - y\ :sub:`11`
- - y\ :sub:`10`
- - y\ :sub:`9`
- - y\ :sub:`8`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
- v\ :sub:`11`
- v\ :sub:`10`
- v\ :sub:`9`
* -
-
-
- -
- -
- -
- -
- -
- -
- -
- -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
- y\ :sub:`11`
- y\ :sub:`10`
- y\ :sub:`9`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- - u\ :sub:`11`
- - u\ :sub:`10`
- - u\ :sub:`9`
- - u\ :sub:`8`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
- * .. _MEDIA-BUS-FMT-YUV10-1X30:
-
- - MEDIA_BUS_FMT_YUV10_1X30
- - 0x2016
- -
- -
- -
- - y\ :sub:`9`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
- y\ :sub:`8`
- y\ :sub:`7`
- y\ :sub:`6`
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
- - u\ :sub:`9`
- - u\ :sub:`8`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
- - v\ :sub:`9`
- - v\ :sub:`8`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
- * .. _MEDIA-BUS-FMT-AYUV8-1X32:
-
- - MEDIA_BUS_FMT_AYUV8_1X32
- - 0x2017
- -
- - a\ :sub:`7`
- - a\ :sub:`6`
- - a\ :sub:`5`
- - a\ :sub:`4`
- - a\ :sub:`3`
- - a\ :sub:`2`
- - a\ :sub:`1`
- - a\ :sub:`0`
- - y\ :sub:`7`
- - y\ :sub:`6`
- - y\ :sub:`5`
- - y\ :sub:`4`
- - y\ :sub:`3`
- - y\ :sub:`2`
- - y\ :sub:`1`
- - y\ :sub:`0`
- - u\ :sub:`7`
- - u\ :sub:`6`
- - u\ :sub:`5`
- - u\ :sub:`4`
- - u\ :sub:`3`
- - u\ :sub:`2`
- - u\ :sub:`1`
- - u\ :sub:`0`
- - v\ :sub:`7`
- - v\ :sub:`6`
- - v\ :sub:`5`
- - v\ :sub:`4`
- - v\ :sub:`3`
- - v\ :sub:`2`
- - v\ :sub:`1`
- - v\ :sub:`0`
.. raw:: latex
int __init foo_probe(void)
{
+ int error;
+
struct pinctrl_dev *pctl;
- return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+ error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+ if (error)
+ return error;
+
+ return pinctrl_enable(pctl);
}
To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
.. code-block:: none
- Cc: <stable@vger.kernel.org> # 3.3.x-
+ Cc: <stable@vger.kernel.org> # 3.3.x
The tag has the meaning of:
__u32 pad;
};
+4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: system ioctl
+Parameters: u64 mce_cap (out)
+Returns: 0 on success, -1 on error
+
+Returns supported MCE capabilities. The u64 mce_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register. Supported
+capabilities will have the corresponding bits set.
+
+4.105 KVM_X86_SETUP_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: u64 mcg_cap (in)
+Returns: 0 on success,
+ -EFAULT if u64 mcg_cap cannot be read,
+ -EINVAL if the requested number of banks is invalid,
+ -EINVAL if requested MCE capability is not supported.
+
+Initializes MCE support for use. The u64 mcg_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register and
+specifies which capabilities should be enabled. The maximum
+supported number of error-reporting banks can be retrieved when
+checking for KVM_CAP_MCE. The supported capabilities can be
+retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
+
+4.106 KVM_X86_SET_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_x86_mce (in)
+Returns: 0 on success,
+ -EFAULT if struct kvm_x86_mce cannot be read,
+ -EINVAL if the bank number is invalid,
+ -EINVAL if VAL bit is not set in status field.
+
+Inject a machine check error (MCE) into the guest. The input
+parameter is:
+
+struct kvm_x86_mce {
+ __u64 status;
+ __u64 addr;
+ __u64 misc;
+ __u64 mcg_status;
+ __u8 bank;
+ __u8 pad1[7];
+ __u64 pad2[3];
+};
+
+If the MCE being reported is an uncorrected error, KVM will
+inject it as an MCE exception into the guest. If the guest
+MCG_STATUS register reports that an MCE is in progress, KVM
+causes a KVM_EXIT_SHUTDOWN vmexit.
+
+Otherwise, if the MCE is a corrected error, KVM will just
+store it in the corresponding bank (provided this bank is
+not holding a previously reported uncorrected error).
+
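As a usage illustration, below is a minimal userspace sketch of the three
ioctls above. It assumes kvm_fd (an open /dev/kvm) and vcpu_fd (from
KVM_CREATE_VCPU) already exist; the helper name and the choice of bank 0
are hypothetical, and the VAL (bit 63), UC (bit 61) and ADDRV (bit 58)
status bits follow the x86 MCA definition of IA32_MCi_STATUS rather than
anything stated here.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: inject an uncorrected MCE at guest address gpa. */
static int inject_uncorrected_mce(int kvm_fd, int vcpu_fd, uint64_t gpa)
{
        uint64_t mce_cap;
        struct kvm_x86_mce mce = { 0 };

        /* System ioctl: query host-supported MCE capabilities. */
        if (ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0)
                return -1;

        /* vcpu ioctl: enable MCE support; low 8 bits carry the bank count. */
        if (ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mce_cap) < 0)
                return -1;

        mce.status = (1ULL << 63) | (1ULL << 61) | (1ULL << 58); /* VAL|UC|ADDRV */
        mce.addr = gpa;
        mce.bank = 0;

        /* vcpu ioctl: inject the machine check into the guest. */
        return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
}
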
5. The kvm_run structure
------------------------
Bits for undefined preemption levels are RAZ/WI.
+ For historical reasons and to provide ABI compatibility with userspace, we
+ export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
+ field in the lower 5 bits of a word, meaning that userspace must always
+ use the lower 5 bits to communicate with the KVM device and must shift the
+ value left by 3 places to obtain the actual priority mask level.
+
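As a sketch of the conversion this implies (helper names are hypothetical;
only the 5-bit field and the shift by 3 come from the text above):

#include <stdint.h>

/* Userspace value (5-bit KVM device format) -> actual priority mask level. */
static inline uint32_t kvm_pmr_to_priority(uint32_t val)
{
        return (val & 0x1f) << 3;
}

/* Actual priority mask level -> value passed to the KVM device. */
static inline uint32_t priority_to_kvm_pmr(uint32_t prio)
{
        return (prio >> 3) & 0x1f;
}
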
Limitations:
- Priorities are not implemented, and registers are RAZ/WI
- Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
F: lib/lru_cache.c
F: Documentation/blockdev/drbd/
-DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
+DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
F: drivers/base/
F: fs/debugfs/
-F: fs/kernfs/
F: fs/sysfs/
F: include/linux/debugfs.h
F: include/linux/kobj*
S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mripard/linux.git
DRM DRIVERS FOR AMLOGIC SOCS
M: Neil Armstrong <narmstrong@baylibre.com>
S: Supported
F: drivers/gpu/drm/meson/
F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
-T: git git://anongit.freedesktop.org/drm/drm-meson
+F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
+F: Documentation/gpu/meson.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR EXYNOS
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
M: Vincent Abriou <vincent.abriou@st.com>
L: dri-devel@lists.freedesktop.org
-T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/sti
F: Documentation/devicetree/bindings/display/st,stih4xx.txt
S: Maintained
F: drivers/edac/mpc85xx_edac.[ch]
+EDAC-PND2
+M: Tony Luck <tony.luck@intel.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/pnd2_edac.[ch]
+
EDAC-PASEMI
M: Egor Martovetsky <egor@pasemi.com>
L: linux-edac@vger.kernel.org
F: net/bridge/
ETHERNET PHY LIBRARY
+M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: fs/autofs4/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
+M: Masahiro Yamada <yamada.masahiro@socionext.com>
M: Michal Marek <mmarek@suse.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
L: linux-kbuild@vger.kernel.org
S: Maintained
F: Documentation/kbuild/
F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/
+KERNFS
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M: Tejun Heo <tj@kernel.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+S: Supported
+F: include/linux/kernfs.h
+F: fs/kernfs/
+
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
W: http://kernel.org/pub/linux/utils/kernel/kexec/
F: block/partitions/ibm.c
S390 NETWORK DRIVERS
+M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/scsi/zfcp_*
S390 IUCV NETWORK LAYER
+M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
F: tools/virtio/
F: drivers/net/virtio_net.c
F: drivers/block/virtio_blk.c
-F: include/linux/virtio_*.h
+F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
+CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
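For reference, a standalone sketch of the 'asm goto' construct this check
probes for (a hypothetical example, not kernel code; x86 assembly, so it
needs a GCC that supports the feature on that architecture):

#include <stdio.h>

static int asm_goto_taken(void)
{
        /* Inline assembly that branches directly to a C label. */
        asm goto("jmp %l[taken]" : /* no outputs */ : : : taken);
        return 0;
taken:
        return 1;
}

int main(void)
{
        printf("asm goto branch taken: %d\n", asm_goto_taken());
        return 0;
}
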
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
/* copy relevant bits of struct timex. */
if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
- offsetof(struct timex32, time)))
+ offsetof(struct timex32, tick)))
return -EFAULT;
ret = do_adjtimex(&txc);
device_type = "cpu";
compatible = "snps,arc770d";
reg = <0>;
+ clocks = <&core_clk>;
};
};
device_type = "cpu";
compatible = "snps,archs38";
reg = <0>;
+ clocks = <&core_clk>;
};
};
cpu@0 {
device_type = "cpu";
- compatible = "snps,archs38xN";
+ compatible = "snps,archs38";
reg = <0>;
+ clocks = <&core_clk>;
+ };
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <1>;
+ clocks = <&core_clk>;
+ };
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <2>;
+ clocks = <&core_clk>;
+ };
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <3>;
+ clocks = <&core_clk>;
};
};
interrupts = <7>;
bus-width = <4>;
};
+ };
- /* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */
- uio_ev: uio@0xD0000000 {
- compatible = "generic-uio";
- reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
- reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
- interrupts = <23>;
- };
+ /*
+ * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
+ *
+ * This node is intentionally put outside of MB above because
+ * it maps areas outside of MB's 0xEz-0xFz.
+ */
+ uio_ev: uio@0xD0000000 {
+ compatible = "generic-uio";
+ reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
+ reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
+ interrupt-parent = <&mb_intc>;
+ interrupts = <23>;
};
};
void kretprobe_trampoline(void);
void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
#else
-static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
-{
-}
+#define trap_is_kprobe(address, regs)
#endif /* CONFIG_KPROBES */
#endif /* _ARC_KPROBES_H */
;################### Non TLB Exception Handling #############################
ENTRY(EV_SWI)
- flag 1
+ ; TODO: implement this
+ EXCEPTION_PROLOGUE
+ b ret_from_exception
END(EV_SWI)
ENTRY(EV_DivZero)
- flag 1
+ ; TODO: implement this
+ EXCEPTION_PROLOGUE
+ b ret_from_exception
END(EV_DivZero)
ENTRY(EV_DCError)
- flag 1
+ ; TODO: implement this
+ EXCEPTION_PROLOGUE
+ b ret_from_exception
END(EV_DCError)
; ---------------------------------------------
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/root_dev.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/console.h>
{
char *str;
int cpu_id = ptr_to_cpu(v);
- struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk");
- u32 freq = 0;
+ struct device *cpu_dev = get_cpu_device(cpu_id);
+ struct clk *cpu_clk;
+ unsigned long freq = 0;
if (!cpu_online(cpu_id)) {
seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
- of_property_read_u32(core_clk, "clock-frequency", &freq);
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
+ cpu_id);
+ } else {
+ freq = clk_get_rate(cpu_clk);
+ }
if (freq)
- seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
+ seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
freq / 1000000, (freq / 10000) % 100);
seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+ read_aux_reg(r);
+
/* Important to wait for flush to complete */
while (read_aux_reg(r) & SLC_CTRL_BUSY);
}
phy1: ethernet-phy@1 {
reg = <7>;
+ eee-broken-100tx;
+ eee-broken-1000t;
};
};
ti,non-removable;
bus-width = <4>;
cap-power-off-card;
+ keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+ bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <0>;
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+ bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <1>;
&i2c3 {
clock-frequency = <400000>;
at24@50 {
- compatible = "at24,24c02";
+ compatible = "atmel,24c64";
readonly;
reg = <0x50>;
};
opp-microvolt = <1200000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
-
- opp@1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1320000>;
- clock-latency-ns = <244144>; /* 8 32k periods */
- };
};
cpus {
operating-points-v2 = <&cpu0_opp_table>;
};
+ cpu@1 {
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
cpu@2 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <2>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
cpu@3 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <3>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
};
simple-audio-card,mclk-fs = <512>;
simple-audio-card,aux-devs = <&codec_analog>;
simple-audio-card,routing =
- "Left DAC", "Digital Left DAC",
- "Right DAC", "Digital Right DAC";
+ "Left DAC", "AIF1 Slot 0 Left",
+ "Right DAC", "AIF1 Slot 0 Right";
status = "disabled";
simple-audio-card,cpu {
if (__hyp_get_vectors() == hyp_default_vectors)
cpu_init_hyp_mode(NULL);
}
+
+ if (vgic_present)
+ kvm_vgic_init_cpu_hardware();
}
static void cpu_hyp_reset(void)
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
+ assert_spin_locked(&kvm->mmu_lock);
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
next = stage2_pgd_addr_end(addr, end);
if (!stage2_pgd_none(*pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
+ /*
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+ if (next != end)
+ cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
int idx;
idx = srcu_read_lock(&kvm->srcu);
+ down_read(&current->mm->mmap_sem);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+ up_read(&current->mm->mmap_sem);
srcu_read_unlock(&kvm->srcu, idx);
}
if (kvm->arch.pgd == NULL)
return;
+ spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ spin_unlock(&kvm->mmu_lock);
+
/* Free the HW pgd, one page at a time */
free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
kvm->arch.pgd = NULL;
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
+ down_read(&current->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
- return -EINVAL;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
- return ret;
+ goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+out:
+ up_read(&current->mm->mmap_sem);
return ret;
}
extern int omap4_mpuss_init(void);
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
unsigned int power_state)
omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
if (omap_secure_apis_support())
- boot_cpu = omap_read_auxcoreboot0();
+ boot_cpu = omap_read_auxcoreboot0() >> 9;
else
boot_cpu =
readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
#include "prm-regbits-44xx.h"
static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
#if defined(CONFIG_PM) && defined(CONFIG_SMP)
{}
#endif
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+ return old_cpu1_ns_pa_addr;
+}
+
/**
* omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
* The purpose of this function is to manage low power programming
void __init omap4_mpuss_early_init(void)
{
unsigned long startup_pa;
+ void __iomem *ns_pa_addr;
- if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+ if (!(soc_is_omap44xx() || soc_is_omap54xx()))
return;
sar_base = omap4_get_sar_ram_base();
- if (cpu_is_omap443x())
+ /* Save old NS_PA_ADDR for validity checks later on */
+ if (soc_is_omap44xx())
+ ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+ else
+ ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+ old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+ if (soc_is_omap443x())
startup_pa = __pa_symbol(omap4_secondary_startup);
- else if (cpu_is_omap446x())
+ else if (soc_is_omap446x())
startup_pa = __pa_symbol(omap4460_secondary_startup);
else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
else
startup_pa = __pa_symbol(omap5_secondary_startup);
- if (cpu_is_omap44xx())
+ if (soc_is_omap44xx())
writel_relaxed(startup_pa, sar_base +
CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
else
ldr r12, =0x103
dsb
smc #0
- mov r0, r0, lsr #9
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_read_auxcoreboot0)
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
+#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>
#define OMAP5_CORE_COUNT 0x2
+#define AUX_CORE_BOOT0_GP_RELEASE 0x020
+#define AUX_CORE_BOOT0_HS_RELEASE 0x200
+
struct omap_smp_config {
unsigned long cpu1_rstctrl_pa;
void __iomem *cpu1_rstctrl_va;
void __iomem *scu_base;
+ void __iomem *wakeupgen_base;
void *startup_addr;
};
static struct clockdomain *cpu1_clkdm;
static bool booted;
static struct powerdomain *cpu1_pwrdm;
- void __iomem *base = omap_get_wakeupgen_base();
/*
* Set synchronisation state between this boot processor
* A barrier is added to ensure that write buffer is drained
*/
if (omap_secure_apis_support())
- omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+ omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+ 0xfffffdff);
else
- writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+ writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+ cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
if (!cpu1_clkdm && !cpu1_pwrdm) {
cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
set_cpu_possible(i, true);
}
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+ if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+ return false;
+
+ return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+ unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+ bool needs_reset = false;
+ u32 released;
+
+ if (omap_secure_apis_support())
+ released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+ else
+ released = readl_relaxed(cfg.wakeupgen_base +
+ OMAP_AUX_CORE_BOOT_0) &
+ AUX_CORE_BOOT0_GP_RELEASE;
+ if (released) {
+ pr_warn("smp: CPU1 not parked?\n");
+
+ return;
+ }
+
+ cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+ OMAP_AUX_CORE_BOOT_1);
+ cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+ /* Did the configured secondary_startup() get overwritten? */
+ if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+ needs_reset = true;
+
+ /*
+ * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+ * deeper idle state in WFI and will wake to an invalid address.
+ */
+ if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+ !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+ needs_reset = true;
+
+ if (!needs_reset || !c->cpu1_rstctrl_va)
+ return;
+
+ pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+ cpu1_startup_pa, cpu1_ns_pa_addr);
+
+ writel_relaxed(1, c->cpu1_rstctrl_va);
+ readl_relaxed(c->cpu1_rstctrl_va);
+ writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
- void __iomem *base = omap_get_wakeupgen_base();
const struct omap_smp_config *c = NULL;
if (soc_is_omap443x())
/* Must preserve cfg.scu_base set earlier */
cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
cfg.startup_addr = c->startup_addr;
+ cfg.wakeupgen_base = omap_get_wakeupgen_base();
if (soc_is_dra74x() || soc_is_omap54xx()) {
if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
if (cfg.scu_base)
scu_enable(cfg.scu_base);
- /*
- * Reset CPU1 before configuring, otherwise kexec will
- * end up trying to use old kernel startup address.
- */
- if (cfg.cpu1_rstctrl_va) {
- writel_relaxed(1, cfg.cpu1_rstctrl_va);
- readl_relaxed(cfg.cpu1_rstctrl_va);
- writel_relaxed(0, cfg.cpu1_rstctrl_va);
- }
+ omap4_smp_maybe_reset_cpu1(&cfg);
/*
* Write the address of secondary startup routine into the
omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
else
writel_relaxed(__pa_symbol(cfg.startup_addr),
- base + OMAP_AUX_CORE_BOOT_1);
+ cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}
const struct smp_operations omap4_smp_ops __initconst = {
dev_err(dev, "failed to idle\n");
}
break;
+ case BUS_NOTIFY_BIND_DRIVER:
+ od = to_omap_device(pdev);
+ if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+ pm_runtime_status_suspended(dev)) {
+ od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+ pm_runtime_set_active(dev);
+ }
+ break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
select GPIOLIB
select MVEBU_MBUS
select PCI
+ select PHYLIB if NETDEVICES
select PLAT_ORION_LEGACY
help
Support for the following Marvell Orion 5x series SoCs:
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable. This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ * a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ * as we will try to flush the memory through a different alias to that
+ * actually being used (and the flushes are redundant.)
+ */
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
- struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+ unsigned long pfn = dma_to_pfn(dev, handle);
+ struct page *page;
int ret;
+ /* If the PFN is not valid, we do not have a struct page */
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+
+ page = pfn_to_page(pfn);
+
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (unlikely(ret))
return ret;
*/
static inline bool security_extensions_enabled(void)
{
- return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+ /* Check CPUID Identification Scheme before ID_PFR1 read */
+ if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+ return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+ return 0;
}
static unsigned long __init setup_vectors_base(void)
eth_data, &orion_ge11);
}
+#ifdef CONFIG_ARCH_ORION5X
/*****************************************************************************
* Ethernet switch
****************************************************************************/
struct mdio_board_info *bd;
unsigned int i;
+ if (!IS_BUILTIN(CONFIG_PHYLIB))
+ return;
+
for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
if (!strcmp(d->port_names[i], "cpu"))
break;
mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
}
+#endif
/*****************************************************************************
* I2C
#endif
if (p) {
- if (cur) {
+ if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+ /*
+ * Probe hit but conditional execution check failed,
+ * so just skip the instruction and continue as if
+ * nothing had happened.
+ * In this case, we can skip recursing check too.
+ */
+ singlestep_skip(p, regs);
+ } else if (cur) {
/* Kprobe is pending, so we're recursing. */
switch (kcb->kprobe_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_SS:
/* A pre- or post-handler probe got us here. */
kprobes_inc_nmissed_count(p);
save_previous_kprobe(kcb);
singlestep(p, regs, kcb);
restore_previous_kprobe(kcb);
break;
+ case KPROBE_REENTER:
+ /* A nested probe was hit in FIQ, it is a BUG */
+ pr_warn("Unrecoverable kprobe detected at %p.\n",
+ p->addr);
+ /* fall through */
default:
/* impossible cases */
BUG();
}
- } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+ } else {
/* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
}
reset_current_kprobe();
}
- } else {
- /*
- * Probe hit but conditional execution check failed,
- * so just skip the instruction and continue as if
- * nothing had happened.
- */
- singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/* another task is sharing our hash bucket */
continue;
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
- orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
break;
}
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
void __naked __kprobes_test_case_start(void)
{
__asm__ __volatile__ (
- "stmdb sp!, {r4-r11} \n\t"
+ "mov r2, sp \n\t"
+ "bic r3, r2, #7 \n\t"
+ "mov sp, r3 \n\t"
+ "stmdb sp!, {r2-r11} \n\t"
"sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"bic r0, lr, #1 @ r0 = inline data \n\t"
"mov r1, sp \n\t"
"movne pc, r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
- "ldmia sp!, {r4-r11} \n\t"
+ "ldmia sp!, {r2-r11} \n\t"
+ "mov sp, r2 \n\t"
"mov pc, r0 \n\t"
);
}
"bxne r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
- "ldmia sp!, {r4-r11} \n\t"
+ "ldmia sp!, {r2-r11} \n\t"
+ "mov sp, r2 \n\t"
"bx r0 \n\t"
);
}
usbphy: phy@01c19400 {
compatible = "allwinner,sun50i-a64-usb-phy";
reg = <0x01c19400 0x14>,
+ <0x01c1a800 0x4>,
<0x01c1b800 0x4>;
reg-names = "phy_ctrl",
+ "pmu0",
"pmu1";
clocks = <&ccu CLK_USB_PHY0>,
<&ccu CLK_USB_PHY1>;
#include <linux/compiler.h>
-#include <asm/sysreg.h>
-
#ifndef __ASSEMBLY__
struct task_struct;
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
- if (cpu_ops[any_cpu]->cpu_die)
+ if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
return true;
#endif
return false;
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-static const char *fault_name(unsigned int esr);
+struct fault_info {
+ int (*fn)(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs);
+ int sig;
+ int code;
+ const char *name;
+};
+
+static const struct fault_info fault_info[];
+
+static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+{
+ return fault_info + (esr & 63);
+}
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
struct pt_regs *regs)
{
struct siginfo si;
+ const struct fault_info *inf;
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
+ inf = esr_to_fault_info(esr);
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
- tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
+ tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
show_pte(tsk->mm, addr);
show_regs(regs);
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
+ const struct fault_info *inf;
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
*/
- if (user_mode(regs))
- __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
- else
+ if (user_mode(regs)) {
+ inf = esr_to_fault_info(esr);
+ __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+ } else
__do_kernel_fault(mm, addr, esr, regs);
}
return 1;
}
-static const struct fault_info {
- int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
- int sig;
- int code;
- const char *name;
-} fault_info[] = {
+static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "ttbr address size fault" },
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
{ do_bad, SIGBUS, 0, "unknown 63" },
};
-static const char *fault_name(unsigned int esr)
-{
- const struct fault_info *inf = fault_info + (esr & 63);
- return inf->name;
-}
-
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- const struct fault_info *inf = fault_info + (esr & 63);
+ const struct fault_info *inf = esr_to_fault_info(esr);
struct siginfo info;
if (!inf->fn(addr, esr, regs))
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
- } else if (ps == (PAGE_SIZE * CONT_PTES)) {
- hugetlb_add_hstate(CONT_PTE_SHIFT);
- } else if (ps == (PMD_SIZE * CONT_PMDS)) {
- hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
hugetlb_bad_size();
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
- if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
- hugetlb_add_hstate(CONT_PTE_SHIFT);
- return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
0, sizeof(*regs));
}
-static int gpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
- struct pt_regs *regs = task_pt_regs(target);
-
- /* Don't copyin TSR or CSR */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs,
- 0, PT_TSR * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- PT_TSR * sizeof(long),
- (PT_TSR + 1) * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs,
- (PT_TSR + 1) * sizeof(long),
- PT_CSR * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- PT_CSR * sizeof(long),
- (PT_CSR + 1) * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs,
- (PT_CSR + 1) * sizeof(long), -1);
- return ret;
-}
-
enum c6x_regset {
REGSET_GPR,
};
.size = sizeof(u32),
.align = sizeof(u32),
.get = gpr_get,
- .set = gpr_set
},
};
long *reg = (long *)&regs;
/* build user regs in buffer */
- for (r = 0; r < ARRAY_SIZE(register_offset); r++)
+ BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+ for (r = 0; r < sizeof(regs) / sizeof(long); r++)
*reg++ = h8300_get_reg(target, r);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
long *reg;
/* build user regs in buffer */
- for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+ BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+ for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
*reg++ = h8300_get_reg(target, r);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
return ret;
/* write back to pt_regs */
- for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+ for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++)
h8300_put_reg(target, r, *reg++);
return 0;
}
--- /dev/null
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
$(obj)/__divdi3.o: $(src)/idiv64.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
$(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
$(obj)/__moddi3.o: $(src)/idiv64.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
$(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
$(obj)/__divsi3.o: $(src)/idiv32.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
$(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
$(obj)/__modsi3.o: $(src)/idiv32.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
$(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
- $(call if_changed_dep,as_o_S)
+ $(call if_changed_rule,as_o_S)
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_ATARILANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_HPLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
CONFIG_MVME147_NET=y
CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68030=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MVME147_NET=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3X=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
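
The added volatile qualifier is what lets callers hand in pointers to volatile bitmaps without the compiler rejecting the call for discarding qualifiers. A minimal standalone sketch of the same helper (32-bit longs assumed, matching the nr >> 5 indexing above):

#include <stdio.h>

/* same shape as the m68k helper above; the volatile qualifier lets
 * callers pass live/volatile bitmaps without discarding qualifiers */
static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}

int main(void)
{
	volatile unsigned long flags = 1UL << 3;

	/* with a non-volatile parameter type this call would not
	 * compile cleanly (volatile * passed to non-volatile *) */
	printf("bit 3 set: %d\n", test_bit(3, &flags));
	return 0;
}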
#include <uapi/asm/unistd.h>
-#define NR_syscalls 379
+#define NR_syscalls 380
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __NR_copy_file_range 376
#define __NR_preadv2 377
#define __NR_pwritev2 378
+#define __NR_statx 379
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2
+ .long sys_statx
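
With the table entry wired up, the new call is reachable from userspace. A hedged usage sketch via syscall(2); it assumes kernel headers recent enough to provide __NR_statx and struct statx, and note that 379 is the m68k-specific number:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/syscall.h>	/* __NR_statx */
#include <linux/stat.h>		/* struct statx, STATX_BASIC_STATS */

int main(void)
{
	struct statx stx;

	/* no glibc wrapper is assumed; go through syscall(2) */
	if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
		    STATX_BASIC_STATS, &stx) == 0)
		printf("size: %llu bytes\n",
		       (unsigned long long)stx.stx_size);
	return 0;
}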
#define strlen_user(str) strnlen_user(str, 32767)
-extern unsigned long __must_check __copy_user_zeroing(void *to,
- const void __user *from,
- unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
- return __copy_user_zeroing(to, from, n);
- memset(to, 0, n);
- return n;
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
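
The behavioural contract moves here: the low-level routine no longer zeroes anything, and the wrapper zeroes exactly the tail that could not be read. A standalone model of that contract, where fake_raw_copy() is a hypothetical stand-in for raw_copy_from_user() and fail_at simulates the fault offset:

#include <string.h>

/* stand-in for raw_copy_from_user(): copies until a simulated fault
 * and returns the number of bytes NOT copied */
static unsigned long fake_raw_copy(void *to, const void *from,
				   unsigned long n, unsigned long fail_at)
{
	unsigned long copied = fail_at < n ? fail_at : n;

	memcpy(to, from, copied);
	return n - copied;
}

static unsigned long model_copy_from_user(void *to, const void *from,
					  unsigned long n,
					  unsigned long fail_at)
{
	unsigned long res = fake_raw_copy(to, from, n, fail_at);

	/* zero exactly the tail that could not be read, as above */
	if (res)
		memset((char *)to + (n - res), 0, res);
	return res;
}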
extern unsigned long __must_check __copy_user(void __user *to,
* user_regset definitions.
*/
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+ unsigned long data = (unsigned long)regs->ctx.Flags;
+
+ if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+ data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+ return data;
+}
+
int metag_gp_regs_copyout(const struct pt_regs *regs,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
if (ret)
goto out;
/* TXSTATUS */
- data = (unsigned long)regs->ctx.Flags;
- if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
- data |= USER_GP_REGS_STATUS_CATCH_BIT;
+ data = user_txstatus(regs);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&data, 4*25, 4*26);
if (ret)
if (ret)
goto out;
/* TXSTATUS */
+ data = user_txstatus(regs);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&data, 4*25, 4*26);
if (ret)
unsigned long long *ptr;
int ret, i;
+ if (count < 4*13)
+ return -EINVAL;
/* Read the entire pipeline before making any changes */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&rp, 0, 4*13);
const void *kbuf, const void __user *ubuf)
{
int ret;
- void __user *tls;
+ void __user *tls = target->thread.tls_ptr;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #32\n" \
"23:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "SUB %3, %3, #32\n" \
"24:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #32\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "25:\n" \
+ "27:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "28:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
"SUB %3, %3, #32\n" \
- "27:\n" \
+ "30:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "28:\n" \
+ "31:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %0, %0, #8\n" \
- "29:\n" \
+ "33:\n" \
"SETL [%0++], D0.7, D1.7\n" \
"SUB %3, %3, #32\n" \
"1:" \
" .long 26b,3b\n" \
" .long 27b,3b\n" \
" .long 28b,3b\n" \
- " .long 29b,4b\n" \
+ " .long 29b,3b\n" \
+ " .long 30b,3b\n" \
+ " .long 31b,3b\n" \
+ " .long 32b,3b\n" \
+ " .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"23:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "24:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
- "25:\n" \
+ "24:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "25:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #16\n" \
"27:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
+ "SUB %3, %3, #16\n" \
+ "30:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %3, %3, #16\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "29:\n" \
+ "33:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "30:\n" \
+ "34:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "35:\n" \
"SUB %3, %3, #16\n" \
- "31:\n" \
+ "36:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "32:\n" \
+ "37:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "38:\n" \
"SUB %3, %3, #16\n" \
- "33:\n" \
+ "39:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "34:\n" \
+ "40:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "41:\n" \
"SUB %3, %3, #16\n" \
- "35:\n" \
+ "42:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "36:\n" \
+ "43:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "44:\n" \
"SUB %0, %0, #4\n" \
- "37:\n" \
+ "45:\n" \
"SETD [%0++], D0.7\n" \
"SUB %3, %3, #16\n" \
"1:" \
" .long 34b,3b\n" \
" .long 35b,3b\n" \
" .long 36b,3b\n" \
- " .long 37b,4b\n" \
+ " .long 37b,3b\n" \
+ " .long 38b,3b\n" \
+ " .long 39b,3b\n" \
+ " .long 40b,3b\n" \
+ " .long 41b,3b\n" \
+ " .long 42b,3b\n" \
+ " .long 43b,3b\n" \
+ " .long 44b,3b\n" \
+ " .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
}
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
#endif
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
+ if (retn)
+ return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
+ if (retn)
+ return retn + n;
}
switch (n) {
break;
}
+ /*
+ * If we get here, retn correctly reflects the number of failing
+ * bytes.
+ */
return retn;
}
EXPORT_SYMBOL(__copy_user);
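
All of the repeated "if (retn) return retn + n;" checks above rest on one invariant: after each copy primitive, retn holds the bytes that faulted inside that step and n the bytes not yet attempted, so retn + n is the total not copied. A hedged model of the pattern, with step_copy_4() as a hypothetical stand-in for __asm_copy_to_user_4 that advances both pointers:

/* hypothetical stand-in: copies 4 bytes, advances both pointers,
 * returns 0 on success or the number of bytes that faulted */
extern unsigned long step_copy_4(char **dst, const char **src);

unsigned long model_copy_to_user(char *dst, const char *src,
				 unsigned long n)
{
	unsigned long retn = 0;

	while (n >= 4) {
		retn = step_copy_4(&dst, &src);
		n -= 4;
		if (retn)		/* fault inside the step */
			return retn + n;
	}
	return retn;
}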
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
- "3: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
-#define __asm_copy_from_user_5(to, from, ret) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "4: SETW [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "4: SETD [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "6: SETW [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "6: SETD [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "8: SETW [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "10: SETB [%0++],D1Ar1\n", \
- "11: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "8: SETD [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
- " MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
- " SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 8 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*8 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS, which has already been read
+ * and stored in D0Ar2.
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
+ * LSM_STEP will be 0; i.e. we do 4 writes in our case, so if
+ * a fault happens at the 4th write, LSM_STEP will be 0
+ * instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #8\n")
+ "LSR D0Ar2, D0Ar2, #5\n" \
+ "ANDS D0Ar2, D0Ar2, #0x38\n" \
+ "ADDZ D0Ar2, D0Ar2, #32\n" \
+ "SUB %1, %1, D0Ar2\n")
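
The three fixup instructions are just LSM_STEP scaled to bytes, with the wraparound case patched up. A worked C equivalent, using the TXSTATUS layout described above:

/* LSM_STEP is TXSTATUS bits 10:8; (txstatus >> 5) & 0x38 computes
 * LSM_STEP * 8 in one shift+mask, and ADDZ substitutes 32 (4 transfers
 * of 8 bytes) when the field reads 0 after the final transfer */
static unsigned long rewind_bytes_64bit(unsigned long txstatus)
{
	unsigned long step_x8 = (txstatus >> 5) & 0x38;

	return step_x8 ? step_x8 : 32;
}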
/* rewind 'from' pointer when a fault occurs
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 4 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*4 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS, which has already been read
+ * and stored in D0Ar2.
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETD,
+ * LSM_STEP will be 0; i.e. we do 4 writes in our case, so if
+ * a fault happens at the 4th write, LSM_STEP will be 0
+ * instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #4\n")
+ "LSR D0Ar2, D0Ar2, #6\n" \
+ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
+ "ADDZ D0Ar2, D0Ar2, #16\n" \
+ "SUB %1, %1, D0Ar2\n")
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
- inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
- unsigned long n)
+/*
+ * Copy from user to kernel. The return value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+ unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
if ((unsigned long) src & 1) {
__asm_copy_from_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
- /* We only need one check after the unalignment-adjustments,
- because if both adjustments were done, either both or
- neither reference had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
-
#ifdef USE_RAPF
/* 64 bit copy loop */
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
#endif
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
/* If we get here, there were no memory read faults. */
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
-
- copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
- return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
asm volatile ( \
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
- select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the
#include <asm/cpu-features.h>
#include <asm/fpu_emulator.h>
#include <asm/hazards.h>
+#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/msa.h>
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
extern void *irq_stack[NR_CPUS];
+/*
+ * The highest address on the IRQ stack contains a dummy frame put down in
+ * genex.S (handle_int & except_vec_vi_handler), which is structured as follows:
+ *
+ * top ------------
+ * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
+ * ------------
+ * | | <- First frame of IRQ context
+ * ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = (unsigned long)irq_stack[cpu];
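
The hunk is truncated at this point; a plausible completion of the bounds check, assuming sp counts as on-stack anywhere in [base, base + IRQ_STACK_SIZE], would be:

static inline bool on_irq_stack(int cpu, unsigned long sp)
{
	unsigned long low = (unsigned long)irq_stack[cpu];
	unsigned long high = low + IRQ_STACK_SIZE;

	return (low <= sp) && (sp <= high);
}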
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
" .set reorder \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
#define __NR_pkey_mprotect (__NR_Linux + 363)
#define __NR_pkey_alloc (__NR_Linux + 364)
#define __NR_pkey_free (__NR_Linux + 365)
+#define __NR_statx (__NR_Linux + 366)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 365
+#define __NR_Linux_syscalls 366
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 365
+#define __NR_O32_Linux_syscalls 366
#if _MIPS_SIM == _MIPS_SIM_ABI64
#define __NR_pkey_mprotect (__NR_Linux + 323)
#define __NR_pkey_alloc (__NR_Linux + 324)
#define __NR_pkey_free (__NR_Linux + 325)
+#define __NR_statx (__NR_Linux + 326)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 325
+#define __NR_Linux_syscalls 326
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 325
+#define __NR_64_Linux_syscalls 326
#if _MIPS_SIM == _MIPS_SIM_NABI32
#define __NR_pkey_mprotect (__NR_Linux + 327)
#define __NR_pkey_alloc (__NR_Linux + 328)
#define __NR_pkey_free (__NR_Linux + 329)
+#define __NR_statx (__NR_Linux + 330)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 329
+#define __NR_Linux_syscalls 330
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 329
+#define __NR_N32_Linux_syscalls 330
#endif /* _UAPI_ASM_UNISTD_H */
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}
END(mips_cps_get_bootcfg)
LEAF(mips_cps_boot_vpes)
- PTR_L ta2, COREBOOTCFG_VPEMASK(a0)
+ lw ta2, COREBOOTCFG_VPEMASK(a0)
PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
#if defined(CONFIG_CPU_MIPSR6)
}
decode_configs(c);
- c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
default:
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jal plat_irq_dispatch
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jalr v0
BUILD_HANDLER reserved reserved sti verbose /* others */
.align 5
- LEAF(handle_ri_rdhwr_vivt)
+ LEAF(handle_ri_rdhwr_tlbp)
.set push
.set noat
.set noreorder
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
- END(handle_ri_rdhwr_vivt)
+ END(handle_ri_rdhwr_tlbp)
LEAF(handle_ri_rdhwr)
.set push
unsigned long pc,
unsigned long *ra)
{
+ unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
+ struct pt_regs *regs;
int leaf;
- extern void ret_from_irq(void);
- extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
- * If we reached the bottom of interrupt context,
- * return saved pc in pt_regs.
+ * IRQ stacks start at IRQ_STACK_START
+ * task stacks at THREAD_SIZE - 32
*/
- if (pc == (unsigned long)ret_from_irq ||
- pc == (unsigned long)ret_from_exception) {
- struct pt_regs *regs;
- if (*sp >= stack_page &&
- *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
- regs = (struct pt_regs *)*sp;
- pc = regs->cp0_epc;
- if (!user_mode(regs) && __kernel_text_address(pc)) {
- *sp = regs->regs[29];
- *ra = regs->regs[31];
- return pc;
- }
+ low = stack_page;
+ if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
+ high = stack_page + IRQ_STACK_START;
+ irq_stack_high = high;
+ } else {
+ high = stack_page + THREAD_SIZE - 32;
+ irq_stack_high = 0;
+ }
+
+ /*
+ * If we reached the top of the interrupt stack, start unwinding
+ * the interrupted task stack.
+ */
+ if (unlikely(*sp == irq_stack_high)) {
+ unsigned long task_sp = *(unsigned long *)*sp;
+
+ /*
+ * Check that the pointer saved in the IRQ stack head points to
+ * something within the stack of the current task
+ */
+ if (!object_is_on_stack((void *)task_sp))
+ return 0;
+
+ /*
+	 * Follow the pointer to the task's kernel stack frame where the
+	 * interrupted state was saved.
+ */
+ regs = (struct pt_regs *)task_sp;
+ pc = regs->cp0_epc;
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
+ *sp = regs->regs[29];
+ *ra = regs->regs[31];
+ return pc;
}
return 0;
}
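
Condensed, the hand-off above is: once *sp reaches the dummy frame at the top of the IRQ stack, dereference the saved task sp and restart the unwind from the pt_regs snapshot found there. A simplified standalone model (fake_pt_regs is a stand-in for the real struct):

struct fake_pt_regs { unsigned long cp0_epc; unsigned long regs[32]; };

static unsigned long continue_on_task_stack(unsigned long *sp,
					    unsigned long *ra,
					    unsigned long irq_stack_high)
{
	struct fake_pt_regs *regs;

	if (*sp != irq_stack_high)
		return 0;		/* still unwinding IRQ frames */

	/* the dummy frame's single word is the interrupted task's sp */
	regs = (struct fake_pt_regs *)*(unsigned long *)*sp;
	*sp = regs->regs[29];		/* task stack pointer */
	*ra = regs->regs[31];		/* return address */
	return regs->cp0_epc;		/* pc at exception entry */
}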
if (leaf < 0)
return 0;
- if (*sp < stack_page ||
- *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
+ if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)
&target->thread.fpu,
0, sizeof(elf_fpregset_t));
- for (i = 0; i < NUM_FPU_REGS; i++) {
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpr_val, i * sizeof(elf_fpreg_t),
(i + 1) * sizeof(elf_fpreg_t));
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
+ PTR sys_statx
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 5325 */
+ PTR sys_statx
.size sys_call_table,.-sys_call_table
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free
+ PTR sys_statx /* 6330 */
.size sysn32_call_table,.-sysn32_call_table
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
+ PTR sys_statx
.size sys32_call_table,.-sys32_call_table
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_ri_rdhwr_vivt(void);
+extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
set_except_vector(EXCCODE_SYS, handle_sys);
set_except_vector(EXCCODE_BP, handle_bp);
- set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
- (cpu_has_vtag_icache ?
- handle_ri_rdhwr_vivt : handle_ri_rdhwr));
+
+ if (rdhwr_noopt)
+ set_except_vector(EXCCODE_RI, handle_ri);
+ else {
+ if (cpu_has_vtag_icache)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else if (current_cpu_type() == CPU_LOONGSON3)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
+ }
+
set_except_vector(EXCCODE_CPU, handle_cpu);
set_except_vector(EXCCODE_OV, handle_ov);
set_except_vector(EXCCODE_TR, handle_tr);
if (!np_xbar)
panic("Failed to load xbar nodes from devicetree");
- if (of_address_to_resource(np_pmu, 0, &res_xbar))
+ if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name))
vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
c->vcache.waybit = 0;
+ c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
scache_size *= 4;
c->scache.waybit = 0;
+ c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
if (scache_size)
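
waysize feeds the index-based cache maintenance loops, so leaving it zero breaks them; the fix just divides total size by associativity. A hedged arithmetic check, where the 16-way figure is an assumption for illustration:

static unsigned long loongson3_scache_waysize(void)
{
	unsigned long scache_size = 4 << 20;	/* 4 cores x 1 MiB, per above */
	unsigned int ways = 16;			/* assumed associativity */

	return scache_size / ways;		/* 256 KiB per way */
}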
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
- unsigned int ptr)
+ unsigned int ptr,
+ unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
+ if (cpu_has_ftlb && flush) {
+ BUG_ON(!cpu_has_tlbinv);
+
+ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+
+ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+ return;
+ }
+
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
FUNC("pci-fnc", 3, 40, 32)
};
static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
static struct rt2880_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
return alloc_bootmem_align(size, align);
}
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+ bool nomap)
+{
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+ return 0;
+}
+
void __init early_init_devtree(void *params)
{
__be32 *dtb = (u32 *)__dtb_start;
}
#endif /* CONFIG_BLK_DEV_INITRD */
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
unflatten_and_copy_device_tree();
setup_cpuinfo();
".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"
+/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
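
On the fault side, the low tag bit selects the new behaviour. A hedged sketch of what the handler plausibly does with it; extable_fixup(), insn_is_load() and regs_zero_target() are assumed helper names, not the real parisc API:

static int model_fixup_exception(struct pt_regs *regs,
				 const struct exception_table_entry *fix)
{
	unsigned long addr = extable_fixup(fix);	/* decoded target */

	if (addr & 1) {				/* EFAULT-style entry */
		regs->gr[8] = -EFAULT;		/* %r8 carries the error */
		if (insn_is_load(regs->iir))	/* read fault in get_user() */
			regs_zero_target(regs);	/* zero destination register */
		addr &= ~1UL;			/* strip the tag bit */
	}
	regs->iaoq[0] = addr;			/* resume after the access */
	regs->iaoq[1] = addr + 4;
	return 1;
}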
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
- register long __gu_val __asm__ ("r9") = 0; \
+ register long __gu_val; \
\
load_sr2(); \
switch (sizeof(*(ptr))) { \
})
#define __get_user_asm(ldx, ptr) \
- __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+ __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#if !defined(CONFIG_64BIT)
#define __get_user_asm64(ptr) \
- __asm__("\n1:\tldw 0(%%sr2,%2),%0" \
- "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+ __asm__(" copy %%r0,%R0\n" \
+ "1: ldw 0(%%sr2,%2),%0\n" \
+ "2: ldw 4(%%sr2,%2),%R0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#endif /* !defined(CONFIG_64BIT) */
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
* gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
- "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+ "1: " stx " %2,0(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err) \
- : "r1")
+ : "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
- "\n1:\tstw %2,0(%%sr2,%1)" \
- "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+ "1: stw %2,0(%%sr2,%1)\n" \
+ "2: stw %R2,4(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err) \
- : "r1"); \
+ : "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
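
From the caller's side nothing changes, which is the point of the rework: the usual idiom keeps working, now without the out-of-line fixup stubs. For example (the caller is assumed to have done access_ok() already):

/* err is 0 or -EFAULT, and on a read fault the destination is
 * zeroed rather than left holding stale data */
static int read_u32_from_user(const unsigned int __user *uptr,
			      unsigned int *out)
{
	unsigned int val;

	if (__get_user(val, uptr))
		return -EFAULT;
	*out = val;
	return 0;
}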
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
-/* Global fixups - defined as int to avoid creation of function pointers */
-extern int fixup_get_user_skip_1;
-extern int fixup_get_user_skip_2;
-extern int fixup_put_user_skip_1;
-extern int fixup_put_user_skip_2;
-EXPORT_SYMBOL(fixup_get_user_skip_1);
-EXPORT_SYMBOL(fixup_get_user_skip_2);
-EXPORT_SYMBOL(fixup_put_user_skip_1);
-EXPORT_SYMBOL(fixup_put_user_skip_2);
-
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
+	/* Prevent soft lockup/stalled CPU messages for the endless loop. */
+ rcu_sysrq_start();
for (;;);
}
# Makefile for parisc-specific library files
#
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o
obj-y := iomap.o
+++ /dev/null
-/*
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *
- * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Fixup routines for kernel exception handling.
- */
-#include <asm/asm-offsets.h>
-#include <asm/assembly.h>
-#include <asm/errno.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_SMP
- .macro get_fault_ip t1 t2
- loadgp
- addil LT%__per_cpu_offset,%r27
- LDREG RT%__per_cpu_offset(%r1),\t1
- /* t2 = smp_processor_id() */
- mfctl 30,\t2
- ldw TI_CPU(\t2),\t2
-#ifdef CONFIG_64BIT
- extrd,u \t2,63,32,\t2
-#endif
- /* t2 = &__per_cpu_offset[smp_processor_id()]; */
- LDREGX \t2(\t1),\t2
- addil LT%exception_data,%r27
- LDREG RT%exception_data(%r1),\t1
- /* t1 = this_cpu_ptr(&exception_data) */
- add,l \t1,\t2,\t1
- /* %r27 = t1->fault_gp - restore gp */
- LDREG EXCDATA_GP(\t1), %r27
- /* t1 = t1->fault_ip */
- LDREG EXCDATA_IP(\t1), \t1
- .endm
-#else
- .macro get_fault_ip t1 t2
- loadgp
- /* t1 = this_cpu_ptr(&exception_data) */
- addil LT%exception_data,%r27
- LDREG RT%exception_data(%r1),\t2
- /* %r27 = t2->fault_gp - restore gp */
- LDREG EXCDATA_GP(\t2), %r27
- /* t1 = t2->fault_ip */
- LDREG EXCDATA_IP(\t2), \t1
- .endm
-#endif
-
- .level LEVEL
-
- .text
- .section .fixup, "ax"
-
- /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY_CFI(fixup_get_user_skip_1)
- get_fault_ip %r1,%r8
- ldo 4(%r1), %r1
- ldi -EFAULT, %r8
- bv %r0(%r1)
- copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_1)
-
-ENTRY_CFI(fixup_get_user_skip_2)
- get_fault_ip %r1,%r8
- ldo 8(%r1), %r1
- ldi -EFAULT, %r8
- bv %r0(%r1)
- copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_2)
-
- /* put_user() fixups, store -EFAULT in r8 */
-ENTRY_CFI(fixup_put_user_skip_1)
- get_fault_ip %r1,%r8
- ldo 4(%r1), %r1
- bv %r0(%r1)
- ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_1)
-
-ENTRY_CFI(fixup_put_user_skip_2)
- get_fault_ip %r1,%r8
- ldo 8(%r1), %r1
- bv %r0(%r1)
- ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_2)
-
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
*
*
* This program is free software; you can redistribute it and/or modify
.procend
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * This code is based on a C-implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from the glibc.
+ *
+ * Several strategies are tried to get the best performance for various
+ * conditions. In the optimal case, we copy with loops that move 32 or 16 bytes
+ * at a time using general registers. Unaligned copies are handled either by
+ * aligning the destination and then using shift-and-write method, or in a few
+ * cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends
+ * credibility to the claim that gcc can generate very good code as long
+ * as we are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ * additional interlocks. The assumption is that those were only efficient
+ * on old machines (pre-PA8000 processors).
+ */
+
+ dst = arg0
+ src = arg1
+ len = arg2
+ end = arg3
+ t1 = r19
+ t2 = r20
+ t3 = r21
+ t4 = r22
+ srcspc = sr1
+ dstspc = sr2
+
+ t0 = r1
+ a1 = t1
+ a2 = t2
+ a3 = t3
+ a0 = t4
+
+ save_src = ret0
+ save_dst = ret1
+ save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ /* Last destination address */
+ add dst,len,end
+
+ /* short copy with less than 16 bytes? */
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+ /* same alignment? */
+ xor src,dst,t0
+ extru t0,31,2,t1
+ cmpib,<>,n 0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+ /* only do 64-bit copies if we can get aligned. */
+ extru t0,31,3,t1
+ cmpib,<>,n 0,t1,.Lalign_loop32
+
+ /* loop until we are 64-bit aligned */
+.Lalign_loop64:
+ extru dst,31,3,t1
+ cmpib,=,n 0,t1,.Lcopy_loop_16_start
+20: ldb,ma 1(srcspc,src),t1
+21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop64
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_loop_16_start:
+ ldi 31,t0
+.Lcopy_loop_16:
+ cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10: ldd 0(srcspc,src),t1
+11: ldd 8(srcspc,src),t2
+ ldo 16(src),src
+12: std,ma t1,8(dstspc,dst)
+13: std,ma t2,8(dstspc,dst)
+14: ldd 0(srcspc,src),t1
+15: ldd 8(srcspc,src),t2
+ ldo 16(src),src
+16: std,ma t1,8(dstspc,dst)
+17: std,ma t2,8(dstspc,dst)
+
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+ b .Lcopy_loop_16
+ ldo -32(len),len
+
+.Lword_loop:
+ cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20: ldw,ma 4(srcspc,src),t1
+21: stw,ma t1,4(dstspc,dst)
+ b .Lword_loop
+ ldo -4(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+ /* loop until we are 32-bit aligned */
+.Lalign_loop32:
+ extru dst,31,2,t1
+ cmpib,=,n 0,t1,.Lcopy_loop_8
+20: ldb,ma 1(srcspc,src),t1
+21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop32
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_8:
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10: ldw 0(srcspc,src),t1
+11: ldw 4(srcspc,src),t2
+12: stw,ma t1,4(dstspc,dst)
+13: stw,ma t2,4(dstspc,dst)
+14: ldw 8(srcspc,src),t1
+15: ldw 12(srcspc,src),t2
+ ldo 16(src),src
+16: stw,ma t1,4(dstspc,dst)
+17: stw,ma t2,4(dstspc,dst)
+
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+ b .Lcopy_loop_8
+ ldo -16(len),len
+
+.Lbyte_loop:
+ cmpclr,COND(<>) len,%r0,%r0
+ b,n .Lcopy_done
+20: ldb 0(srcspc,src),t1
+ ldo 1(src),src
+21: stb,ma t1,1(dstspc,dst)
+ b .Lbyte_loop
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+ bv %r0(%r2)
+ sub end,dst,ret0
+
+
+ /* src and dst are not aligned the same way. */
+ /* need to go the hard way */
+.Lunaligned_copy:
+ /* align until dst is 32bit-word-aligned */
+ extru dst,31,2,t1
+ cmpib,=,n 0,t1,.Lcopy_dstaligned
+20: ldb 0(srcspc,src),t1
+ ldo 1(src),src
+21: stb,ma t1,1(dstspc,dst)
+ b .Lunaligned_copy
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+ /* store src, dst and len in safe place */
+ copy src,save_src
+ copy dst,save_dst
+ copy len,save_len
+
+	/* len now needs to give the number of words to copy */
+ SHRREG len,2,len
+
+ /*
+ * Copy from a not-aligned src to an aligned dst using shifts.
+ * Handles 4 words per loop.
+ */
+
+ depw,z src,28,2,t0
+ subi 32,t0,t0
+ mtsar t0
+ extru len,31,2,t0
+ cmpib,= 2,t0,.Lcase2
+ /* Make src aligned by rounding it down. */
+ depi 0,31,2,src
+
+ cmpiclr,<> 3,t0,%r0
+ b,n .Lcase3
+ cmpiclr,<> 1,t0,%r0
+ b,n .Lcase1
+.Lcase0:
+ cmpb,COND(=) %r0,len,.Lcda_finish
+ nop
+
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b,n .Ldo3
+.Lcase1:
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ ldo -1(len),len
+ cmpb,COND(=),n %r0,len,.Ldo0
+.Ldo4:
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a2, a3, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a3, a0, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a0, a1, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a1, a2, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+ ldo -4(len),len
+ cmpb,COND(<>) %r0,len,.Ldo4
+ nop
+.Ldo0:
+ shrpw a2, a3, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+ /* calculate new src, dst and len and jump to byte-copy loop */
+ sub dst,save_dst,t0
+ add save_src,t0,src
+ b .Lbyte_loop
+ sub save_len,t0,len
+
+.Lcase3:
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b .Ldo2
+ ldo 1(len),len
+.Lcase2:
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b .Ldo1
+ ldo 2(len),len
+
+
+ /* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+ b .Lcopy_done
+10: std,ma t1,8(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+ b .Lcopy_done
+10: stw,ma t1,4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+ .exit
+ENDPROC_CFI(pa_memcpy)
+ .procend
+
.end
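
A hedged C model of the shift-and-write strategy used by pa_memcpy's unaligned path above: shrpw merges two neighbouring aligned source words through the shift-amount register so that every store to the aligned destination is a full word. Big-endian merge order matches PA-RISC; the already-aligned case is assumed to take a separate path, as it does in the assembly:

#include <stdint.h>
#include <stddef.h>

/* copy nwords full words to an aligned dst from a misaligned src;
 * like the assembly, this reads up to one aligned word past the last
 * source byte, and the caller guarantees ((uintptr_t)src & 3) != 0 */
static void shift_merge_copy(uint32_t *dst, const uint8_t *src,
			     size_t nwords)
{
	unsigned int off = (uintptr_t)src & 3;		/* 1, 2 or 3 */
	const uint32_t *s = (const uint32_t *)(src - off);
	unsigned int sh = off * 8;			/* shift, in bits */
	uint32_t prev = *s++;				/* prime the pair */

	while (nwords--) {
		uint32_t cur = *s++;
		/* big-endian shrpw-style merge of the word pair */
		*dst++ = (prev << sh) | (cur >> (32 - sh));
		prev = cur;
	}
}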
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- * Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* Portions derived from the GNU C Library
* Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
*
- * Several strategies are tried to try to get the best performance for various
- * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
- * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
- * general registers. Unaligned copies are handled either by aligning the
- * destination and then using shift-and-write method, or in a few cases by
- * falling back to a byte-at-a-time copy.
- *
- * I chose to implement this in C because it is easier to maintain and debug,
- * and in my experiments it appears that the C code generated by gcc (3.3/3.4
- * at the time of writing) is fairly optimal. Unfortunately some of the
- * semantics of the copy routine (exception handling) is difficult to express
- * in C, so we have to play some tricks to get it to work.
- *
- * All the loads and stores are done via explicit asm() code in order to use
- * the right space registers.
- *
- * Testing with various alignments and buffer sizes shows that this code is
- * often >10x faster than a simple byte-at-a-time copy, even for strangely
- * aligned operands. It is interesting to note that the glibc version
- * of memcpy (written in C) is actually quite fast already. This routine is
- * able to beat it by 30-40% for aligned copies because of the loop unrolling,
- * but in some cases the glibc version is still slightly faster. This lends
- * more credibility that gcc can generate very good code as long as we are
- * careful.
- *
- * TODO:
- * - cache prefetching needs more experimentation to get optimal settings
- * - try not to use the post-increment address modifiers; they create additional
- * interlocks
- * - replace byte-copy loops with stybs sequences
*/
-#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
-#define s_space "%%sr1"
-#define d_space "%%sr2"
-#else
-#include "memcpy.h"
-#define s_space "%%sr0"
-#define d_space "%%sr0"
-#define pa_memcpy new2_copy
-#endif
DECLARE_PER_CPU(struct exception_data, exception_data);
-#define preserve_branch(label) do { \
- volatile int dummy = 0; \
- /* The following branch is never taken, it's just here to */ \
- /* prevent gcc from optimizing away our exception code. */ \
- if (unlikely(dummy != dummy)) \
- goto label; \
-} while (0)
-
#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
#define get_kernel_space() (0)
-#define MERGE(w0, sh_1, w1, sh_2) ({ \
- unsigned int _r; \
- asm volatile ( \
- "mtsar %3\n" \
- "shrpw %1, %2, %%sar, %0\n" \
- : "=r"(_r) \
- : "r"(w0), "r"(w1), "r"(sh_2) \
- ); \
- _r; \
-})
-#define THRESHOLD 16
-
-#ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
-#else
-#define DPRINTF(fmt, args...)
-#endif
-
-#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : _tt(_t), "+r"(_a) \
- : \
- : "r8")
-
-#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : "+r"(_a) \
- : _tt(_t) \
- : "r8")
-
-#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
-#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
-#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
-#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
-#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
-#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
-
-#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : _tt(_t) \
- : "r"(_a) \
- : "r8")
-
-#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : \
- : _tt(_t), "r"(_a) \
- : "r8")
-
-#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
-#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
-
-#ifdef CONFIG_PREFETCH
-static inline void prefetch_src(const void *addr)
-{
- __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
-}
-
-static inline void prefetch_dst(const void *addr)
-{
- __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
-}
-#else
-#define prefetch_src(addr) do { } while(0)
-#define prefetch_dst(addr) do { } while(0)
-#endif
-
-#define PA_MEMCPY_OK 0
-#define PA_MEMCPY_LOAD_ERROR 1
-#define PA_MEMCPY_STORE_ERROR 2
-
-/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
- * per loop. This code is derived from glibc.
- */
-static noinline unsigned long copy_dstaligned(unsigned long dst,
- unsigned long src, unsigned long len)
-{
- /* gcc complains that a2 and a3 may be uninitialized, but actually
- * they cannot be. Initialize a2/a3 to shut gcc up.
- */
- register unsigned int a0, a1, a2 = 0, a3 = 0;
- int sh_1, sh_2;
-
- /* prefetch_src((const void *)src); */
-
- /* Calculate how to shift a word read at the memory operation
- aligned srcp to make it aligned for copy. */
- sh_1 = 8 * (src % sizeof(unsigned int));
- sh_2 = 8 * sizeof(unsigned int) - sh_1;
-
- /* Make src aligned by rounding it down. */
- src &= -sizeof(unsigned int);
-
- switch (len % 4)
- {
- case 2:
- /* a1 = ((unsigned int *) src)[0];
- a2 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a1, cda_ldw_exc);
- ldw(s_space, 4, src, a2, cda_ldw_exc);
- src -= 1 * sizeof(unsigned int);
- dst -= 3 * sizeof(unsigned int);
- len += 2;
- goto do1;
- case 3:
- /* a0 = ((unsigned int *) src)[0];
- a1 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a0, cda_ldw_exc);
- ldw(s_space, 4, src, a1, cda_ldw_exc);
- src -= 0 * sizeof(unsigned int);
- dst -= 2 * sizeof(unsigned int);
- len += 1;
- goto do2;
- case 0:
- if (len == 0)
- return PA_MEMCPY_OK;
- /* a3 = ((unsigned int *) src)[0];
- a0 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a3, cda_ldw_exc);
- ldw(s_space, 4, src, a0, cda_ldw_exc);
- src -=-1 * sizeof(unsigned int);
- dst -= 1 * sizeof(unsigned int);
- len += 0;
- goto do3;
- case 1:
- /* a2 = ((unsigned int *) src)[0];
- a3 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a2, cda_ldw_exc);
- ldw(s_space, 4, src, a3, cda_ldw_exc);
- src -=-2 * sizeof(unsigned int);
- dst -= 0 * sizeof(unsigned int);
- len -= 1;
- if (len == 0)
- goto do0;
- goto do4; /* No-op. */
- }
-
- do
- {
- /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
-do4:
- /* a0 = ((unsigned int *) src)[0]; */
- ldw(s_space, 0, src, a0, cda_ldw_exc);
- /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
- stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-do3:
- /* a1 = ((unsigned int *) src)[1]; */
- ldw(s_space, 4, src, a1, cda_ldw_exc);
- /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
- stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
-do2:
- /* a2 = ((unsigned int *) src)[2]; */
- ldw(s_space, 8, src, a2, cda_ldw_exc);
- /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
- stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
-do1:
- /* a3 = ((unsigned int *) src)[3]; */
- ldw(s_space, 12, src, a3, cda_ldw_exc);
- /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
- stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
-
- src += 4 * sizeof(unsigned int);
- dst += 4 * sizeof(unsigned int);
- len -= 4;
- }
- while (len != 0);
-
-do0:
- /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
- stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-
- preserve_branch(handle_load_error);
- preserve_branch(handle_store_error);
-
- return PA_MEMCPY_OK;
-
-handle_load_error:
- __asm__ __volatile__ ("cda_ldw_exc:\n");
- return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
- __asm__ __volatile__ ("cda_stw_exc:\n");
- return PA_MEMCPY_STORE_ERROR;
-}
-
-
-/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
- * In case of an access fault the faulty address can be read from the per_cpu
- * exception data struct. */
-static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
- unsigned long len)
-{
- register unsigned long src, dst, t1, t2, t3;
- register unsigned char *pcs, *pcd;
- register unsigned int *pws, *pwd;
- register double *pds, *pdd;
- unsigned long ret;
-
- src = (unsigned long)srcp;
- dst = (unsigned long)dstp;
- pcs = (unsigned char *)srcp;
- pcd = (unsigned char *)dstp;
-
- /* prefetch_src((const void *)srcp); */
-
- if (len < THRESHOLD)
- goto byte_copy;
-
- /* Check alignment */
- t1 = (src ^ dst);
- if (unlikely(t1 & (sizeof(double)-1)))
- goto unaligned_copy;
-
- /* src and dst have same alignment. */
-
- /* Copy bytes till we are double-aligned. */
- t2 = src & (sizeof(double) - 1);
- if (unlikely(t2 != 0)) {
- t2 = sizeof(double) - t2;
- while (t2 && len) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- len--;
- stbma(d_space, t3, pcd, pmc_store_exc);
- t2--;
- }
- }
-
- pds = (double *)pcs;
- pdd = (double *)pcd;
-
-#if 0
- /* Copy 8 doubles at a time */
- while (len >= 8*sizeof(double)) {
- register double r1, r2, r3, r4, r5, r6, r7, r8;
- /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
- flddma(s_space, pds, r1, pmc_load_exc);
- flddma(s_space, pds, r2, pmc_load_exc);
- flddma(s_space, pds, r3, pmc_load_exc);
- flddma(s_space, pds, r4, pmc_load_exc);
- fstdma(d_space, r1, pdd, pmc_store_exc);
- fstdma(d_space, r2, pdd, pmc_store_exc);
- fstdma(d_space, r3, pdd, pmc_store_exc);
- fstdma(d_space, r4, pdd, pmc_store_exc);
-
-#if 0
- if (L1_CACHE_BYTES <= 32)
- prefetch_src((char *)pds + L1_CACHE_BYTES);
-#endif
- flddma(s_space, pds, r5, pmc_load_exc);
- flddma(s_space, pds, r6, pmc_load_exc);
- flddma(s_space, pds, r7, pmc_load_exc);
- flddma(s_space, pds, r8, pmc_load_exc);
- fstdma(d_space, r5, pdd, pmc_store_exc);
- fstdma(d_space, r6, pdd, pmc_store_exc);
- fstdma(d_space, r7, pdd, pmc_store_exc);
- fstdma(d_space, r8, pdd, pmc_store_exc);
- len -= 8*sizeof(double);
- }
-#endif
-
- pws = (unsigned int *)pds;
- pwd = (unsigned int *)pdd;
-
-word_copy:
- while (len >= 8*sizeof(unsigned int)) {
- register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
- /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
- ldwma(s_space, pws, r1, pmc_load_exc);
- ldwma(s_space, pws, r2, pmc_load_exc);
- ldwma(s_space, pws, r3, pmc_load_exc);
- ldwma(s_space, pws, r4, pmc_load_exc);
- stwma(d_space, r1, pwd, pmc_store_exc);
- stwma(d_space, r2, pwd, pmc_store_exc);
- stwma(d_space, r3, pwd, pmc_store_exc);
- stwma(d_space, r4, pwd, pmc_store_exc);
-
- ldwma(s_space, pws, r5, pmc_load_exc);
- ldwma(s_space, pws, r6, pmc_load_exc);
- ldwma(s_space, pws, r7, pmc_load_exc);
- ldwma(s_space, pws, r8, pmc_load_exc);
- stwma(d_space, r5, pwd, pmc_store_exc);
- stwma(d_space, r6, pwd, pmc_store_exc);
- stwma(d_space, r7, pwd, pmc_store_exc);
- stwma(d_space, r8, pwd, pmc_store_exc);
- len -= 8*sizeof(unsigned int);
- }
-
- while (len >= 4*sizeof(unsigned int)) {
- register unsigned int r1,r2,r3,r4;
- ldwma(s_space, pws, r1, pmc_load_exc);
- ldwma(s_space, pws, r2, pmc_load_exc);
- ldwma(s_space, pws, r3, pmc_load_exc);
- ldwma(s_space, pws, r4, pmc_load_exc);
- stwma(d_space, r1, pwd, pmc_store_exc);
- stwma(d_space, r2, pwd, pmc_store_exc);
- stwma(d_space, r3, pwd, pmc_store_exc);
- stwma(d_space, r4, pwd, pmc_store_exc);
- len -= 4*sizeof(unsigned int);
- }
-
- pcs = (unsigned char *)pws;
- pcd = (unsigned char *)pwd;
-
-byte_copy:
- while (len) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- }
-
- return PA_MEMCPY_OK;
-
-unaligned_copy:
- /* possibly we are aligned on a word, but not on a double... */
- if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
- t2 = src & (sizeof(unsigned int) - 1);
-
- if (unlikely(t2 != 0)) {
- t2 = sizeof(unsigned int) - t2;
- while (t2) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- t2--;
- }
- }
-
- pws = (unsigned int *)pcs;
- pwd = (unsigned int *)pcd;
- goto word_copy;
- }
-
- /* Align the destination. */
- if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
- t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
- while (t2) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- t2--;
- }
- dst = (unsigned long)pcd;
- src = (unsigned long)pcs;
- }
-
- ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
- if (ret)
- return ret;
-
- pcs += (len & -sizeof(unsigned int));
- pcd += (len & -sizeof(unsigned int));
- len %= sizeof(unsigned int);
-
- preserve_branch(handle_load_error);
- preserve_branch(handle_store_error);
-
- goto byte_copy;
-
-handle_load_error:
- __asm__ __volatile__ ("pmc_load_exc:\n");
- return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
- __asm__ __volatile__ ("pmc_store_exc:\n");
- return PA_MEMCPY_STORE_ERROR;
-}
-
-
/* Returns 0 for success; otherwise returns the number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
-{
- unsigned long ret, fault_addr, reference;
- struct exception_data *d;
-
- ret = pa_memcpy_internal(dstp, srcp, len);
- if (likely(ret == PA_MEMCPY_OK))
- return 0;
-
- /* if a load or store fault occured we can get the faulty addr */
- d = this_cpu_ptr(&exception_data);
- fault_addr = d->fault_addr;
-
- /* error in load or store? */
- if (ret == PA_MEMCPY_LOAD_ERROR)
- reference = (unsigned long) srcp;
- else
- reference = (unsigned long) dstp;
+extern unsigned long pa_memcpy(void *dst, const void *src,
+ unsigned long len);
- DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
- ret, len, fault_addr, reference);
-
- if (fault_addr >= reference)
- return len - (fault_addr - reference);
- else
- return len;
-}
-
-#ifdef __KERNEL__
unsigned long __copy_to_user(void __user *dst, const void *src,
unsigned long len)
{
return __probe_kernel_read(dst, src, size);
}
-
-#endif
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
+ /*
+ * Fix up get_user() and put_user().
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+ * bit in the relative address of the fixup routine to indicate
+ * that %r8 should be loaded with -EFAULT to report a userspace
+ * access error.
+ */
+ if (fix->fixup & 1) {
+ regs->gr[8] = -EFAULT;
+
+ /* zero target register for get_user() */
+ if (parisc_acctyp(0, regs->iir) == VM_READ) {
+ int treg = regs->iir & 0x1f;
+ regs->gr[treg] = 0;
+ }
+ }
+
regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
regs->iaoq[0] &= ~3;
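/*
 * Sketch of the macro side of this contract (an assumption for
 * illustration; it is not shown in this hunk): the EFAULT variant
 * presumably records the fixup address with bit 0 set, e.g.
 *
 *	#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT(fault_addr, except_addr) \
 *		ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr + 1)
 *
 * which is why the handler tests (fix->fixup & 1) and then clears the
 * low bits of iaoq[0] above.
 */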
/*
}
if (len & ~VMX_ALIGN_MASK) {
+ preempt_disable();
pagefault_disable();
enable_kernel_altivec();
crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+ disable_kernel_altivec();
pagefault_enable();
+ preempt_enable();
}
tail = len & VMX_ALIGN_MASK;
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
- nb = 8;
- flags = LD+SW;
- } else if (IS_XFORM(instruction) &&
- ((instruction >> 1) & 0x3ff) == 660) {
- nb = 8;
- flags = ST+SW;
+ /*
+ * Handle some cases which give overlaps in the DSISR values.
+ */
+ if (IS_XFORM(instruction)) {
+ switch (get_xop(instruction)) {
+ case 532: /* ldbrx */
+ nb = 8;
+ flags = LD+SW;
+ break;
+ case 660: /* stdbrx */
+ nb = 8;
+ flags = ST+SW;
+ break;
+ case 20: /* lwarx */
+ case 84: /* ldarx */
+ case 116: /* lharx */
+ case 276: /* lqarx */
+ return 0; /* not emulated ever */
+ }
}
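The get_xop() helper is not shown in this hunk; judging from the open-coded
tests it replaces, it extracts the X-form extended opcode from bits 1-10 of
the instruction word, roughly as follows (a sketch inferred from the removed
"(instruction >> 1) & 0x3ff" comparisons, not the verbatim kernel definition):

	static inline unsigned int get_xop(unsigned int instruction)
	{
		/* X-form: the extended opcode lives in bits 1-10. */
		return (instruction >> 1) & 0x3ff;
	}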
/* Byteswap little endian loads and stores */
* flush all bytes from start through stop-1 inclusive
*/
-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr
*
* flush all bytes from start to stop-1 inclusive
*/
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)
/*
* Flush the data cache to memory
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
+ /*
+ * Fixup HFSCR:TM based on CPU features. The bit is set by our
+ * early asm init because at that point we haven't updated our
+	 * CPU features from firmware and device-tree. By this point we
+	 * have, so fix it up here.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}
/* start new resize */
resize = kzalloc(sizeof(*resize), GFP_KERNEL);
+ if (!resize) {
+ ret = -ENOMEM;
+ goto out;
+ }
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
+ unsigned int use_local;
+
+ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
} pte_iterate_hashed_end();
}
- if (mmu_has_feature(MMU_FTR_TLBIEL) &&
- mmu_psize_defs[psize].tlbiel && local) {
+ if (use_local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
unsigned long decompress_kernel(void)
{
- unsigned long output_addr;
- unsigned char *output;
+ void *output, *kernel_end;
- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
- memset(&_bss, 0, &_ebss - &_bss);
- free_mem_ptr = (unsigned long)&_end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) output_addr;
+ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+ kernel_end = output + SZ__bss_start;
+ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
- * kernel image.
+ * kernel image. This also prevents initrd corruption caused by
+ * bss clearing since kernel_end will always be located behind the
+	 * current bss section.
*/
- if (INITRD_START && INITRD_SIZE &&
- INITRD_START < (unsigned long) output + SZ__bss_start) {
- check_ipl_parmblock(output + SZ__bss_start,
- INITRD_START + INITRD_SIZE);
- memmove(output + SZ__bss_start,
- (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = (unsigned long) output + SZ__bss_start;
+ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+ check_ipl_parmblock(kernel_end, INITRD_SIZE);
+ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+ INITRD_START = (unsigned long) kernel_end;
}
#endif
+ /*
+ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+ * initialized afterwards since they reside in bss.
+ */
+ memset(&_bss, 0, &_ebss - &_bss);
+ free_mem_ptr = (unsigned long) &_end;
+ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
puts("Uncompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
puts("Ok, booting the kernel.\n");
#include <asm-generic/sections.h>
extern char _eshared[], _ehead[];
-extern char __start_ro_after_init[], __end_ro_after_init[];
#endif
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
- : "=d" (__rc), "=Q" (*(to)) \
+ : "=d" (__rc), "+Q" (*(to)) \
: "d" (size), "Q" (*(from)), \
"d" (__reg0), "K" (-EFAULT) \
: "cc"); \
{
struct pcpu *pcpu = pcpu_devices;
+ WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
- pcpu->address = stap();
pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
- set_cpu_present(0, true);
- set_cpu_online(0, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_setup_processor_id(void)
{
+ pcpu_devices[0].address = stap();
S390_lowcore.cpu_nr = 0;
S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}
. = ALIGN(PAGE_SIZE);
__start_ro_after_init = .;
- __start_data_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
}
- __end_data_ro_after_init = .;
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
unsigned long z : 1; /* Zero Bit */
unsigned long i : 1; /* Page-Invalid Bit */
unsigned long p : 1; /* DAT-Protection Bit */
- unsigned long co : 1; /* Change-Recording Override */
- unsigned long : 8;
+ unsigned long : 9;
};
};
return PGM_PAGE_TRANSLATION;
if (pte.z)
return PGM_TRANSLATION_SPEC;
- if (pte.co && !edat1)
- return PGM_TRANSLATION_SPEC;
dat_protection |= pte.p;
raddr.pfra = pte.pfra;
real_address:
rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
if (!rc && pte.i)
rc = PGM_PAGE_TRANSLATION;
- if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
+ if (!rc && pte.z)
rc = PGM_TRANSLATION_SPEC;
shadow_page:
pte.p |= dat_protection;
#define HPAGE_SHIFT 23
#define REAL_HPAGE_SHIFT 22
+#define HPAGE_2GB_SHIFT 31
#define HPAGE_256MB_SHIFT 28
#define HPAGE_64K_SHIFT 16
#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE 3
+#define HUGE_MAX_HSTATE 4
#endif
#ifndef __ASSEMBLY__
return pte_pfn(pte);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline unsigned long pmd_dirty(pmd_t pmd)
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_dirty(pte);
+ return pte_write(pte);
}
-static inline unsigned long pmd_young(pmd_t pmd)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long pmd_dirty(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_young(pte);
+ return pte_dirty(pte);
}
-static inline unsigned long pmd_write(pmd_t pmd)
+static inline unsigned long pmd_young(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_write(pte);
+ return pte_young(pte);
}
static inline unsigned long pmd_trans_huge(pmd_t pmd)
#include <asm/signal.h>
#include <asm/page.h>
-/*
- * The sparc has no problems with write protection
- */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
* That one page is used to protect kernel from intruders, so that
* we can make our access_ok test faster
#include <asm/ptrace.h>
#include <asm/page.h>
-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
/*
* User lives in his very own context, and cannot reference us. Note
* that TASK_SIZE is a misnomer, it really gives maximum user virtual
andn %g1, PSTATE_AM, %g1
wrpr %g1, 0x0, %pstate
ba,a,pt %xcc, 1f
+ nop
.globl prom_finddev_name, prom_chosen_path, prom_root_node
.globl prom_getprop_name, prom_mmu_name, prom_peer_name
nop
ba,a,pt %xcc, 80f
+ nop
niagara4_patch:
call niagara4_patch_copyops
nop
nop
ba,a,pt %xcc, 80f
+ nop
niagara2_patch:
call niagara2_patch_copyops
nop
ba,a,pt %xcc, 80f
+ nop
niagara_patch:
call niagara_patch_copyops
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
+ nop
.size do_stdfmna,.-do_stdfmna
.type breakpoint_trap,#function
}
if (!ret) {
- unsigned long y;
+ unsigned long y = regs->y;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&y,
bne,pt %xcc, user_rtt_fill_32bit
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
+ nop
user_rtt_fill_fixup_dax:
ba,pt %xcc, user_rtt_fill_fixup_common
rd %pc, %g7
ba,a,pt %xcc, 2f
+ nop
1: ba,pt %xcc, etrap_irq
rd %pc, %g7
call sun4v_do_mna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
+ nop
/* Privileged Action. */
sun4v_privact:
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
1: call spitfire_data_access_exception
nop
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
1: call spitfire_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
blu 170f
nop
ba,a,pt %xcc, 180f
+ nop
4: /* 32 <= low bits < 48 */
blu 150f
nop
ba,a,pt %xcc, 160f
+ nop
5: /* 0 < low bits < 32 */
blu,a 6f
cmp %g2, 8
blu 130f
nop
ba,a,pt %xcc, 140f
+ nop
6: /* 0 < low bits < 16 */
bgeu 120f
nop
brz,pt %o2, 85f
sub %o0, %o1, GLOBAL_SPARE
ba,a,pt %XCC, 90f
+ nop
.align 64
75: /* 16 < len <= 64 */
bne,pt %icc, 1b
EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
+ nop
.size FUNC_NAME, .-FUNC_NAME
bne,pt %icc, 1b
add %o0, 0x30, %o0
ba,a,pt %icc, .Lpostloop
+ nop
.size NG4bzero,.-NG4bzero
brz,pt %i2, 85f
sub %o0, %i1, %i3
ba,a,pt %XCC, 90f
+ nop
.align 64
70: /* 16 < len <= 64 */
pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
switch (shift) {
+ case HPAGE_2GB_SHIFT:
+ hugepage_size = _PAGE_SZ2GB_4V;
+ pte_val(entry) |= _PAGE_PMD_HUGE;
+ break;
case HPAGE_256MB_SHIFT:
hugepage_size = _PAGE_SZ256MB_4V;
pte_val(entry) |= _PAGE_PMD_HUGE;
unsigned int shift;
switch (tte_szbits) {
+ case _PAGE_SZ2GB_4V:
+ shift = HPAGE_2GB_SHIFT;
+ break;
case _PAGE_SZ256MB_4V:
shift = HPAGE_256MB_SHIFT;
break;
if (!pmd)
return NULL;
- if (sz == PMD_SHIFT)
+ if (sz >= PMD_SIZE)
pte = (pte_t *)pmd;
else
pte = pte_alloc_map(mm, pmd, addr);
hugepage_shift = ilog2(hugepage_size);
switch (hugepage_shift) {
+ case HPAGE_2GB_SHIFT:
+ hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+ hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+ break;
case HPAGE_256MB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_256MB;
hv_pgsz_idx = HV_PGSZ_IDX_256MB;
if ((long)addr < 0L) {
unsigned long pa = __pa(addr);
- if ((addr >> max_phys_bits) != 0UL)
+ if ((pa >> max_phys_bits) != 0UL)
return false;
return pfn_valid(pa >> PAGE_SHIFT);
enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
int vac_line_size;
extern struct resource sparc_iomap;
if (pte_val(*pte) & _PAGE_VALID) {
bool exec = pte_exec(*pte);
- tlb_batch_add_one(mm, vaddr, exec, false);
+ tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
}
pte++;
vaddr += PAGE_SIZE;
pte_t orig_pte = __pte(pmd_val(orig));
bool exec = pte_exec(orig_pte);
- tlb_batch_add_one(mm, addr, exec, true);
+ tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
- true);
+ REAL_HPAGE_SHIFT);
} else {
tlb_batch_pmd_scan(mm, addr, orig);
}
spin_lock_irqsave(&mm->context.lock, flags);
- if (tb->hugepage_shift < HPAGE_SHIFT) {
+ if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
spin_lock_irqsave(&mm->context.lock, flags);
- if (hugepage_shift < HPAGE_SHIFT) {
+ if (hugepage_shift < REAL_HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
# -funit-at-a-time shrinks the kernel .text considerably
# unfortunately it makes reading oopses harder.
KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
-
- # this works around some issues with generating unwind tables in older gccs
- # newer gccs do it by default
- KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
endif
ifdef CONFIG_X86_X32
KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
endif
+#
+# If the function graph tracer is used with mcount instead of fentry,
+# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109)
+#
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ifndef CONFIG_HAVE_FENTRY
+ ACCUMULATE_OUTGOING_ARGS := 1
+ else
+ ifeq ($(call cc-option-yn, -mfentry), n)
+ ACCUMULATE_OUTGOING_ARGS := 1
+ endif
+ endif
+endif
+
+#
+# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
+# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
+# to test for this bug at compile-time because the test case needs to execute,
+# which is a no-go for cross compilers. So check the GCC version instead.
+#
+ifdef CONFIG_JUMP_LABEL
+ ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+ ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
+ endif
+endif
+
+ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+ KBUILD_CFLAGS += -maccumulate-outgoing-args
+endif
+
# Stackpointer is addressed different for 32 bit and 64 bit x86
sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp
# cpu entries
cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
-# Work around the pentium-mmx code generator madness of gcc4.4.x which
-# does stack alignment by generating horrible code _before_ the mcount
-# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
-# tracer assumptions. For i686, generic, core2 this is set by the
-# compiler anyway
-ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-# Work around to a bug with asm goto with first implementations of it
-# in gcc causing gcc to mess up the push and pop of the stack in some
-# uses of asm goto.
-ifeq ($(CONFIG_JUMP_LABEL), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
-
# Bug fix for binutils: this option is required in order to keep
# binutils from generating NOPL instructions against our will.
ifneq ($(CONFIG_X86_P6_NOP),y)
* memcpy() and memmove() are defined for the compressed boot environment.
*/
#include "misc.h"
+#include "error.h"
void warn(char *m)
{
{
vdso32_enabled = simple_strtoul(s, NULL, 0);
- if (vdso32_enabled > 1)
+ if (vdso32_enabled > 1) {
pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+ vdso32_enabled = 0;
+ }
return 1;
}
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
+static const int zero;
+static const int one = 1;
+
static struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
.data = &vdso32_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (int *)&zero,
+ .extra2 = (int *)&one,
},
{}
};
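/* Net effect (editorial note): with proc_dointvec_minmax() and the
 * extra1/extra2 bounds above, out-of-range writes to this sysctl now
 * fail with -EINVAL instead of being stored, matching the clamp added
 * to the boot parameter handler. */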
struct perf_event_mmap_page *userpg, u64 now)
{
struct cyc2ns_data *data;
+ u64 offset;
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
userpg->pmc_width = x86_pmu.cntval_bits;
- if (!sched_clock_stable())
+ if (!using_native_sched_clock() || !sched_clock_stable())
return;
data = cyc2ns_read_begin();
+ offset = data->cyc2ns_offset + __sched_clock_offset;
+
/*
* Internal timekeeping for enabled/running/stopped times
* is always in the local_clock domain.
userpg->cap_user_time = 1;
userpg->time_mult = data->cyc2ns_mul;
userpg->time_shift = data->cyc2ns_shift;
- userpg->time_offset = data->cyc2ns_offset - now;
+ userpg->time_offset = offset - now;
/*
* cap_user_time_zero doesn't make sense when we're using a different
*/
if (!event->attr.use_clockid) {
userpg->cap_user_time_zero = 1;
- userpg->time_zero = data->cyc2ns_offset;
+ userpg->time_zero = offset;
}
cyc2ns_read_end(data);
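For reference, userspace is expected to reconstruct time from the fields set
above; a hypothetical reader following the documented perf mmap-page
convention (struct and field names from <linux/perf_event.h>; the helper
itself is illustrative):

	static inline __u64 perf_user_time(const struct perf_event_mmap_page *pc,
					   __u64 cyc)
	{
		__u64 quot = cyc >> pc->time_shift;
		__u64 rem  = cyc & (((__u64)1 << pc->time_shift) - 1);

		/* Folding __sched_clock_offset into time_offset keeps this
		 * result consistent with the kernel's sched_clock(). */
		return pc->time_offset + quot * pc->time_mult +
		       ((rem * pc->time_mult) >> pc->time_shift);
	}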
cpuc->lbr_entries[i].to = msr_lastbranch.to;
cpuc->lbr_entries[i].mispred = 0;
cpuc->lbr_entries[i].predicted = 0;
+ cpuc->lbr_entries[i].in_tx = 0;
+ cpuc->lbr_entries[i].abort = 0;
+ cpuc->lbr_entries[i].cycles = 0;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
#define ARCH_DLINFO_IA32 \
do { \
- if (vdso32_enabled) { \
+ if (VDSO_CURRENT_BASE) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
};
void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
*/
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
clwb(p);
}
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
- return iter_is_iovec(i) == false;
-}
-
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(addr, bytes, i);
- if (__iter_needs_pmem_wb(i))
+ /*
+ * In the iovec case on x86_64 copy_from_iter_nocache() uses
+ * non-temporal stores for the bulk of the transfer, but we need
+ * to manually flush if the transfer is unaligned. A cached
+ * memory copy is used when destination or size is not naturally
+ * aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
+ *
+ * In the non-iovec case the entire destination needs to be
+ * flushed.
+ */
+ if (iter_is_iovec(i)) {
+ unsigned long flushed, dest = (unsigned long) addr;
+
+ if (bytes < 8) {
+ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+ arch_wb_cache_pmem(addr, 1);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+ arch_wb_cache_pmem(addr, 1);
+ }
+
+ flushed = dest - (unsigned long) addr;
+ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+ arch_wb_cache_pmem(addr + bytes - 1, 1);
+ }
+ } else
arch_wb_cache_pmem(addr, bytes);
return len;
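Condensed, the alignment rules in the comment above amount to the following
predicate (a hypothetical helper that only restates the logic; IS_ALIGNED is
from <linux/kernel.h>):

	/* True when an iovec transfer still needs a manual write-back:
	 * sub-8-byte copies must be exactly 4 bytes to a 4-byte-aligned
	 * destination, larger ones need an 8-byte-aligned destination
	 * and a multiple-of-8 length. */
	static bool pmem_iovec_needs_wb(unsigned long dest, size_t bytes)
	{
		if (bytes < 8)
			return !IS_ALIGNED(dest, 4) || bytes != 4;
		return !IS_ALIGNED(dest, 8) || !IS_ALIGNED(bytes, 8);
	}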
extern int no_timer_check;
+extern bool using_native_sched_clock(void);
+
/*
* We use the full linear equation: f(x) = a + b*x, in order to allow
* a continuous function in the face of dynamic freq changes.
if (paddr < uv_hub_info->lowmem_remap_top)
paddr |= uv_hub_info->lowmem_remap_base;
- paddr |= uv_hub_info->gnode_upper;
- if (m_val)
+
+ if (m_val) {
+ paddr |= uv_hub_info->gnode_upper;
paddr = ((paddr << uv_hub_info->m_shift)
>> uv_hub_info->m_shift) |
((paddr >> uv_hub_info->m_val)
<< uv_hub_info->n_lshift);
- else
+ } else {
paddr |= uv_soc_phys_ram_to_nasid(paddr)
<< uv_hub_info->gpa_shift;
+ }
return paddr;
}
node_id.v = uv_read_local_mmr(UVH_NODE_ID);
uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
- hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val;
+ if (mn.m_val)
+ hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
if (uv_gp_table) {
hi->global_mmr_base = uv_gp_table->mmr_base;
}
out:
- rdtgroup_kn_unlock(of->kn);
for_each_enabled_rdt_resource(r) {
kfree(r->tmp_cbms);
r->tmp_cbms = NULL;
}
+ rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}
static DEFINE_MUTEX(mce_chrdev_read_mutex);
+static int mce_chrdev_open_count; /* #times opened */
+
#define mce_log_get_idx_check(p) \
({ \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
if (atomic_read(&num_notifiers) > 2)
return NOTIFY_DONE;
+ /* Don't print when mcelog is running */
+ if (mce_chrdev_open_count > 0)
+ return NOTIFY_DONE;
+
__print_mce(m);
return NOTIFY_DONE;
*/
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count; /* #times opened */
static int mce_chrdev_open_exclu; /* already open exclusive? */
static int mce_chrdev_open(struct inode *inode, struct file *file)
"load_store",
"insn_fetch",
"combined_unit",
- "",
+ "decode_unit",
"northbridge",
"execution_unit",
};
#include <asm/ftrace.h>
#include <asm/nops.h>
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \
+ !defined(CC_USING_FENTRY) && \
+ !defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE)
+# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_arch_code_modify_prepare(void)
task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
me->comm, me->pid, where, frame,
regs->ip, regs->sp, regs->orig_ax);
- print_vma_addr(" in ", regs->ip);
+ print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}
if (from->si_signo == SIGSEGV) {
if (from->si_code == SEGV_BNDERR) {
- compat_uptr_t lower = (unsigned long)&to->si_lower;
- compat_uptr_t upper = (unsigned long)&to->si_upper;
+ compat_uptr_t lower = (unsigned long)from->si_lower;
+ compat_uptr_t upper = (unsigned long)from->si_upper;
put_user_ex(lower, &to->si_lower);
put_user_ex(upper, &to->si_upper);
}
pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
tsk->comm, tsk->pid, str,
regs->ip, regs->sp, error_code);
- print_vma_addr(" in ", regs->ip);
+ print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}
pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
tsk->comm, task_pid_nr(tsk),
regs->ip, regs->sp, error_code);
- print_vma_addr(" in ", regs->ip);
+ print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}
return paravirt_sched_clock();
}
-static inline bool using_native_sched_clock(void)
+bool using_native_sched_clock(void)
{
return pv_time_ops.sched_clock == native_sched_clock;
}
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
-static inline bool using_native_sched_clock(void) { return true; }
+bool using_native_sched_clock(void) { return true; }
#endif
int check_tsc_unstable(void)
{
struct kvm_pic *vpic = kvm->arch.vpic;
+ if (!vpic)
+ return;
+
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ if (!ioapic)
+ return;
+
cancel_delayed_work_sync(&ioapic->eoi_inject);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
kvm->arch.vioapic = NULL;
return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
}
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+ struct kvm_page_track_notifier_head *head;
+
+ head = &kvm->arch.track_notifier_head;
+ cleanup_srcu_struct(&head->track_srcu);
+}
+
void kvm_page_track_init(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;
unsigned long flags;
struct kvm_arch *vm_data = &kvm->arch;
+ if (!avic)
+ return;
+
avic_free_vm_id(vm_data->avic_vm_id);
if (vm_data->avic_logical_id_table_page)
return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}
+static inline bool cpu_has_vmx_invvpid(void)
+{
+ return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
static inline bool cpu_has_vmx_ept(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
* though it is treated as global context. The alternative is
* not failing the single-context invvpid, and it is worse.
*/
- if (enable_vpid)
+ if (enable_vpid) {
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_VPID;
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
VMX_VPID_EXTENT_SUPPORTED_MASK;
- else
+ } else
vmx->nested.nested_vmx_vpid_caps = 0;
if (enable_unrestricted_guest)
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
}
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+ if (enable_ept)
+ vmx_flush_tlb(vcpu);
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
- if (!cpu_has_vmx_vpid())
+ if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+ !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
+
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
if (enable_shadow_vmcs)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
case EXIT_REASON_PREEMPTION_TIMER:
return false;
+ case EXIT_REASON_PML_FULL:
+ /* We don't expose PML support to L1. */
+ return false;
default:
return true;
}
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
else {
- WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+ exit_reason);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
} else {
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ vmx_flush_tlb_ept_only(vcpu);
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
*/
if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(get_vmcs12(&vmx->vcpu),
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmcs_write64(APIC_ACCESS_ADDR, hpa);
+ vmx_flush_tlb_ept_only(vcpu);
+ }
}
static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control;
- bool nested_ept_enabled = false;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
vmcs12->guest_intr_status);
}
- nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
/*
* Write an illegal value to APIC_ACCESS_ADDR. Later,
* nested_get_vmcs12_pages will either fix it up or
}
+ if (enable_pml) {
+ /*
+ * Conceptually we want to copy the PML address and index from
+ * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+ * since we always flush the log on each vmexit, this happens
+ * to be equivalent to simply resetting the fields in vmcs02.
+ */
+ ASSERT(vmx->pml_pg);
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
+
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);
+ } else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmx_flush_tlb_ept_only(vcpu);
}
/*
vmx_set_efer(vcpu, vcpu->arch.efer);
/* Shadow page tables on either EPT or shadow page tables. */
- if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
return 1;
- kvm_mmu_reset_context(vcpu);
-
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
vmx_set_virtual_x2apic_mode(vcpu,
vcpu->arch.apic_base & X2APIC_ENABLE);
+ } else if (!nested_cpu_has_ept(vmcs12) &&
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmx_flush_tlb_ept_only(vcpu);
}
/* This is needed for same reason as it was needed in prepare_vmcs02 */
if (kvm_x86_ops->vm_destroy)
kvm_x86_ops->vm_destroy(kvm);
kvm_iommu_unmap_guest(kvm);
- kfree(kvm->arch.vpic);
- kfree(kvm->arch.vioapic);
+ kvm_pic_destroy(kvm);
+ kvm_ioapic_destroy(kvm);
kvm_free_vcpus(kvm);
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kvm_mmu_uninit_vm(kvm);
+ kvm_page_track_cleanup(kvm);
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
{
struct x86_exception fault;
- trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (work->wakeup_all)
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+ trace_kvm_async_pf_ready(work->arch.token, work->gva);
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
- _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr < 256)
- return 1;
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ if (page_is_ram(pagenr)) {
+ /*
+ * For disallowed memory regions in the low 1MB range,
+ * request that the page be shown as all zeros.
+ */
+ if (pagenr < 256)
+ return 2;
+
+ return 0;
+ }
+
+ /*
+	 * This must follow the RAM test, since System RAM is considered a
+ * restricted resource under CONFIG_STRICT_IOMEM.
+ */
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+ /* Low 1MB bypasses iomem restrictions. */
+ if (pagenr < 256)
+ return 1;
+
return 0;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
+ }
+
+ return 1;
}
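A sketch of how a /dev/mem-style consumer might act on the three return
values (hypothetical helper; 'buf' and 'count' are illustrative):

	static int handle_devmem_page(char *buf, unsigned long pagenr,
				      size_t count)
	{
		switch (devmem_is_allowed(pagenr)) {
		case 0:		/* restricted region: refuse the access */
			return -EPERM;
		case 2:		/* allowed, but must read as all zeros */
			memset(buf, 0, count);
			return 0;
		default:	/* 1: normal, unrestricted access */
			return 0;
		}
	}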
void free_init_pages(char *what, unsigned long begin, unsigned long end)
#if defined(CONFIG_X86_ESPFIX64)
static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_START;
+static const unsigned long vaddr_end = EFI_VA_END;
#else
static const unsigned long vaddr_end = __START_KERNEL_map;
#endif
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
- vaddr_end >= EFI_VA_START);
+ vaddr_end >= EFI_VA_END);
BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map);
return;
}
+ /* No need to reserve regions that will never be freed. */
+ if (md.attribute & EFI_MEMORY_RUNTIME)
+ return;
+
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
targets += purgatory.ro
+KASAN_SANITIZE := n
KCOV_INSTRUMENT := n
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+ unsigned long off = va - PAGE_OFFSET;
+
+ if (off >= XCHAL_KSEG_SIZE)
+ off -= XCHAL_KSEG_SIZE;
+
+ return off + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
+#else
#define __pa(x) \
((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
#define __va(x) \
((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
#define __NR_pkey_free 350
__SYSCALL(350, sys_pkey_free, 1)
-#define __NR_syscall_count 351
+#define __NR_statx 351
+__SYSCALL(351, sys_statx, 5)
+
+#define __NR_syscall_count 352
/*
* sysxtensa syscall handler
static int show_trace_cb(struct stackframe *frame, void *data)
{
- if (kernel_text_address(frame->pc)) {
- pr_cont(" [<%08lx>]", frame->pc);
- print_symbol(" %s\n", frame->pc);
- }
+ if (kernel_text_address(frame->pc))
+ pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
return 0;
}
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
- struct elevator_queue *e = hctx->queue->elevator;
+ struct request_queue *q = hctx->queue;
+ struct elevator_queue *e = q->elevator;
const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
bool did_work = false;
LIST_HEAD(rq_list);
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
- did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+ did_work = blk_mq_dispatch_rq_list(q, &rq_list);
} else if (!has_sched_dispatch) {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
- blk_mq_dispatch_rq_list(hctx, &rq_list);
+ blk_mq_dispatch_rq_list(q, &rq_list);
}
/*
if (!rq)
break;
list_add(&rq->queuelist, &rq_list);
- } while (blk_mq_dispatch_rq_list(hctx, &rq_list));
+ } while (blk_mq_dispatch_rq_list(q, &rq_list));
}
}
return true;
}
-static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- if (blk_mq_hctx_has_pending(hctx))
+ if (blk_mq_hctx_has_pending(hctx)) {
blk_mq_run_hw_queue(hctx, true);
+ return true;
+ }
}
+ return false;
}
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
-{
- struct request_queue *q = hctx->queue;
- unsigned int i;
+/**
+ * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
+ * @pos: loop cursor.
+ * @skip: the list element that will not be examined. Iteration starts at
+ * @skip->next.
+ * @head: head of the list to examine. This list must have at least one
+ * element, namely @skip.
+ * @member: name of the list_head structure within typeof(*pos).
+ */
+#define list_for_each_entry_rcu_rr(pos, skip, head, member) \
+ for ((pos) = (skip); \
+ (pos = (pos)->member.next != (head) ? list_entry_rcu( \
+ (pos)->member.next, typeof(*pos), member) : \
+ list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
+ (pos) != (skip); )
- if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
- if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_restart_hctx(hctx);
+/*
+ * Called after a driver tag has been freed to check whether a hctx needs to
+ * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
+ * queues in a round-robin fashion if the tag set of @hctx is shared with other
+ * hardware queues.
+ */
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
+{
+ struct blk_mq_tags *const tags = hctx->tags;
+ struct blk_mq_tag_set *const set = hctx->queue->tag_set;
+ struct request_queue *const queue = hctx->queue, *q;
+ struct blk_mq_hw_ctx *hctx2;
+ unsigned int i, j;
+
+ if (set->flags & BLK_MQ_F_TAG_SHARED) {
+ rcu_read_lock();
+ list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
+ tag_set_list) {
+ queue_for_each_hw_ctx(q, hctx2, i)
+ if (hctx2->tags == tags &&
+ blk_mq_sched_restart_hctx(hctx2))
+ goto done;
+ }
+ j = hctx->queue_num + 1;
+ for (i = 0; i < queue->nr_hw_queues; i++, j++) {
+ if (j == queue->nr_hw_queues)
+ j = 0;
+ hctx2 = queue->queue_hw_ctx[j];
+ if (hctx2->tags == tags &&
+ blk_mq_sched_restart_hctx(hctx2))
+ break;
}
+done:
+ rcu_read_unlock();
} else {
blk_mq_sched_restart_hctx(hctx);
}
}
}
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct blk_mq_tag_set *set = q->tag_set;
+ int ret;
+
+ hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+ set->reserved_tags);
+ if (!hctx->sched_tags)
+ return -ENOMEM;
+
+ ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+ if (ret)
+ blk_mq_sched_free_tags(set, hctx, hctx_idx);
+
+ return ret;
+}
+
+static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
- int ret, i;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_sched_free_tags(set, hctx, i);
+}
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (!e)
+ return 0;
+
+ return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
+}
+
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (!e)
+ return;
+
+ blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
+}
+
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+ int ret;
+
+ if (!e) {
+ q->elevator = NULL;
+ return 0;
+ }
/*
* Default to 256, since we don't split into sync/async like the
*/
q->nr_requests = 2 * BLKDEV_MAX_RQ;
- /*
- * We're switching to using an IO scheduler, so setup the hctx
- * scheduler tags and switch the request map from the regular
- * tags to scheduler tags. First allocate what we need, so we
- * can safely fail and fallback, if needed.
- */
- ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
- hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
- q->nr_requests, set->reserved_tags);
- if (!hctx->sched_tags) {
- ret = -ENOMEM;
- break;
- }
- ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
+ ret = blk_mq_sched_alloc_tags(q, hctx, i);
if (ret)
- break;
+ goto err;
}
- /*
- * If we failed, free what we did allocate
- */
- if (ret) {
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx->sched_tags)
- continue;
- blk_mq_sched_free_tags(set, hctx, i);
- }
-
- return ret;
- }
+ ret = e->ops.mq.init_sched(q, e);
+ if (ret)
+ goto err;
return 0;
+
+err:
+ blk_mq_sched_tags_teardown(q);
+ q->elevator = NULL;
+ return ret;
}
-void blk_mq_sched_teardown(struct request_queue *q)
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
- struct blk_mq_tag_set *set = q->tag_set;
- struct blk_mq_hw_ctx *hctx;
- int i;
-
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
+ if (e->type->ops.mq.exit_sched)
+ e->type->ops.mq.exit_sched(e);
+ blk_mq_sched_tags_teardown(q);
+ q->elevator = NULL;
}
int blk_mq_sched_init(struct request_queue *q)
struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async, bool can_block);
struct list_head *rq_list,
struct request *(*get_rq)(struct blk_mq_hw_ctx *));
-int blk_mq_sched_setup(struct request_queue *q);
-void blk_mq_sched_teardown(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx);
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_idx);
int blk_mq_sched_init(struct request_queue *q);
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
-/*
- * Mark a hardware queue and the request queue it belongs to as needing a
- * restart.
- */
-static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
-{
- struct request_queue *q = hctx->queue;
-
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
- set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-}
-
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
- blk_mq_put_ctx(alloc_data.ctx);
blk_queue_exit(q);
if (!rq)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
blk_mq_sched_completed_request(hctx, rq);
- blk_mq_sched_restart_queues(hctx);
+ blk_mq_sched_restart(hctx);
blk_queue_exit(q);
}
.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
};
- if (rq->tag != -1) {
-done:
- if (hctx)
- *hctx = data.hctx;
- return true;
- }
+ if (rq->tag != -1)
+ goto done;
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
atomic_inc(&data.hctx->nr_active);
}
data.hctx->tags->rqs[rq->tag] = rq;
- goto done;
}
- return false;
+done:
+ if (hctx)
+ *hctx = data.hctx;
+ return rq->tag != -1;
}
static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
return true;
}
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
- struct request_queue *q = hctx->queue;
+ struct blk_mq_hw_ctx *hctx;
struct request *rq;
LIST_HEAD(driver_list);
struct list_head *dptr;
- int queued, ret = BLK_MQ_RQ_QUEUE_OK;
+ int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+
+ if (list_empty(list))
+ return false;
/*
* Start off with dptr being NULL, so we start the first request
/*
* Now process all the entries, sending them to the driver.
*/
- queued = 0;
- while (!list_empty(list)) {
+ errors = queued = 0;
+ do {
struct blk_mq_queue_data bd;
rq = list_first_entry(list, struct request, queuelist);
default:
pr_err("blk-mq: bad return on queue: %d\n", ret);
case BLK_MQ_RQ_QUEUE_ERROR:
+ errors++;
rq->errors = -EIO;
blk_mq_end_request(rq, rq->errors);
break;
*/
if (!dptr && list->next != list->prev)
dptr = &driver_list;
- }
+ } while (!list_empty(list));
hctx->dispatched[queued_to_index(queued)]++;
blk_mq_run_hw_queue(hctx, true);
}
- return queued != 0;
+ return (queued + errors) != 0;
}
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
return hctx->next_cpu;
}
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+ unsigned long msecs)
{
if (unlikely(blk_mq_hctx_stopped(hctx) ||
!blk_mq_hw_queue_mapped(hctx)))
put_cpu();
}
- kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+ if (msecs == 0)
+ kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->run_work);
+ else
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->delayed_run_work,
+ msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+ __blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+ __blk_mq_delay_run_hw_queue(hctx, async, 0);
}
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
__blk_mq_run_hw_queue(hctx);
}
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+ __blk_mq_run_hw_queue(hctx);
+}
+
static void blk_mq_delay_work_fn(struct work_struct *work)
{
struct blk_mq_hw_ctx *hctx;
hctx->fq->flush_rq, hctx_idx,
flush_start_tag + hctx_idx);
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
node = hctx->numa_node = set->numa_node;
INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
+ if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+ goto exit_hctx;
+
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq)
- goto exit_hctx;
+ goto sched_exit_hctx;
if (set->ops->init_request &&
set->ops->init_request(set->driver_data,
free_fq:
kfree(hctx->fq);
+ sched_exit_hctx:
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
struct blk_mq_hw_ctx *hctx;
unsigned int i;
- blk_mq_sched_teardown(q);
-
/* hctx kobj stays in hctx */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx)
return 0;
}
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+ if (set->ops->map_queues)
+ return set->ops->map_queues(set);
+ else
+ return blk_mq_map_queues(set);
+}
+
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
if (!set->mq_map)
goto out_free_tags;
- if (set->ops->map_queues)
- ret = set->ops->map_queues(set);
- else
- ret = blk_mq_map_queues(set);
+ ret = blk_mq_update_queue_map(set);
if (ret)
goto out_free_mq_map;
blk_mq_freeze_queue(q);
set->nr_hw_queues = nr_hw_queues;
+ blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
+bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
if (q->elevator) {
ioc_clear_queue(q);
- elevator_exit(q->elevator);
+ elevator_exit(q, q->elevator);
}
blk_exit_rl(&q->root_rl);
}
}
- if (e->uses_mq) {
- err = blk_mq_sched_setup(q);
- if (!err)
- err = e->ops.mq.init_sched(q, e);
- } else
+ if (e->uses_mq)
+ err = blk_mq_init_sched(q, e);
+ else
err = e->ops.sq.elevator_init_fn(q, e);
- if (err) {
- if (e->uses_mq)
- blk_mq_sched_teardown(q);
+ if (err)
elevator_put(e);
- }
return err;
}
EXPORT_SYMBOL(elevator_init);
-void elevator_exit(struct elevator_queue *e)
+void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->uses_mq && e->type->ops.mq.exit_sched)
- e->type->ops.mq.exit_sched(e);
+ blk_mq_exit_sched(q, e);
else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
e->type->ops.sq.elevator_exit_fn(e);
mutex_unlock(&e->sysfs_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);
+static int elevator_switch_mq(struct request_queue *q,
+ struct elevator_type *new_e)
+{
+ int ret;
+
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+
+ if (q->elevator) {
+ if (q->elevator->registered)
+ elv_unregister_queue(q);
+ ioc_clear_queue(q);
+ elevator_exit(q, q->elevator);
+ }
+
+ ret = blk_mq_init_sched(q, new_e);
+ if (ret)
+ goto out;
+
+ if (new_e) {
+ ret = elv_register_queue(q);
+ if (ret) {
+ elevator_exit(q, q->elevator);
+ goto out;
+ }
+ }
+
+ if (new_e)
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+ else
+ blk_add_trace_msg(q, "elv switch: none");
+
+out:
+ blk_mq_unfreeze_queue(q);
+ blk_mq_start_stopped_hw_queues(q, true);
+ return ret;
+
+}
+
/*
* switch to new_e io scheduler. be careful not to introduce deadlocks -
* we don't free the old io scheduler, before we have allocated what we
bool old_registered = false;
int err;
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
- }
+ if (q->mq_ops)
+ return elevator_switch_mq(q, new_e);
/*
* Turn on BYPASS and drain all requests w/ elevator private data.
if (old) {
old_registered = old->registered;
- if (old->uses_mq)
- blk_mq_sched_teardown(q);
-
- if (!q->mq_ops)
- blk_queue_bypass_start(q);
+ blk_queue_bypass_start(q);
/* unregister and clear all auxiliary data of the old elevator */
if (old_registered)
}
/* allocate, init and register new elevator */
- if (new_e) {
- if (new_e->uses_mq) {
- err = blk_mq_sched_setup(q);
- if (!err)
- err = new_e->ops.mq.init_sched(q, new_e);
- } else
- err = new_e->ops.sq.elevator_init_fn(q, new_e);
- if (err)
- goto fail_init;
+ err = new_e->ops.sq.elevator_init_fn(q, new_e);
+ if (err)
+ goto fail_init;
- err = elv_register_queue(q);
- if (err)
- goto fail_register;
- } else
- q->elevator = NULL;
+ err = elv_register_queue(q);
+ if (err)
+ goto fail_register;
/* done, kill the old one and finish */
if (old) {
- elevator_exit(old);
- if (!q->mq_ops)
- blk_queue_bypass_end(q);
+ elevator_exit(q, old);
+ blk_queue_bypass_end(q);
}
- if (q->mq_ops) {
- blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
- }
-
- if (new_e)
- blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
- else
- blk_add_trace_msg(q, "elv switch: none");
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
- if (q->mq_ops)
- blk_mq_sched_teardown(q);
- elevator_exit(q->elevator);
+ elevator_exit(q, q->elevator);
fail_init:
/* switch failed, restore and re-register old elevator */
if (old) {
q->elevator = old;
elv_register_queue(q);
- if (!q->mq_ops)
- blk_queue_bypass_end(q);
- }
- if (q->mq_ops) {
- blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
+ blk_queue_bypass_end(q);
}
return err;
subreq->cryptlen = LRW_BUFFER_SIZE;
if (req->cryptlen > LRW_BUFFER_SIZE) {
- subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
- rctx->ext = kmalloc(subreq->cryptlen, gfp);
+ unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+ rctx->ext = kmalloc(n, gfp);
+ if (rctx->ext)
+ subreq->cryptlen = n;
}
rctx->src = req->src;
subreq->cryptlen = XTS_BUFFER_SIZE;
if (req->cryptlen > XTS_BUFFER_SIZE) {
- subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
- rctx->ext = kmalloc(subreq->cryptlen, gfp);
+ unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+ rctx->ext = kmalloc(n, gfp);
+ if (rctx->ext)
+ subreq->cryptlen = n;
}
rctx->src = req->src;
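Both hunks above follow the same pattern: try to grab a larger bounce buffer, but only commit to the larger length once kmalloc() has actually succeeded, so a failed allocation quietly falls back to the fixed-size buffer. A minimal standalone sketch of that fallback, with hypothetical BUFFER_SIZE/CHUNK_MAX constants standing in for LRW_BUFFER_SIZE/XTS_BUFFER_SIZE and PAGE_SIZE:

#include <stdio.h>
#include <stdlib.h>

#define BUFFER_SIZE 128		/* stand-in for LRW_BUFFER_SIZE/XTS_BUFFER_SIZE */
#define CHUNK_MAX 4096		/* stand-in for PAGE_SIZE */

static size_t pick_len(size_t req_len, void **ext)
{
	size_t len = BUFFER_SIZE;	/* safe default: the built-in buffer */

	*ext = NULL;
	if (req_len > BUFFER_SIZE) {
		size_t n = req_len < CHUNK_MAX ? req_len : CHUNK_MAX;

		*ext = malloc(n);
		if (*ext)		/* grow only when the allocation worked */
			len = n;
	}
	return len;
}

int main(void)
{
	void *ext;

	printf("len=%zu\n", pick_len(1000, &ext));	/* 1000 if malloc succeeded */
	free(ext);
	return 0;
}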
# Makefile for the Linux ACPI interpreter
#
-ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
ACPI_MODULE_NAME("platform");
static const struct acpi_device_id forbidden_id_list[] = {
- {"PNP0000", 0}, /* PIC */
- {"PNP0100", 0}, /* Timer */
- {"PNP0200", 0}, /* AT DMA Controller */
+ {"PNP0000", 0}, /* PIC */
+ {"PNP0100", 0}, /* Timer */
+ {"PNP0200", 0}, /* AT DMA Controller */
+ {"ACPI0009", 0}, /* IOxAPIC */
+ {"ACPI000A", 0}, /* IOAPIC */
{"", 0},
};
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
- /*
- * The absolute minimum resource template is one end_tag descriptor.
- * However, we will treat a lone end_tag as just a simple buffer.
- */
+ /* The absolute minimum resource template is one end_tag descriptor */
+
if (aml_length < sizeof(struct aml_resource_end_tag)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Invoke the user function */
if (user_function) {
- status = user_function(aml, length, offset,
- resource_index, context);
+ status =
+ user_function(aml, length, offset, resource_index,
+ context);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
*context = aml;
}
- /* Check if buffer is defined to be longer than the resource length */
-
- if (aml_length > (offset + length)) {
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
- }
-
/* Normal exit */
return_ACPI_STATUS(AE_OK);
if (list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
+ synchronize_rcu();
break;
case ACPI_HEST_NOTIFY_NMI:
ghes_nmi_remove(ghes);
return -ENODEV;
/*
- * If the device has a _HID (or _CID) returning a valid ACPI/PNP
- * device ID, it is better to make it look less attractive here, so that
- * the other device with the same _ADR value (that may not have a valid
- * device ID) can be matched going forward. [This means a second spec
- * violation in a row, so whatever we do here is best effort anyway.]
+ * If the device has a _HID returning a valid ACPI/PNP device ID, it is
+ * better to make it look less attractive here, so that the other device
+ * with the same _ADR value (that may not have a valid device ID) can be
+ * matched going forward. [This means a second spec violation in a row,
+ * so whatever we do here is best effort anyway.]
*/
- return sta_present && list_empty(&adev->pnp.ids) ?
+ return sta_present && !adev->pnp.type.platform_id ?
FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
struct resource *res = data;
struct resource_win win;
+ /*
+	 * We might assign this to 'res' later; make sure all pointers are
+ * cleared before the resource is added to the global list
+ */
+ memset(&win, 0, sizeof(win));
+
res->flags = 0;
if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
return AE_OK;
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
- return map0->region_offset - map1->region_offset;
+ if (map0->region_offset < map1->region_offset)
+ return -1;
+ else if (map0->region_offset > map1->region_offset)
+ return 1;
+ return 0;
}
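The comparator fix above matters because region_offset is a wide unsigned field: returning map0->region_offset - map1->region_offset truncates a 64-bit difference into an int, which can report two different offsets as equal or invert their order. A minimal standalone demonstration:

#include <stdio.h>
#include <stdint.h>

/* Truncating a 64-bit difference to int can report unequal offsets as
 * equal (or mis-order them); a three-way compare cannot. */
static int cmp_bad(uint64_t a, uint64_t b)  { return (int)(a - b); }
static int cmp_good(uint64_t a, uint64_t b) { return a < b ? -1 : a > b ? 1 : 0; }

int main(void)
{
	uint64_t a = 0, b = 1ull << 32;	/* differ only above bit 31 */

	printf("bad=%d good=%d\n", cmp_bad(a, b), cmp_good(a, b));
	return 0;	/* bad=0 falsely claims equality; good=-1 is correct */
}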
/* Retrieve the nth entry referencing this spa */
return;
device->flags.match_driver = true;
- if (!ret) {
- ret = device_attach(&device->dev);
- if (ret < 0)
- return;
-
- if (!ret && device->pnp.type.platform_id)
- acpi_default_enumeration(device);
+ if (ret > 0) {
+ acpi_device_set_enumerated(device);
+ goto ok;
}
+ ret = device_attach(&device->dev);
+ if (ret < 0)
+ return;
+
+ if (ret > 0 || !device->pnp.type.platform_id)
+ acpi_device_set_enumerated(device);
+ else
+ acpi_default_enumeration(device);
+
ok:
list_for_each_entry(child, &device->children, node)
acpi_bus_attach(child);
};
const struct ata_port_info *ppi[] = { &info, &info };
- /* SB600/700 don't have secondary port wired */
- if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
- (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
- ppi[1] = &ata_dummy_port_info;
-
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
- /* enable IRQ on hotplug */
- pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
- if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
- dev_dbg(&pdev->dev,
- "enabling SATA hotplug (0x%x)\n",
- (int) tmp8);
- tmp8 |= SATA_HOTPLUG;
- pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+ if (board_id == vt6421) {
+ /* enable IRQ on hotplug */
+ pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+ if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+ dev_dbg(&pdev->dev,
+ "enabling SATA hotplug (0x%x)\n",
+ (int) tmp8);
+ tmp8 |= SATA_HOTPLUG;
+ pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+ }
}
/*
struct nbd_sock {
struct socket *sock;
struct mutex tx_lock;
+ struct request *pending;
+ int sent;
};
#define NBD_TIMEDOUT 0
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
- bd_set_size(bdev, 0);
+ if (bdev->bd_openers <= 1)
+ bd_set_size(bdev, 0);
set_capacity(nbd->disk, 0);
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
- req->errors++;
+ req->errors = -EIO;
mutex_lock(&nbd->config_lock);
sock_shutdown(nbd);
* Send or receive packet.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
- struct iov_iter *iter, int msg_flags)
+ struct iov_iter *iter, int msg_flags, int *sent)
{
struct socket *sock = nbd->socks[index]->sock;
int result;
result = -EPIPE; /* short read */
break;
}
+ if (sent)
+ *sent += result;
} while (msg_data_left(&msg));
tsk_restore_flags(current, pflags, PF_MEMALLOC);
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
+ struct nbd_sock *nsock = nbd->socks[index];
int result;
struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
struct bio *bio;
u32 type;
u32 tag = blk_mq_unique_tag(req);
+ int sent = nsock->sent, skip = 0;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
return -EIO;
}
+ /* We did a partial send previously, and we at least sent the whole
+ * request struct, so just go and send the rest of the pages in the
+ * request.
+ */
+ if (sent) {
+ if (sent >= sizeof(request)) {
+ skip = sent - sizeof(request);
+ goto send_pages;
+ }
+ iov_iter_advance(&from, sent);
+ }
request.type = htonl(type);
if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
cmd, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, index, 1, &from,
- (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
+ (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
if (result <= 0) {
+ if (result == -ERESTARTSYS) {
+			/* If we haven't sent anything we can just return BUSY,
+ * however if we have sent something we need to make
+ * sure we only allow this req to be sent until we are
+ * completely done.
+ */
+ if (sent) {
+ nsock->pending = req;
+ nsock->sent = sent;
+ }
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ }
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
return -EIO;
}
-
+send_pages:
if (type != NBD_CMD_WRITE)
- return 0;
+ goto out;
bio = req->bio;
while (bio) {
cmd, bvec.bv_len);
iov_iter_bvec(&from, ITER_BVEC | WRITE,
&bvec, 1, bvec.bv_len);
- result = sock_xmit(nbd, index, 1, &from, flags);
+ if (skip) {
+ if (skip >= iov_iter_count(&from)) {
+ skip -= iov_iter_count(&from);
+ continue;
+ }
+ iov_iter_advance(&from, skip);
+ skip = 0;
+ }
+ result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result <= 0) {
+ if (result == -ERESTARTSYS) {
+ /* We've already sent the header, we
+ * have no choice but to set pending and
+ * return BUSY.
+ */
+ nsock->pending = req;
+ nsock->sent = sent;
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ }
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
}
bio = next;
}
+out:
+ nsock->pending = NULL;
+ nsock->sent = 0;
return 0;
}
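The resume bookkeeping above splits a partially transmitted request into two cases: still somewhere in the header, or some number of bytes into the payload. A standalone model of that arithmetic, with a hypothetical HDR_LEN standing in for sizeof(struct nbd_request) and plain segment lengths standing in for the bio_vec loop:

#include <stdio.h>

#define HDR_LEN 28	/* hypothetical: stands in for sizeof(struct nbd_request) */

/* Given how many bytes already went out, decide where to restart:
 * still inside the header, or 'skip' bytes into the payload segments. */
static void resume_point(size_t sent, const size_t *seg, int nseg)
{
	size_t skip;
	int i;

	if (sent < HDR_LEN) {
		printf("resume inside header at offset %zu\n", sent);
		return;
	}
	skip = sent - HDR_LEN;
	for (i = 0; i < nseg && skip >= seg[i]; i++)
		skip -= seg[i];		/* this segment was fully sent */
	printf("resume at segment %d, offset %zu\n", i, skip);
}

int main(void)
{
	const size_t segs[] = { 4096, 4096, 512 };

	resume_point(10, segs, 3);		/* mid-header */
	resume_point(HDR_LEN + 5000, segs, 3);	/* 904 bytes into segment 1 */
	return 0;
}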
reply.magic = 0;
iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
- result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result <= 0) {
if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
!test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
- req->errors++;
+ req->errors = -EIO;
return cmd;
}
rq_for_each_segment(bvec, req, iter) {
iov_iter_bvec(&to, ITER_BVEC | READ,
&bvec, 1, bvec.bv_len);
- result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
- req->errors++;
+ req->errors = -EIO;
return cmd;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
if (!blk_mq_request_started(req))
return;
cmd = blk_mq_rq_to_pdu(req);
- req->errors++;
+ req->errors = -EIO;
nbd_end_request(cmd);
}
}
-static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
struct nbd_sock *nsock;
+ int ret;
if (index >= nbd->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
- goto error_out;
+ return -EINVAL;
}
if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on closed socket\n");
- goto error_out;
+ return -EINVAL;
}
req->errors = 0;
mutex_unlock(&nsock->tx_lock);
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on closed socket\n");
- goto error_out;
+ return -EINVAL;
}
- if (nbd_send_cmd(nbd, cmd, index) != 0) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Request send failed\n");
- req->errors++;
- nbd_end_request(cmd);
+ /* Handle the case that we have a pending request that was partially
+ * transmitted that _has_ to be serviced first. We need to call requeue
+ * here so that it gets put _after_ the request that is already on the
+ * dispatch list.
+ */
+ if (unlikely(nsock->pending && nsock->pending != req)) {
+ blk_mq_requeue_request(req, true);
+ ret = 0;
+ goto out;
}
-
+ ret = nbd_send_cmd(nbd, cmd, index);
+out:
mutex_unlock(&nsock->tx_lock);
-
- return;
-
-error_out:
- req->errors++;
- nbd_end_request(cmd);
+ return ret;
}
static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ int ret;
/*
* Since we look at the bio's to send the request over the network we
*/
init_completion(&cmd->send_complete);
blk_mq_start_request(bd->rq);
- nbd_handle_cmd(cmd, hctx->queue_num);
+
+ /* We can be called directly from the user space process, which means we
+ * could possibly have signals pending so our sendmsg will fail. In
+ * this case we need to return that we are busy, otherwise error out as
+ * appropriate.
+ */
+ ret = nbd_handle_cmd(cmd, hctx->queue_num);
+ if (ret < 0)
+ ret = BLK_MQ_RQ_QUEUE_ERROR;
+ if (!ret)
+ ret = BLK_MQ_RQ_QUEUE_OK;
complete(&cmd->send_complete);
- return BLK_MQ_RQ_QUEUE_OK;
+ return ret;
}
static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
mutex_init(&nsock->tx_lock);
nsock->sock = sock;
+ nsock->pending = NULL;
+ nsock->sent = 0;
socks[nbd->num_connections++] = nsock;
if (max_part)
static void nbd_bdev_reset(struct block_device *bdev)
{
+ if (bdev->bd_openers > 1)
+ return;
set_device_ro(bdev, false);
bdev->bd_inode->i_size = 0;
if (max_part > 0) {
for (i = 0; i < nbd->num_connections; i++) {
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
- ret = sock_xmit(nbd, i, 1, &from, 0);
+ ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
{
sock_shutdown(nbd);
nbd_clear_que(nbd);
- kill_bdev(bdev);
+
+ __invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
/*
* We want to give the run thread a chance to wait for everybody
nbd_size_set(nbd, bdev, nbd->blksize, arg);
return 0;
case NBD_SET_TIMEOUT:
- nbd->tag_set.timeout = arg * HZ;
+ if (arg) {
+ nbd->tag_set.timeout = arg * HZ;
+ blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
+ }
return 0;
case NBD_SET_FLAGS:
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
- copy_page(mem, cmem);
+ memcpy(mem, cmem, PAGE_SIZE);
} else {
struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
- copy_page(cmem, src);
+ memcpy(cmem, src, PAGE_SIZE);
kunmap_atomic(src);
} else {
memcpy(cmem, src, clen);
}
index = sector >> SECTORS_PER_PAGE_SHIFT;
- offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+ offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
bv.bv_page = page;
bv.bv_len = PAGE_SIZE;
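The zram one-liner above is a pure operator-precedence fix: << binds tighter than &, so the old expression masked the sector with (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT instead of masking first and shifting the result. A standalone check:

#include <stdio.h>

#define SECTOR_SHIFT 9
#define SECTORS_PER_PAGE 8	/* 4096-byte page / 512-byte sector */

int main(void)
{
	unsigned long sector = 11;
	/* '<<' binds tighter than '&': without parentheses the mask itself
	 * gets shifted before the AND, zeroing the low sector bits. */
	unsigned long buggy = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
	unsigned long fixed = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	printf("buggy=%lu fixed=%lu\n", buggy, fixed);	/* 0 vs 1536 */
	return 0;
}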
#endif
#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
return 1;
}
#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
while (count > 0) {
unsigned long remaining;
+ int allowed;
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
+
+ unxlate_dev_mem_ptr(p, ptr);
+ }
- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
#endif
while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}
buf += sz;
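page_is_allowed() now effectively returns a tri-state that both loops above honour: 0 refuses the access outright, 2 fakes success (reads see zeros via clear_user(), writes are silently skipped), and 1 takes the normal xlate/copy path. A standalone model of the read side, with memset()/memcpy() standing in for clear_user()/copy_to_user():

#include <stdio.h>
#include <string.h>

/* 0 = refuse, 1 = normal access, 2 = fake success with zeros */
static int read_page(int allowed, const char *mem, char *buf, size_t sz)
{
	if (!allowed)
		return -1;		/* -EPERM in the driver */
	if (allowed == 2)
		memset(buf, 0, sz);	/* clear_user() stand-in */
	else
		memcpy(buf, mem, sz);	/* xlate + copy_to_user() stand-in */
	return 0;
}

int main(void)
{
	char mem[4] = "ABC", buf[4];

	read_page(2, mem, buf, sizeof(buf));
	printf("restricted read sees: %d %d %d\n", buf[0], buf[1], buf[2]);
	return 0;	/* prints zeros, not the page contents */
}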
vdev->config->reset(vdev);
- virtqueue_disable_cb(portdev->c_ivq);
+ if (use_multiport(portdev))
+ virtqueue_disable_cb(portdev->c_ivq);
cancel_work_sync(&portdev->control_work);
cancel_work_sync(&portdev->config_work);
/*
* Once more: if control_work_handler() was running, it would
* enable the cb as the last step.
*/
- virtqueue_disable_cb(portdev->c_ivq);
+ if (use_multiport(portdev))
+ virtqueue_disable_cb(portdev->c_ivq);
remove_controlq_data(portdev);
list_for_each_entry(port, &portdev->ports, list) {
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/clockchip.h>
+#include <linux/clockchips.h>
extern struct of_device_id __clkevt_of_table[];
.release = cpufreq_sysfs_release,
};
-static int add_cpu_dev_symlink(struct cpufreq_policy *policy,
- struct device *dev)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
+ struct device *dev = get_cpu_device(cpu);
+
+ if (!dev)
+ return;
+
+ if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+ return;
+
dev_dbg(dev, "%s: Adding symlink\n", __func__);
- return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
+ dev_err(dev, "cpufreq symlink creation failed\n");
}
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->related_cpus)
+ for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ add_cpu_dev_symlink(policy, j);
+ }
} else {
policy->min = policy->user_policy.min;
policy->max = policy->user_policy.max;
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
+
+ for_each_cpu(j, policy->real_cpus)
+ remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
out_free_policy:
cpufreq_policy_free(policy);
return ret;
}
-static int cpufreq_offline(unsigned int cpu);
-
/**
* cpufreq_add_dev - the cpufreq interface for a CPU device.
* @dev: CPU device.
/* Create sysfs link on CPU registration */
policy = per_cpu(cpufreq_cpu_data, cpu);
- if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
- return 0;
-
- ret = add_cpu_dev_symlink(policy, dev);
- if (ret) {
- cpumask_clear_cpu(cpu, policy->real_cpus);
- cpufreq_offline(cpu);
- }
+ if (policy)
+ add_cpu_dev_symlink(policy, cpu);
- return ret;
+ return 0;
}
static int cpufreq_offline(unsigned int cpu)
*********************************************************************/
static enum cpuhp_state hp_online;
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+ cpufreq_online(cpu);
+
+ return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+ cpufreq_offline(cpu);
+
+ return 0;
+}
+
/**
* cpufreq_register_driver - register a CPU Frequency driver
* @driver_data: A struct cpufreq_driver containing the values
}
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
- cpufreq_online,
- cpufreq_offline);
+ cpuhp_cpufreq_online,
+ cpuhp_cpufreq_offline);
if (ret < 0)
goto err_if_unreg;
hp_online = ret;
drv->state_count += 1;
}
+ /*
+ * On the PowerNV platform cpu_present may be less than cpu_possible in
+ * cases when firmware detects the CPU, but it is not available to the
+	 * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at
+ * run time and hence cpu_devices are not created for those CPUs by the
+ * generic topology_init().
+ *
+ * drv->cpumask defaults to cpu_possible_mask in
+ * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
+ * cpu_devices are not created for CPUs in cpu_possible_mask that
+ * cannot be hot-added later at run time.
+ *
+ * Trying cpuidle_register_device() on a CPU without a cpu_device is
+ * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
+ */
+
+ drv->cpumask = (struct cpumask *)cpu_present_mask;
+
return 0;
}
ctx->dev = caam_jr_alloc();
if (IS_ERR(ctx->dev)) {
- dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+ pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->dev);
}
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
- if (ret || status) {
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
- int ring;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- /* Remove platform devices for JobRs */
- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++)
- of_device_unregister(ctrlpriv->jrpdev[ring]);
+ /* Remove platform devices under the crypto node */
+ of_platform_depopulate(ctrldev);
/* De-initialize RNG state handles initialized by this driver. */
if (ctrlpriv->rng4_sh_init)
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#endif
+static const struct of_device_id caam_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0",
+ },
+ {
+ .compatible = "fsl,sec4.0",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
goto iounmap_ctrl;
}
- /*
- * Detect and enable JobRs
- * First, find out how many ring spec'ed, allocate references
- * for all, then go probe each one.
- */
- rspec = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
- rspec++;
-
- ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
- sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
- if (ctrlpriv->jrpdev == NULL) {
- ret = -ENOMEM;
+ ret = of_platform_populate(nprop, caam_match, NULL, dev);
+ if (ret) {
+ dev_err(dev, "JR platform devices creation error\n");
goto iounmap_ctrl;
}
ring = 0;
- ridx = 0;
- ctrlpriv->total_jobrs = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jrpdev[ring] =
- of_platform_device_create(np, NULL, dev);
- if (!ctrlpriv->jrpdev[ring]) {
- pr_warn("JR physical index %d: Platform device creation error\n",
- ridx);
- ridx++;
- continue;
- }
ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
((__force uint8_t *)ctrl +
- (ridx + JR_BLOCK_NUMBER) *
+ (ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
ctrlpriv->total_jobrs++;
ring++;
- ridx++;
- }
+ }
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present =
return ret;
}
-static struct of_device_id caam_match[] = {
- {
- .compatible = "fsl,sec-v4.0",
- },
- {
- .compatible = "fsl,sec4.0",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, caam_match);
-
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
struct caam_drv_private {
struct device *dev;
- struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
struct platform_device *pdev;
/* Physical-presence section */
const struct ccp_vdata ccpv5b = {
.version = CCP_VERSION(5, 0),
+ .dma_chan_attr = DMA_PRIVATE,
.setup = ccp5other_config,
.perform = &ccp5_actions,
.bar = 2,
/* ------------------------ General CCP Defines ------------------------ */
+#define CCP_DMA_DFLT 0x0
+#define CCP_DMA_PRIV 0x1
+#define CCP_DMA_PUB 0x2
+
#define CCP_DMAPOOL_MAX_SIZE 64
#define CCP_DMAPOOL_ALIGN BIT(5)
/* Structure to hold CCP version-specific values */
struct ccp_vdata {
const unsigned int version;
+ const unsigned int dma_chan_attr;
void (*setup)(struct ccp_device *);
const struct ccp_actions *perform;
const unsigned int bar;
* published by the Free Software Foundation.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
(mask == 0) ? 64 : fls64(mask); \
})
+/* The CCP as a DMA provider can be configured for public or private
+ * channels. Default is specified in the vdata for the device (PCI ID).
+ * This module parameter will override for all channels on all devices:
+ * dma_chan_attr = 0x2 to force all channels public
+ * = 0x1 to force all channels private
+ * = 0x0 to defer to the vdata setting
+ * = any other value: warning, revert to 0x0
+ */
+static unsigned int dma_chan_attr = CCP_DMA_DFLT;
+module_param(dma_chan_attr, uint, 0444);
+MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
+
+unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
+{
+ switch (dma_chan_attr) {
+ case CCP_DMA_DFLT:
+ return ccp->vdata->dma_chan_attr;
+
+ case CCP_DMA_PRIV:
+ return DMA_PRIVATE;
+
+ case CCP_DMA_PUB:
+ return 0;
+
+ default:
+ dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
+ dma_chan_attr);
+ return ccp->vdata->dma_chan_attr;
+ }
+}
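As a usage note (assuming the driver is built as the usual ccp module), the override is applied at load time; for example, to force every channel on every device private:

	modprobe ccp dma_chan_attr=0x1

Any value other than 0x0/0x1/0x2 logs a one-time warning and falls back to the per-device vdata default, as the switch above shows.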
+
static void ccp_free_cmd_resources(struct ccp_device *ccp,
struct list_head *list)
{
dma_cap_set(DMA_SG, dma_dev->cap_mask);
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+ /* The DMA channels for this device can be set to public or private,
+ * and overridden by the module parameter dma_chan_attr.
+ * Default: according to the value in vdata (dma_chan_attr=0)
+ * dma_chan_attr=0x1: all channels private (override vdata)
+ * dma_chan_attr=0x2: all channels public (override vdata)
+ */
+ if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
INIT_LIST_HEAD(&dma_dev->channels);
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
tristate "DAX: direct access to differentiated memory"
default m if NVDIMM_DAX
depends on TRANSPARENT_HUGEPAGE
+ select SRCU
help
Support raw access to differentiated (persistence, bandwidth,
latency...) memory via an mmap(2) capable character
#include "dax.h"
static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
* @region - parent region
* @dev - device backing the character device
* @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
* @id - child id in the region
* @num_resources - number of physical address extents in this device
* @res - array of physical address ranges
static int dax_dev_huge_fault(struct vm_fault *vmf,
enum page_entry_size pe_size)
{
- int rc;
+ int rc, id;
struct file *filp = vmf->vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
? "write" : "read",
vmf->vma->vm_start, vmf->vma->vm_end);
- rcu_read_lock();
+ id = srcu_read_lock(&dax_srcu);
switch (pe_size) {
case PE_SIZE_PTE:
rc = __dax_dev_pte_fault(dax_dev, vmf);
default:
return VM_FAULT_FALLBACK;
}
- rcu_read_unlock();
+ srcu_read_unlock(&dax_srcu, id);
return rc;
}
* Note, rcu is not protecting the liveness of dax_dev, rcu is
* ensuring that any fault handlers that might have seen
* dax_dev->alive == true, have completed. Any fault handlers
- * that start after synchronize_rcu() has started will abort
+ * that start after synchronize_srcu() has started will abort
* upon seeing dax_dev->alive == false.
*/
dax_dev->alive = false;
- synchronize_rcu();
+ synchronize_srcu(&dax_srcu);
unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
cdev_del(cdev);
device_unregister(dev);
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
- || !exp_info->ops->kmap_atomic
- || !exp_info->ops->kmap
+ || !exp_info->ops->map_atomic
+ || !exp_info->ops->map
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
}
{
WARN_ON(!dmabuf);
- return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+ return dmabuf->ops->map_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
{
WARN_ON(!dmabuf);
- if (dmabuf->ops->kunmap_atomic)
- dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+ if (dmabuf->ops->unmap_atomic)
+ dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
{
WARN_ON(!dmabuf);
- return dmabuf->ops->kmap(dmabuf, page_num);
+ return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
{
WARN_ON(!dmabuf);
- if (dmabuf->ops->kunmap)
- dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+ if (dmabuf->ops->unmap)
+ dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
int ret;
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
- int count = 0, attach_count;
+ struct reservation_object *robj;
+ struct reservation_object_list *fobj;
+ struct dma_fence *fence;
+ unsigned seq;
+ int count = 0, attach_count, shared_count, i;
size_t size = 0;
ret = mutex_lock_interruptible(&db_list.lock);
return ret;
seq_puts(s, "\nDma-buf Objects:\n");
- seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
+ seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
+ "size", "flags", "mode", "count");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
file_count(buf_obj->file),
buf_obj->exp_name);
+ robj = buf_obj->resv;
+ while (true) {
+ seq = read_seqcount_begin(&robj->seq);
+ rcu_read_lock();
+ fobj = rcu_dereference(robj->fence);
+ shared_count = fobj ? fobj->shared_count : 0;
+ fence = rcu_dereference(robj->fence_excl);
+ if (!read_seqcount_retry(&robj->seq, seq))
+ break;
+ rcu_read_unlock();
+ }
+
+ if (fence)
+ seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ dma_fence_is_signaled(fence) ? "" : "un");
+ for (i = 0; i < shared_count; i++) {
+ fence = rcu_dereference(fobj->shared[i]);
+ if (!dma_fence_get_rcu(fence))
+ continue;
+ seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ dma_fence_is_signaled(fence) ? "" : "un");
+ }
+ rcu_read_unlock();
+
seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
*/
/* have we filled in period_length yet? */
- if (*total_len + control_block->length < period_len)
+ if (*total_len + control_block->length < period_len) {
+ /* update number of bytes in this period so far */
+ *total_len += control_block->length;
return;
+ }
/* calculate the length that remains to reach period_length */
control_block->length = period_len - *total_len;
switch (order) {
case 0 ... 1:
return &unmap_pool[0];
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
case 2 ... 4:
return &unmap_pool[1];
case 5 ... 7:
return &unmap_pool[2];
case 8:
return &unmap_pool[3];
+#endif
default:
BUG();
return NULL;
config EDAC_DEBUG
bool "Debugging"
+ select DEBUG_FS
help
This turns on debugging information for the entire EDAC subsystem.
You do so by inserting edac_module with "edac_debug_level=x." Valid
Support for error detection and correction the Intel
Skylake server Integrated Memory Controllers.
+config EDAC_PND2
+ tristate "Intel Pondicherry2"
+ depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+ help
+ Support for error detection and correction on the Intel
+ Pondicherry2 Integrated Memory Controller. This SoC IP is
+ first used on the Apollo Lake platform and Denverton
+ micro-server but may appear on others in the future.
+
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
depends on EDAC_MM_EDAC && FSL_SOC
obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
obj-$(CONFIG_EDAC_SKX) += skx_edac.o
+obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
dimm->mtype = MEM_FB_DDR2;
/* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
+ if (MTR_DRAM_WIDTH(mtr) == 8)
dimm->dtype = DEV_X8;
else
dimm->dtype = DEV_X4;
dimm->nr_pages = size_mb << 8;
dimm->grain = 8;
- dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+ dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
+ DEV_X8 : DEV_X4;
dimm->mtype = MEM_FB_DDR2;
/*
* The eccc mechanism is SDDC (aka SECC), with
* is similar to Chipkill.
*/
- dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+ dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
EDAC_S8ECD8ED : EDAC_S4ECD4ED;
ndimms++;
}
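The switch from a truthiness test to an explicit == 8 comparison is the whole bug fix: MTR_DRAM_WIDTH() evaluates to the width itself (4 or 8), so it is always non-zero and the old test always reported X8 parts. A standalone illustration, using a simplified stand-in for the real macro (the bit-6 layout here is assumed for the demo):

#include <stdio.h>

/* Simplified stand-in for the real macro: it yields the width (4 or 8),
 * never 0, so a plain truth test always takes the X8 branch. */
#define MTR_DRAM_WIDTH(mtr) ((((mtr) >> 6) & 0x1) ? 8 : 4)

int main(void)
{
	unsigned int mtr_x4 = 0x00;	/* width bit clear: an x4 device */

	printf("truthy test: %s\n", MTR_DRAM_WIDTH(mtr_x4) ? "X8 (wrong)" : "X4");
	printf("'== 8' test: %s\n", MTR_DRAM_WIDTH(mtr_x4) == 8 ? "X8" : "X4 (right)");
	return 0;
}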
--- /dev/null
+/*
+ * Driver for Pondicherry2 memory controller.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * [Derived from sb_edac.c]
+ *
+ * Translation of system physical addresses to DIMM addresses
+ * is a two stage process:
+ *
+ * First the Pondicherry 2 memory controller handles slice and channel interleaving
+ * in "sys2pmi()". This is (almost) completely common between platforms.
+ *
+ * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
+ * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_mc.h"
+#include "edac_module.h"
+#include "pnd2_edac.h"
+
+#define APL_NUM_CHANNELS 4
+#define DNV_NUM_CHANNELS 2
+#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
+
+enum type {
+ APL,
+ DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
+};
+
+struct dram_addr {
+ int chan;
+ int dimm;
+ int rank;
+ int bank;
+ int row;
+ int col;
+};
+
+struct pnd2_pvt {
+ int dimm_geom[APL_NUM_CHANNELS];
+ u64 tolm, tohm;
+};
+
+/*
+ * System address space is divided into multiple regions with
+ * different interleave rules in each. The as0/as1 regions
+ * have no interleaving at all. The as2 region is interleaved
+ * between two channels. The mot region is magic and may overlap
+ * other regions, with its interleave rules taking precedence.
+ * Addresses not in any of these regions are interleaved across
+ * all four channels.
+ */
+static struct region {
+ u64 base;
+ u64 limit;
+ u8 enabled;
+} mot, as0, as1, as2;
+
+static struct dunit_ops {
+ char *name;
+ enum type type;
+ int pmiaddr_shift;
+ int pmiidx_shift;
+ int channels;
+ int dimms_per_channel;
+ int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
+ int (*get_registers)(void);
+ int (*check_ecc)(void);
+ void (*mk_region)(char *name, struct region *rp, void *asym);
+ void (*get_dimm_config)(struct mem_ctl_info *mci);
+ int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+ struct dram_addr *daddr, char *msg);
+} *ops;
+
+static struct mem_ctl_info *pnd2_mci;
+
+#define PND2_MSG_SIZE 256
+
+/* Debug macros */
+#define pnd2_printk(level, fmt, arg...) \
+ edac_printk(level, "pnd2", fmt, ##arg)
+
+#define pnd2_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
+
+#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
+#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
+#define SELECTOR_DISABLED (-1)
+#define _4GB (1ul << 32)
+
+#define PMI_ADDRESS_WIDTH 31
+#define PND_MAX_PHYS_BIT 39
+
+#define APL_ASYMSHIFT 28
+#define DNV_ASYMSHIFT 31
+#define CH_HASH_MASK_LSB 6
+#define SLICE_HASH_MASK_LSB 6
+#define MOT_SLC_INTLV_BIT 12
+#define LOG2_PMI_ADDR_GRANULARITY 5
+#define MOT_SHIFT 24
+
+#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
+#define U64_LSHIFT(val, s) ((u64)(val) << (s))
+
+#ifdef CONFIG_X86_INTEL_SBI_APL
+#include "linux/platform_data/sbi_apl.h"
+int sbi_send(int port, int off, int op, u32 *data)
+{
+ struct sbi_apl_message sbi_arg;
+ int ret, read = 0;
+
+ memset(&sbi_arg, 0, sizeof(sbi_arg));
+
+ if (op == 0 || op == 4 || op == 6)
+ read = 1;
+ else
+ sbi_arg.data = *data;
+
+ sbi_arg.opcode = op;
+ sbi_arg.port_address = port;
+ sbi_arg.register_offset = off;
+ ret = sbi_apl_commit(&sbi_arg);
+ if (ret || sbi_arg.status)
+ edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
+ sbi_arg.status, ret, sbi_arg.data);
+
+ if (ret == 0)
+ ret = sbi_arg.status;
+
+ if (ret == 0 && read)
+ *data = sbi_arg.data;
+
+ return ret;
+}
+#else
+int sbi_send(int port, int off, int op, u32 *data)
+{
+ return -EUNATCH;
+}
+#endif
+
+static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+{
+ int ret = 0;
+
+ edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
+ switch (sz) {
+ case 8:
+ ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
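+		/* fall through - a 64-bit read fetches the high word, then the low word */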
+ case 4:
+ ret = sbi_send(port, off, op, (u32 *)data);
+ pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
+ sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
+ break;
+ }
+
+ return ret;
+}
+
+static u64 get_mem_ctrl_hub_base_addr(void)
+{
+ struct b_cr_mchbar_lo_pci lo;
+ struct b_cr_mchbar_hi_pci hi;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+ if (pdev) {
+ pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
+ pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
+ pci_dev_put(pdev);
+ } else {
+ return 0;
+ }
+
+ if (!lo.enable) {
+ edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
+ return 0;
+ }
+
+ return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
+}
+
+static u64 get_sideband_reg_base_addr(void)
+{
+ struct pci_dev *pdev;
+ u32 hi, lo;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
+ if (pdev) {
+ pci_read_config_dword(pdev, 0x10, &lo);
+ pci_read_config_dword(pdev, 0x14, &hi);
+ pci_dev_put(pdev);
+ return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
+ } else {
+ return 0xfd000000;
+ }
+}
+
+static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+{
+ struct pci_dev *pdev;
+ char *base;
+ u64 addr;
+
+ if (op == 4) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+ if (!pdev)
+ return -ENODEV;
+
+ pci_read_config_dword(pdev, off, data);
+ pci_dev_put(pdev);
+ } else {
+ /* MMIO via memory controller hub base address */
+ if (op == 0 && port == 0x4c) {
+ addr = get_mem_ctrl_hub_base_addr();
+ if (!addr)
+ return -ENODEV;
+ } else {
+ /* MMIO via sideband register base address */
+ addr = get_sideband_reg_base_addr();
+ if (!addr)
+ return -ENODEV;
+ addr += (port << 16);
+ }
+
+ base = ioremap((resource_size_t)addr, 0x10000);
+ if (!base)
+ return -ENODEV;
+
+ if (sz == 8)
+ *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
+ *(u32 *)data = *(u32 *)(base + off);
+
+ iounmap(base);
+ }
+
+ edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
+ (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
+
+ return 0;
+}
+
+#define RD_REGP(regp, regname, port) \
+ ops->rd_reg(port, \
+ regname##_offset, \
+ regname##_r_opcode, \
+ regp, sizeof(struct regname), \
+ #regname)
+
+#define RD_REG(regp, regname) \
+ ops->rd_reg(regname ## _port, \
+ regname##_offset, \
+ regname##_r_opcode, \
+ regp, sizeof(struct regname), \
+ #regname)
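RD_REG()/RD_REGP() lean on two preprocessor tricks: ## pastes the register name into its _offset/_r_opcode constants and struct tag, while # stringizes the name for the debug output. A miniature standalone version, with a hypothetical demo_reg register:

#include <stdio.h>

#define demo_reg_offset 0x48		/* hypothetical register constants */
#define demo_reg_r_opcode 0x10

/* '##' pastes the name into its per-register constants, '#' stringizes
 * it for the log message - the same trick RD_REG()/RD_REGP() use. */
#define RD(regname) read_reg(regname##_offset, regname##_r_opcode, #regname)

static int read_reg(int off, int op, const char *name)
{
	printf("read %s: off=%#x op=%#x\n", name, off, op);
	return 0;
}

int main(void)
{
	return RD(demo_reg);
}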
+
+static u64 top_lm, top_hm;
+static bool two_slices;
+static bool two_channels; /* Both PMI channels in one slice enabled */
+
+static u8 sym_chan_mask;
+static u8 asym_chan_mask;
+static u8 chan_mask;
+
+static int slice_selector = -1;
+static int chan_selector = -1;
+static u64 slice_hash_mask;
+static u64 chan_hash_mask;
+
+static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
+{
+ rp->enabled = 1;
+ rp->base = base;
+ rp->limit = limit;
+ edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
+}
+
+static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
+{
+ if (mask == 0) {
+ pr_info(FW_BUG "MOT mask cannot be zero\n");
+ return;
+ }
+ if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
+ pr_info(FW_BUG "MOT mask not power of two\n");
+ return;
+ }
+ if (base & ~mask) {
+ pr_info(FW_BUG "MOT region base/mask alignment error\n");
+ return;
+ }
+ rp->base = base;
+ rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
+ rp->enabled = 1;
+ edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
+}
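mk_region_mask() accepts only masks that are a solid run of ones from their lowest set bit up to the top physical-address bit (PND_MAX_PHYS_BIT); the GENMASK_ULL/__ffs comparison rejects everything else as firmware breakage. A standalone check of the same condition, using compiler builtins in place of the kernel helpers:

#include <stdio.h>
#include <stdint.h>

#define MAX_BIT 39	/* PND_MAX_PHYS_BIT */

static uint64_t genmask(int h, int l)	/* GENMASK_ULL(h, l) equivalent */
{
	return ((~0ull) >> (63 - h)) & ((~0ull) << l);
}

/* Valid masks run unbroken from the lowest set bit to the top
 * physical-address bit; anything else is rejected. */
static int mask_ok(uint64_t mask)
{
	return mask && mask == genmask(MAX_BIT, __builtin_ctzll(mask));
}

int main(void)
{
	printf("%d %d\n", mask_ok(0xffff000000ull), mask_ok(0xff000000ull));
	return 0;	/* prints "1 0": the second mask stops below bit 39 */
}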
+
+static bool in_region(struct region *rp, u64 addr)
+{
+ if (!rp->enabled)
+ return false;
+
+ return rp->base <= addr && addr <= rp->limit;
+}
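Since the comment above the region table notes that the MOT region may overlap the others and its rules take precedence, classification amounts to probing the regions in priority order with in_region(). A standalone sketch with hypothetical region bounds:

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t base, limit; int enabled; };

static int in_region(const struct region *rp, uint64_t addr)
{
	return rp->enabled && rp->base <= addr && addr <= rp->limit;
}

int main(void)
{
	/* hypothetical layout: MOT overlaps as0 but is probed first */
	struct region mot = { 0x1000000, 0x1ffffff, 1 };
	struct region as0 = { 0x0, 0x3ffffff, 1 };
	uint64_t addr = 0x1234567;

	if (in_region(&mot, addr))
		puts("MOT interleave rules apply");
	else if (in_region(&as0, addr))
		puts("as0 rules apply");
	else
		puts("fully symmetric interleave");
	return 0;
}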
+
+static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
+{
+ int mask = 0;
+
+ if (!p->slice_0_mem_disabled)
+ mask |= p->sym_slice0_channel_enabled;
+
+ if (!p->slice_1_disabled)
+ mask |= p->sym_slice1_channel_enabled << 2;
+
+ if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
+ mask &= 0x5;
+
+ return mask;
+}
+
+static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
+ struct b_cr_asym_mem_region0_mchbar *as0,
+ struct b_cr_asym_mem_region1_mchbar *as1,
+ struct b_cr_asym_2way_mem_region_mchbar *as2way)
+{
+ const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+ int mask = 0;
+
+ if (as2way->asym_2way_interleave_enable)
+ mask = intlv[as2way->asym_2way_intlv_mode];
+ if (as0->slice0_asym_enable)
+ mask |= (1 << as0->slice0_asym_channel_select);
+ if (as1->slice1_asym_enable)
+ mask |= (4 << as1->slice1_asym_channel_select);
+ if (p->slice_0_mem_disabled)
+ mask &= 0xc;
+ if (p->slice_1_disabled)
+ mask &= 0x3;
+ if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
+ mask &= 0x5;
+
+ return mask;
+}
+
+static struct b_cr_tolud_pci tolud;
+static struct b_cr_touud_lo_pci touud_lo;
+static struct b_cr_touud_hi_pci touud_hi;
+static struct b_cr_asym_mem_region0_mchbar asym0;
+static struct b_cr_asym_mem_region1_mchbar asym1;
+static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
+static struct b_cr_mot_out_base_mchbar mot_base;
+static struct b_cr_mot_out_mask_mchbar mot_mask;
+static struct b_cr_slice_channel_hash chash;
+
+/* Apollo Lake dunit */
+/*
+ * Validated on board with just two DIMMs in the [0] and [2] positions
+ * in this array. Other port number matches documentation, but caution
+ * advised.
+ */
+static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
+static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
+
+/* Denverton dunit */
+static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
+static struct d_cr_dsch dsch;
+static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
+static struct d_cr_drp drp[DNV_NUM_CHANNELS];
+static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
+static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
+static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
+static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
+static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
+static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
+
+static void apl_mk_region(char *name, struct region *rp, void *asym)
+{
+ struct b_cr_asym_mem_region0_mchbar *a = asym;
+
+ mk_region(name, rp,
+ U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
+ U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
+ GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
+}
+
+static void dnv_mk_region(char *name, struct region *rp, void *asym)
+{
+ struct b_cr_asym_mem_region_denverton *a = asym;
+
+ mk_region(name, rp,
+ U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
+ U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
+ GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
+}
+
+static int apl_get_registers(void)
+{
+ int i;
+
+ if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
+ return -ENODEV;
+
+ for (i = 0; i < APL_NUM_CHANNELS; i++)
+ if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int dnv_get_registers(void)
+{
+ int i;
+
+ if (RD_REG(&dsch, d_cr_dsch))
+ return -ENODEV;
+
+ for (i = 0; i < DNV_NUM_CHANNELS; i++)
+ if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
+ RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
+ RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
+ RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
+ RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
+ RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
+ RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
+ RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Read all the h/w config registers once here (they don't
+ * change at run time). Figure out which address ranges have
+ * which interleave characteristics.
+ */
+static int get_registers(void)
+{
+ const int intlv[] = { 10, 11, 12, 12 };
+
+ if (RD_REG(&tolud, b_cr_tolud_pci) ||
+ RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
+ RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
+ RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
+ RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
+ RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
+ RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
+ RD_REG(&chash, b_cr_slice_channel_hash))
+ return -ENODEV;
+
+ if (ops->get_registers())
+ return -ENODEV;
+
+ if (ops->type == DNV) {
+ /* PMI channel idx (always 0) for asymmetric region */
+ asym0.slice0_asym_channel_select = 0;
+ asym1.slice1_asym_channel_select = 0;
+ /* PMI channel bitmap (always 1) for symmetric region */
+ chash.sym_slice0_channel_enabled = 0x1;
+ chash.sym_slice1_channel_enabled = 0x1;
+ }
+
+ if (asym0.slice0_asym_enable)
+ ops->mk_region("as0", &as0, &asym0);
+
+ if (asym1.slice1_asym_enable)
+ ops->mk_region("as1", &as1, &asym1);
+
+ if (asym_2way.asym_2way_interleave_enable) {
+ mk_region("as2way", &as2,
+ U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
+ U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
+ GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
+ }
+
+ if (mot_base.imr_en) {
+ mk_region_mask("mot", &mot,
+ U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
+ U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
+ }
+
+ top_lm = U64_LSHIFT(tolud.tolud, 20);
+ top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
+
+ two_slices = !chash.slice_1_disabled &&
+ !chash.slice_0_mem_disabled &&
+ (chash.sym_slice0_channel_enabled != 0) &&
+ (chash.sym_slice1_channel_enabled != 0);
+ two_channels = !chash.ch_1_disabled &&
+ !chash.enable_pmi_dual_data_mode &&
+ ((chash.sym_slice0_channel_enabled == 3) ||
+ (chash.sym_slice1_channel_enabled == 3));
+
+ sym_chan_mask = gen_sym_mask(&chash);
+ asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
+ chan_mask = sym_chan_mask | asym_chan_mask;
+
+ if (two_slices && !two_channels) {
+ if (chash.hvm_mode)
+ slice_selector = 29;
+ else
+ slice_selector = intlv[chash.interleave_mode];
+ } else if (!two_slices && two_channels) {
+ if (chash.hvm_mode)
+ chan_selector = 29;
+ else
+ chan_selector = intlv[chash.interleave_mode];
+ } else if (two_slices && two_channels) {
+ if (chash.hvm_mode) {
+ slice_selector = 29;
+ chan_selector = 30;
+ } else {
+ slice_selector = intlv[chash.interleave_mode];
+ chan_selector = intlv[chash.interleave_mode] + 1;
+ }
+ }
+
+ if (two_slices) {
+ if (!chash.hvm_mode)
+ slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
+ if (!two_channels)
+ slice_hash_mask |= BIT_ULL(slice_selector);
+ }
+
+ if (two_channels) {
+ if (!chash.hvm_mode)
+ chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
+ if (!two_slices)
+ chan_hash_mask |= BIT_ULL(chan_selector);
+ }
+
+ return 0;
+}
+
+/* Get a contiguous memory address (remove the MMIO gap) */
+static u64 remove_mmio_gap(u64 sys)
+{
+ return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
+}
+
+/* Squeeze out one address bit, shift upper part down to fill gap */
+static void remove_addr_bit(u64 *addr, int bitidx)
+{
+ u64 mask;
+
+ if (bitidx == -1)
+ return;
+
+ mask = (1ull << bitidx) - 1;
+ *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
+}
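remove_addr_bit() is the inverse of interleaving: once a channel/slice selector bit has done its job, it is squeezed out of the address and the upper bits slide down one position to close the gap. A standalone worked example:

#include <stdio.h>
#include <stdint.h>

/* Same squeeze as remove_addr_bit(): keep the bits below 'bitidx',
 * shift everything above it down one position to close the gap. */
static uint64_t squeeze(uint64_t addr, int bitidx)
{
	uint64_t mask = (1ull << bitidx) - 1;

	return ((addr >> 1) & ~mask) | (addr & mask);
}

int main(void)
{
	/* 0b101101 with bit 2 removed -> 0b10101 */
	printf("%#llx\n", (unsigned long long)squeeze(0x2d, 2));
	return 0;	/* prints 0x15 */
}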
+
+/* XOR all the bits from addr specified in mask */
+static int hash_by_mask(u64 addr, u64 mask)
+{
+ u64 result = addr & mask;
+
+ result = (result >> 32) ^ result;
+ result = (result >> 16) ^ result;
+ result = (result >> 8) ^ result;
+ result = (result >> 4) ^ result;
+ result = (result >> 2) ^ result;
+ result = (result >> 1) ^ result;
+
+ return (int)result & 1;
+}
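hash_by_mask() is just the parity (XOR reduction) of the address bits selected by the mask, computed with a shift-and-fold. The standalone check below cross-verifies the fold against the GCC/Clang parity builtin:

#include <stdio.h>
#include <stdint.h>

/* XOR-fold of the masked bits, i.e. their parity. */
static int parity_fold(uint64_t addr, uint64_t mask)
{
	uint64_t r = addr & mask;

	r ^= r >> 32; r ^= r >> 16; r ^= r >> 8;
	r ^= r >> 4;  r ^= r >> 2;  r ^= r >> 1;
	return (int)r & 1;
}

int main(void)
{
	uint64_t addr = 0xdeadbeef12345678ull, mask = 0x1040ull;

	printf("fold=%d builtin=%d\n", parity_fold(addr, mask),
	       __builtin_parityll(addr & mask));
	return 0;	/* the two always agree */
}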
+
+/*
+ * First stage decode. Take the system address and figure out which
+ * second stage will deal with it based on interleave modes.
+ */
+static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
+{
+ u64 contig_addr, contig_base, contig_offset, contig_base_adj;
+ int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
+ MOT_CHAN_INTLV_BIT_1SLC_2CH;
+ int slice_intlv_bit_rm = SELECTOR_DISABLED;
+ int chan_intlv_bit_rm = SELECTOR_DISABLED;
+ /* Determine if address is in the MOT region. */
+ bool mot_hit = in_region(&mot, addr);
+ /* Calculate the number of symmetric regions enabled. */
+ int sym_channels = hweight8(sym_chan_mask);
+
+ /*
+ * The amount we need to shift the asym base can be determined by the
+ * number of enabled symmetric channels.
+ * NOTE: This can only work because symmetric memory is not supposed
+ * to do a 3-way interleave.
+ */
+ int sym_chan_shift = sym_channels >> 1;
+
+ /* Give up if address is out of range, or in MMIO gap */
+ if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
+ (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
+ snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
+ return -EINVAL;
+ }
+
+ /* Get a contiguous memory address (remove the MMIO gap) */
+ contig_addr = remove_mmio_gap(addr);
+
+ if (in_region(&as0, addr)) {
+ *pmiidx = asym0.slice0_asym_channel_select;
+
+ contig_base = remove_mmio_gap(as0.base);
+ contig_offset = contig_addr - contig_base;
+ contig_base_adj = (contig_base >> sym_chan_shift) *
+ ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
+ contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
+ } else if (in_region(&as1, addr)) {
+ *pmiidx = 2u + asym1.slice1_asym_channel_select;
+
+ contig_base = remove_mmio_gap(as1.base);
+ contig_offset = contig_addr - contig_base;
+ contig_base_adj = (contig_base >> sym_chan_shift) *
+ ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
+ contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
+ } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
+ bool channel1;
+
+ mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
+ *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
+ channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
+ hash_by_mask(contig_addr, chan_hash_mask);
+ *pmiidx |= (u32)channel1;
+
+ contig_base = remove_mmio_gap(as2.base);
+ chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
+ contig_offset = contig_addr - contig_base;
+ remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
+ contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
+ } else {
+ /* Otherwise we're in normal, boring symmetric mode. */
+ *pmiidx = 0u;
+
+ if (two_slices) {
+ bool slice1;
+
+ if (mot_hit) {
+ slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
+ slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
+ } else {
+ slice_intlv_bit_rm = slice_selector;
+ slice1 = hash_by_mask(addr, slice_hash_mask);
+ }
+
+ *pmiidx = (u32)slice1 << 1;
+ }
+
+ if (two_channels) {
+ bool channel1;
+
+ mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
+ MOT_CHAN_INTLV_BIT_1SLC_2CH;
+
+ if (mot_hit) {
+ chan_intlv_bit_rm = mot_intlv_bit;
+ channel1 = (addr >> mot_intlv_bit) & 1;
+ } else {
+ chan_intlv_bit_rm = chan_selector;
+ channel1 = hash_by_mask(contig_addr, chan_hash_mask);
+ }
+
+ *pmiidx |= (u32)channel1;
+ }
+ }
+
+ /* Remove the chan_selector bit first */
+ remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
+	/* Remove the slice bit (we remove it second because it must be lower) */
+ remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
+ *pmiaddr = contig_addr;
+
+ return 0;
+}
+
+/* Translate PMI address to memory (rank, row, bank, column) */
+#define C(n) (0x10 | (n)) /* column */
+#define B(n) (0x20 | (n)) /* bank */
+#define R(n) (0x40 | (n)) /* row */
+#define RS (0x80) /* rank */
+
+/* addrdec values */
+#define AMAP_1KB 0
+#define AMAP_2KB 1
+#define AMAP_4KB 2
+#define AMAP_RSVD 3
+
+/* dden values */
+#define DEN_4Gb 0
+#define DEN_8Gb 2
+
+/* dwid values */
+#define X8 0
+#define X16 1
+
+static struct dimm_geometry {
+ u8 addrdec;
+ u8 dden;
+ u8 dwid;
+ u8 rowbits, colbits;
+ u16 bits[PMI_ADDRESS_WIDTH];
+} dimms[] = {
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
+ .rowbits = 15, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ 0, 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 11,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
+ R(14), R(15), 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
+ .rowbits = 15, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ 0, 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 11,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
+ R(14), R(15), 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
+ .rowbits = 15, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
+ 0, 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 11,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
+ R(14), R(15), 0, 0
+ }
+ }
+};
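Each bits[] entry encodes, for one PMI address bit, the DRAM address bit it feeds: the high nibble is the field type (column, bank, row or rank select, per the C()/B()/R()/RS macros above) and the low nibble the bit index within that field. Reading the first geometry as an example: PMI bits 0-4 drive column bits 2-6, bits 5-7 drive bank bits 0-2, bit 23 is the rank select, and the trailing zeros are unused on a 15-row-bit part.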
+
+static int bank_hash(u64 pmiaddr, int idx, int shft)
+{
+ int bhash = 0;
+
+ switch (idx) {
+ case 0:
+ bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
+ break;
+ case 1:
+ bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
+ bhash ^= ((pmiaddr >> 22) & 1) << 1;
+ break;
+ case 2:
+ bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
+ break;
+ }
+
+ return bhash;
+}
+
+static int rank_hash(u64 pmiaddr)
+{
+ return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
+}
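rank_hash() simply XORs PMI address bits 16 and 10: an address with both bits set (e.g. 0x10400) hashes to 0 and leaves the rank select alone, while one with only bit 16 set flips it.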
+
+/* Second stage decode. Compute rank, bank, row & column. */
+static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+ struct dram_addr *daddr, char *msg)
+{
+ struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
+ struct pnd2_pvt *pvt = mci->pvt_info;
+ int g = pvt->dimm_geom[pmiidx];
+ struct dimm_geometry *d = &dimms[g];
+ int column = 0, bank = 0, row = 0, rank = 0;
+ int i, idx, type, skiprs = 0;
+
+ for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
+ int bit = (pmiaddr >> i) & 1;
+
+ if (i + skiprs >= PMI_ADDRESS_WIDTH) {
+ snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
+ return -EINVAL;
+ }
+
+ type = d->bits[i + skiprs] & ~0xf;
+ idx = d->bits[i + skiprs] & 0xf;
+
+ /*
+ * On single rank DIMMs ignore the rank select bit
+ * and shift remainder of "bits[]" down one place.
+ */
+ if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
+ skiprs = 1;
+ type = d->bits[i + skiprs] & ~0xf;
+ idx = d->bits[i + skiprs] & 0xf;
+ }
+
+ switch (type) {
+ case C(0):
+ column |= (bit << idx);
+ break;
+ case B(0):
+ bank |= (bit << idx);
+ if (cr_drp0->bahen)
+ bank ^= bank_hash(pmiaddr, idx, d->addrdec);
+ break;
+ case R(0):
+ row |= (bit << idx);
+ break;
+ case RS:
+ rank = bit;
+ if (cr_drp0->rsien)
+ rank ^= rank_hash(pmiaddr);
+ break;
+ default:
+ if (bit) {
+ snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
+ return -EINVAL;
+ }
+ goto done;
+ }
+ }
+
+done:
+ daddr->col = column;
+ daddr->bank = bank;
+ daddr->row = row;
+ daddr->rank = rank;
+ daddr->dimm = 0;
+
+ return 0;
+}
+
+/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
+#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
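For instance, dnv_get_bit(0x80, 7, 3) plucks bit 7 (set) and re-emits it at bit 3, returning 8; the Denverton decode below is built entirely from such plucks, with the source bit positions supplied by the dmap* register fields.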
+
+static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+ struct dram_addr *daddr, char *msg)
+{
+ /* Rank 0 or 1 */
+ daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
+ /* Rank 2 or 3 */
+ daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
+
+ /*
+ * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
+ * flip them if DIMM1 is larger than DIMM0.
+ */
+ daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
+
+ daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
+ daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
+ daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
+ if (dsch.ddr4en)
+ daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
+ if (dmap1[pmiidx].bxor) {
+ if (dsch.ddr4en) {
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
+ if (dsch.chan_width == 0)
+ /* 64/72 bit dram channel width */
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
+ else
+ /* 32/40 bit dram channel width */
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
+ } else {
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
+ if (dsch.chan_width == 0)
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
+ else
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
+ }
+ }
+
+ daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
+ if (dmap4[pmiidx].row14 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
+ if (dmap4[pmiidx].row15 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
+ if (dmap4[pmiidx].row16 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
+ if (dmap4[pmiidx].row17 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
+
+ daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
+ if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
+ daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
+
+ return 0;
+}
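In other words, each dmap* register field is a per-bit selector naming which PMI address bit (biased by 6, or by 13 for the rank and ca11 cases) supplies a given DRAM bank/row/column bit; the code skips selectors holding the all-ones value (31 for the 5-bit row fields, 0x3f for ca11), treating them as "bit not present".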
+
+static int check_channel(int ch)
+{
+ if (drp0[ch].dramtype != 0) {
+ pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
+ return 1;
+ } else if (drp0[ch].eccen == 0) {
+ pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
+ return 1;
+ }
+ return 0;
+}
+
+static int apl_check_ecc_active(void)
+{
+ int i, ret = 0;
+
+ /* Check dramtype and ECC mode for each present DIMM */
+ for (i = 0; i < APL_NUM_CHANNELS; i++)
+ if (chan_mask & BIT(i))
+ ret += check_channel(i);
+ return ret ? -EINVAL : 0;
+}
+
+#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
+
+static int check_unit(int ch)
+{
+ struct d_cr_drp *d = &drp[ch];
+
+ if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
+ pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
+ return 1;
+ }
+ return 0;
+}
+
+static int dnv_check_ecc_active(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < DNV_NUM_CHANNELS; i++)
+ ret += check_unit(i);
+ return ret ? -EINVAL : 0;
+}
+
+static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
+ struct dram_addr *daddr, char *msg)
+{
+ u64 pmiaddr;
+ u32 pmiidx;
+ int ret;
+
+ ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
+ if (ret)
+ return ret;
+
+ pmiaddr >>= ops->pmiaddr_shift;
+ /* pmi channel idx to dimm channel idx */
+ pmiidx >>= ops->pmiidx_shift;
+ daddr->chan = pmiidx;
+
+ ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
+ if (ret)
+ return ret;
+
+ edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
+ addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
+
+ return 0;
+}
+
+static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
+ struct dram_addr *daddr)
+{
+ enum hw_event_mc_err_type tp_event;
+ char *optype, msg[PND2_MSG_SIZE];
+ bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
+ bool overflow = m->status & MCI_STATUS_OVER;
+ bool uc_err = m->status & MCI_STATUS_UC;
+ bool recov = m->status & MCI_STATUS_S;
+ u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+ u32 mscod = GET_BITFIELD(m->status, 16, 31);
+ u32 errcode = GET_BITFIELD(m->status, 0, 15);
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ int rc;
+
+ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
+ HW_EVENT_ERR_CORRECTED;
+
+ /*
+	 * According to Table 15-9 of the Intel Architecture spec, vol 3A,
+ * memory errors should fit in this mask:
+ * 000f 0000 1mmm cccc (binary)
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ * If the mask doesn't match, report an error to the parsing logic
+ */
+ if (!((errcode & 0xef80) == 0x80)) {
+ optype = "Can't parse: it is not a mem";
+ } else {
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+ }
+
+	/* Only decode errors with a valid address (ADDRV) */
+ if (!(m->status & MCI_STATUS_ADDRV))
+ return;
+
+ rc = get_memory_error_data(mci, m->addr, daddr, msg);
+ if (rc)
+ goto address_error;
+
+ snprintf(msg, sizeof(msg),
+ "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
+ overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
+ errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
+
+ edac_dbg(0, "%s\n", msg);
+
+ /* Call the helper to output message */
+ edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
+ m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
+
+ return;
+
+address_error:
+ edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
+}
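The 0xef80 test above is easy to verify standalone (a hypothetical sketch, not driver code): it accepts exactly the codes matching 000f 0000 1mmm cccc.

#include <assert.h>
#include <stdint.h>

/* Mirrors the driver's filter: bit 7 set, bits 8-11 and 13-15 clear
 * (bit 12, the correction report filtering bit, is don't-care). */
static int is_mem_err(uint16_t errcode)
{
	return (errcode & 0xef80) == 0x80;
}

int main(void)
{
	assert(is_mem_err(0x0090));	/* mmm = 001: memory read, channel 0 */
	assert(is_mem_err(0x1080));	/* filtering bit set is still accepted */
	assert(!is_mem_err(0x0100));	/* bit 8 set: not a memory error */
	return 0;
}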
+
+static void apl_get_dimm_config(struct mem_ctl_info *mci)
+{
+ struct pnd2_pvt *pvt = mci->pvt_info;
+ struct dimm_info *dimm;
+ struct d_cr_drp0 *d;
+ u64 capacity;
+ int i, g;
+
+ for (i = 0; i < APL_NUM_CHANNELS; i++) {
+ if (!(chan_mask & BIT(i)))
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
+ if (!dimm) {
+ edac_dbg(0, "No allocated DIMM for channel %d\n", i);
+ continue;
+ }
+
+ d = &drp0[i];
+ for (g = 0; g < ARRAY_SIZE(dimms); g++)
+ if (dimms[g].addrdec == d->addrdec &&
+ dimms[g].dden == d->dden &&
+ dimms[g].dwid == d->dwid)
+ break;
+
+ if (g == ARRAY_SIZE(dimms)) {
+ edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
+ continue;
+ }
+
+ pvt->dimm_geom[i] = g;
+ capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
+ (1ul << dimms[g].colbits);
+ edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
+ dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
+ dimm->grain = 32;
+ dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
+ dimm->mtype = MEM_DDR3;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
+ }
+}
+
+static const int dnv_dtypes[] = {
+ DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
+};
+
+static void dnv_get_dimm_config(struct mem_ctl_info *mci)
+{
+ int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
+ struct dimm_info *dimm;
+ struct d_cr_drp *d;
+ u64 capacity;
+
+ if (dsch.ddr4en) {
+ memtype = MEM_DDR4;
+ banks = 16;
+ colbits = 10;
+ } else {
+ memtype = MEM_DDR3;
+ banks = 8;
+ }
+
+ for (i = 0; i < DNV_NUM_CHANNELS; i++) {
+ if (dmap4[i].row14 == 31)
+ rowbits = 14;
+ else if (dmap4[i].row15 == 31)
+ rowbits = 15;
+ else if (dmap4[i].row16 == 31)
+ rowbits = 16;
+ else if (dmap4[i].row17 == 31)
+ rowbits = 17;
+ else
+ rowbits = 18;
+
+ if (memtype == MEM_DDR3) {
+ if (dmap1[i].ca11 != 0x3f)
+ colbits = 12;
+ else
+ colbits = 10;
+ }
+
+ d = &drp[i];
+ /* DIMM0 is present if rank0 and/or rank1 is enabled */
+ ranks_of_dimm[0] = d->rken0 + d->rken1;
+ /* DIMM1 is present if rank2 and/or rank3 is enabled */
+ ranks_of_dimm[1] = d->rken2 + d->rken3;
+
+ for (j = 0; j < DNV_MAX_DIMMS; j++) {
+ if (!ranks_of_dimm[j])
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
+ if (!dimm) {
+ edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
+ continue;
+ }
+
+ capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
+ edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
+ dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
+ dimm->grain = 32;
+			dimm->dtype = dnv_dtypes[j ? d->dimmdwid1 : d->dimmdwid0]; /* width of DIMM j */
+ dimm->mtype = memtype;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
+ }
+ }
+}
+
+static int pnd2_register_mci(struct mem_ctl_info **ppmci)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct pnd2_pvt *pvt;
+ int rc;
+
+ rc = ops->check_ecc();
+ if (rc < 0)
+ return rc;
+
+ /* Allocate a new MC control structure */
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = ops->channels;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = ops->dimms_per_channel;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ if (!mci)
+ return -ENOMEM;
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+
+ mci->mod_name = "pnd2_edac.c";
+ mci->dev_name = ops->name;
+ mci->ctl_name = "Pondicherry2";
+
+ /* Get dimm basic config and the memory layout */
+ ops->get_dimm_config(mci);
+
+ if (edac_mc_add_mc(mci)) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ edac_mc_free(mci);
+ return -EINVAL;
+ }
+
+ *ppmci = mci;
+
+ return 0;
+}
+
+static void pnd2_unregister_mci(struct mem_ctl_info *mci)
+{
+ if (unlikely(!mci || !mci->pvt_info)) {
+ pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
+ return;
+ }
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(NULL);
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+ edac_mc_free(mci);
+}
+
+/*
+ * Callback function registered with core kernel mce code.
+ * Called once for each logged error.
+ */
+static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ struct mem_ctl_info *mci;
+ struct dram_addr daddr;
+ char *type;
+
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ return NOTIFY_DONE;
+
+ mci = pnd2_mci;
+ if (!mci)
+ return NOTIFY_DONE;
+
+ /*
+ * Just let mcelog handle it if the error is
+ * outside the memory controller. A memory error
+	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
+	 * Bit 12 has a special meaning.
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ if (mce->mcgstatus & MCG_STATUS_MCIP)
+ type = "Exception";
+ else
+ type = "Event";
+
+ pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
+ pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
+ mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
+ pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
+ pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
+ pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
+ pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
+ mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
+
+ pnd2_mce_output_error(mci, mce, &daddr);
+
+	/* Advise mcelog that the error was handled */
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block pnd2_mce_dec = {
+ .notifier_call = pnd2_mce_check_error,
+};
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static u64 pnd2_fake_addr;
+#define PND2_BLOB_SIZE 1024
+static char pnd2_result[PND2_BLOB_SIZE];
+static struct dentry *pnd2_test;
+static struct debugfs_blob_wrapper pnd2_blob = {
+ .data = pnd2_result,
+ .size = 0
+};
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct dram_addr daddr;
+ struct mce m;
+
+ *(u64 *)data = val;
+ m.mcgstatus = 0;
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x9f;
+ m.addr = val;
+ pnd2_mce_output_error(pnd2_mci, &m, &daddr);
+ snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
+ "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
+ m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
+ pnd2_blob.size = strlen(pnd2_blob.data);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_pnd2_debug(void)
+{
+ pnd2_test = edac_debugfs_create_dir("pnd2_test");
+ edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
+ &pnd2_fake_addr, &fops_u64_wo);
+ debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
+}
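With CONFIG_EDAC_DEBUG enabled, the whole decode path can then be exercised from userspace: write a physical address to pnd2_debug_addr under the EDAC debugfs directory and read the resulting channel/DIMM/rank/bank/row/column back from pnd2_debug_results.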
+
+static void teardown_pnd2_debug(void)
+{
+ debugfs_remove_recursive(pnd2_test);
+}
+#else
+static void setup_pnd2_debug(void) {}
+static void teardown_pnd2_debug(void) {}
+#endif /* CONFIG_EDAC_DEBUG */
+
+static int pnd2_probe(void)
+{
+ int rc;
+
+ edac_dbg(2, "\n");
+ rc = get_registers();
+ if (rc)
+ return rc;
+
+ return pnd2_register_mci(&pnd2_mci);
+}
+
+static void pnd2_remove(void)
+{
+ edac_dbg(0, "\n");
+ pnd2_unregister_mci(pnd2_mci);
+}
+
+static struct dunit_ops apl_ops = {
+ .name = "pnd2/apl",
+ .type = APL,
+ .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
+ .pmiidx_shift = 0,
+ .channels = APL_NUM_CHANNELS,
+ .dimms_per_channel = 1,
+ .rd_reg = apl_rd_reg,
+ .get_registers = apl_get_registers,
+ .check_ecc = apl_check_ecc_active,
+ .mk_region = apl_mk_region,
+ .get_dimm_config = apl_get_dimm_config,
+ .pmi2mem = apl_pmi2mem,
+};
+
+static struct dunit_ops dnv_ops = {
+ .name = "pnd2/dnv",
+ .type = DNV,
+ .pmiaddr_shift = 0,
+ .pmiidx_shift = 1,
+ .channels = DNV_NUM_CHANNELS,
+ .dimms_per_channel = 2,
+ .rd_reg = dnv_rd_reg,
+ .get_registers = dnv_get_registers,
+ .check_ecc = dnv_check_ecc_active,
+ .mk_region = dnv_mk_region,
+ .get_dimm_config = dnv_get_dimm_config,
+ .pmi2mem = dnv_pmi2mem,
+};
+
+static const struct x86_cpu_id pnd2_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
+
+static int __init pnd2_init(void)
+{
+ const struct x86_cpu_id *id;
+ int rc;
+
+ edac_dbg(2, "\n");
+
+ id = x86_match_cpu(pnd2_cpuids);
+ if (!id)
+ return -ENODEV;
+
+ ops = (struct dunit_ops *)id->driver_data;
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ rc = pnd2_probe();
+ if (rc < 0) {
+ pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
+ return rc;
+ }
+
+ if (!pnd2_mci)
+ return -ENODEV;
+
+ mce_register_decode_chain(&pnd2_mce_dec);
+ setup_pnd2_debug();
+
+ return 0;
+}
+
+static void __exit pnd2_exit(void)
+{
+ edac_dbg(2, "\n");
+ teardown_pnd2_debug();
+ mce_unregister_decode_chain(&pnd2_mce_dec);
+ pnd2_remove();
+}
+
+module_init(pnd2_init);
+module_exit(pnd2_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
--- /dev/null
+/*
+ * Register bitfield descriptions for Pondicherry2 memory controller.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _PND2_REGS_H
+#define _PND2_REGS_H
+
+struct b_cr_touud_lo_pci {
+ u32 lock : 1;
+ u32 reserved_1 : 19;
+ u32 touud : 12;
+};
+
+#define b_cr_touud_lo_pci_port 0x4c
+#define b_cr_touud_lo_pci_offset 0xa8
+#define b_cr_touud_lo_pci_r_opcode 0x04
+
+struct b_cr_touud_hi_pci {
+ u32 touud : 7;
+ u32 reserved_0 : 25;
+};
+
+#define b_cr_touud_hi_pci_port 0x4c
+#define b_cr_touud_hi_pci_offset 0xac
+#define b_cr_touud_hi_pci_r_opcode 0x04
+
+struct b_cr_tolud_pci {
+ u32 lock : 1;
+ u32 reserved_0 : 19;
+ u32 tolud : 12;
+};
+
+#define b_cr_tolud_pci_port 0x4c
+#define b_cr_tolud_pci_offset 0xbc
+#define b_cr_tolud_pci_r_opcode 0x04
+
+struct b_cr_mchbar_lo_pci {
+ u32 enable : 1;
+ u32 pad_3_1 : 3;
+	u32 pad_14_4 : 11;
+	u32 base : 17;
+};
+
+struct b_cr_mchbar_hi_pci {
+ u32 base : 7;
+ u32 pad_31_7 : 25;
+};
+
+/* Symmetric region */
+struct b_cr_slice_channel_hash {
+ u64 slice_1_disabled : 1;
+ u64 hvm_mode : 1;
+ u64 interleave_mode : 2;
+ u64 slice_0_mem_disabled : 1;
+ u64 reserved_0 : 1;
+ u64 slice_hash_mask : 14;
+ u64 reserved_1 : 11;
+ u64 enable_pmi_dual_data_mode : 1;
+ u64 ch_1_disabled : 1;
+ u64 reserved_2 : 1;
+ u64 sym_slice0_channel_enabled : 2;
+ u64 sym_slice1_channel_enabled : 2;
+ u64 ch_hash_mask : 14;
+ u64 reserved_3 : 11;
+ u64 lock : 1;
+};
+
+#define b_cr_slice_channel_hash_port 0x4c
+#define b_cr_slice_channel_hash_offset 0x4c58
+#define b_cr_slice_channel_hash_r_opcode 0x06
+
+struct b_cr_mot_out_base_mchbar {
+ u32 reserved_0 : 14;
+ u32 mot_out_base : 15;
+ u32 reserved_1 : 1;
+ u32 tr_en : 1;
+ u32 imr_en : 1;
+};
+
+#define b_cr_mot_out_base_mchbar_port 0x4c
+#define b_cr_mot_out_base_mchbar_offset 0x6af0
+#define b_cr_mot_out_base_mchbar_r_opcode 0x00
+
+struct b_cr_mot_out_mask_mchbar {
+ u32 reserved_0 : 14;
+ u32 mot_out_mask : 15;
+ u32 reserved_1 : 1;
+ u32 ia_iwb_en : 1;
+ u32 gt_iwb_en : 1;
+};
+
+#define b_cr_mot_out_mask_mchbar_port 0x4c
+#define b_cr_mot_out_mask_mchbar_offset 0x6af4
+#define b_cr_mot_out_mask_mchbar_r_opcode 0x00
+
+struct b_cr_asym_mem_region0_mchbar {
+ u32 pad : 4;
+ u32 slice0_asym_base : 11;
+ u32 pad_18_15 : 4;
+ u32 slice0_asym_limit : 11;
+ u32 slice0_asym_channel_select : 1;
+ u32 slice0_asym_enable : 1;
+};
+
+#define b_cr_asym_mem_region0_mchbar_port 0x4c
+#define b_cr_asym_mem_region0_mchbar_offset 0x6e40
+#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00
+
+struct b_cr_asym_mem_region1_mchbar {
+ u32 pad : 4;
+ u32 slice1_asym_base : 11;
+ u32 pad_18_15 : 4;
+ u32 slice1_asym_limit : 11;
+ u32 slice1_asym_channel_select : 1;
+ u32 slice1_asym_enable : 1;
+};
+
+#define b_cr_asym_mem_region1_mchbar_port 0x4c
+#define b_cr_asym_mem_region1_mchbar_offset 0x6e44
+#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00
+
+/* Some bit fields moved in above two structs on Denverton */
+struct b_cr_asym_mem_region_denverton {
+ u32 pad : 4;
+ u32 slice_asym_base : 8;
+ u32 pad_19_12 : 8;
+ u32 slice_asym_limit : 8;
+	u32 pad_30_28 : 3;
+ u32 slice_asym_enable : 1;
+};
+
+struct b_cr_asym_2way_mem_region_mchbar {
+ u32 pad : 2;
+ u32 asym_2way_intlv_mode : 2;
+ u32 asym_2way_base : 11;
+ u32 pad_16_15 : 2;
+ u32 asym_2way_limit : 11;
+ u32 pad_30_28 : 3;
+ u32 asym_2way_interleave_enable : 1;
+};
+
+#define b_cr_asym_2way_mem_region_mchbar_port 0x4c
+#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50
+#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00
+
+/* Apollo Lake d-unit */
+
+struct d_cr_drp0 {
+ u32 rken0 : 1;
+ u32 rken1 : 1;
+ u32 ddmen : 1;
+ u32 rsvd3 : 1;
+ u32 dwid : 2;
+ u32 dden : 3;
+ u32 rsvd13_9 : 5;
+ u32 rsien : 1;
+ u32 bahen : 1;
+ u32 rsvd18_16 : 3;
+ u32 caswizzle : 2;
+ u32 eccen : 1;
+ u32 dramtype : 3;
+ u32 blmode : 3;
+ u32 addrdec : 2;
+ u32 dramdevice_pr : 2;
+};
+
+#define d_cr_drp0_offset 0x1400
+#define d_cr_drp0_r_opcode 0x00
+
+/* Denverton d-unit */
+
+struct d_cr_dsch {
+ u32 ch0en : 1;
+ u32 ch1en : 1;
+ u32 ddr4en : 1;
+ u32 coldwake : 1;
+ u32 newbypdis : 1;
+ u32 chan_width : 1;
+ u32 rsvd6_6 : 1;
+ u32 ooodis : 1;
+ u32 rsvd18_8 : 11;
+ u32 ic : 1;
+ u32 rsvd31_20 : 12;
+};
+
+#define d_cr_dsch_port 0x16
+#define d_cr_dsch_offset 0x0
+#define d_cr_dsch_r_opcode 0x0
+
+struct d_cr_ecc_ctrl {
+ u32 eccen : 1;
+ u32 rsvd31_1 : 31;
+};
+
+#define d_cr_ecc_ctrl_offset 0x180
+#define d_cr_ecc_ctrl_r_opcode 0x0
+
+struct d_cr_drp {
+ u32 rken0 : 1;
+ u32 rken1 : 1;
+ u32 rken2 : 1;
+ u32 rken3 : 1;
+ u32 dimmdwid0 : 2;
+ u32 dimmdden0 : 2;
+ u32 dimmdwid1 : 2;
+ u32 dimmdden1 : 2;
+ u32 rsvd15_12 : 4;
+ u32 dimmflip : 1;
+ u32 rsvd31_17 : 15;
+};
+
+#define d_cr_drp_offset 0x158
+#define d_cr_drp_r_opcode 0x0
+
+struct d_cr_dmap {
+ u32 ba0 : 5;
+ u32 ba1 : 5;
+ u32 bg0 : 5; /* if ddr3, ba2 = bg0 */
+ u32 bg1 : 5; /* if ddr3, ba3 = bg1 */
+ u32 rs0 : 5;
+ u32 rs1 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap_offset 0x174
+#define d_cr_dmap_r_opcode 0x0
+
+struct d_cr_dmap1 {
+ u32 ca11 : 6;
+ u32 bxor : 1;
+ u32 rsvd : 25;
+};
+
+#define d_cr_dmap1_offset 0xb4
+#define d_cr_dmap1_r_opcode 0x0
+
+struct d_cr_dmap2 {
+ u32 row0 : 5;
+ u32 row1 : 5;
+ u32 row2 : 5;
+ u32 row3 : 5;
+ u32 row4 : 5;
+ u32 row5 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap2_offset 0x148
+#define d_cr_dmap2_r_opcode 0x0
+
+struct d_cr_dmap3 {
+ u32 row6 : 5;
+ u32 row7 : 5;
+ u32 row8 : 5;
+ u32 row9 : 5;
+ u32 row10 : 5;
+ u32 row11 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap3_offset 0x14c
+#define d_cr_dmap3_r_opcode 0x0
+
+struct d_cr_dmap4 {
+ u32 row12 : 5;
+ u32 row13 : 5;
+ u32 row14 : 5;
+ u32 row15 : 5;
+ u32 row16 : 5;
+ u32 row17 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap4_offset 0x150
+#define d_cr_dmap4_r_opcode 0x0
+
+struct d_cr_dmap5 {
+ u32 ca3 : 4;
+ u32 ca4 : 4;
+ u32 ca5 : 4;
+ u32 ca6 : 4;
+ u32 ca7 : 4;
+ u32 ca8 : 4;
+ u32 ca9 : 4;
+ u32 rsvd : 4;
+};
+
+#define d_cr_dmap5_offset 0x154
+#define d_cr_dmap5_r_opcode 0x0
+
+#endif /* _PND2_REGS_H */
reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
if (!reg)
goto chk_iob_axi0;
- dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n");
+ dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
if (reg & IOBPA_RDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
return 0;
}
}
- pr_err_once("requested map not found.\n");
return -ENOENT;
}
rc = efi_mem_desc_lookup(efi.esrt, &md);
if (rc < 0) {
- pr_err("ESRT header is not in the memory map.\n");
+ pr_warn("ESRT header is not in the memory map.\n");
return;
}
status = __gop_query32(sys_table_arg, gop32, &info, &size,
&current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are
status = __gop_query64(sys_table_arg, gop64, &info, &size,
&current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are
goto fail_free_event;
}
+ if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+ enable_irq_wake(irq);
+
list_add_tail(&event->node, &acpi_gpio->events);
return AE_OK;
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
+ if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+ disable_irq_wake(event->irq);
+
free_irq(event->irq, event);
desc = event->desc;
if (WARN_ON(IS_ERR(desc)))
}
desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ if (!IS_ERR(desc))
break;
+ if (PTR_ERR(desc) == -EPROBE_DEFER)
+ return ERR_CAST(desc);
}
/* Then from plain _CRS GPIOs */
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
-/* max number of VMHUB */
-#define AMDGPU_MAX_VMHUBS 2
-#define AMDGPU_MMHUB 0
-#define AMDGPU_GFXHUB 1
-
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
-
/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
/* set pte flags based per asic */
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
-};
-
-/* provided by the mc block */
-struct amdgpu_mc_funcs {
/* adjust mc addr in fb for APU case */
u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+ uint32_t (*get_invalidate_req)(unsigned int vm_id);
};
/* provided by the ih block */
struct amdgpu_bo_va_mapping {
struct list_head list;
- struct interval_tree_node it;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
uint64_t offset;
uint64_t flags;
};
uint32_t vm_context0_cntl;
uint32_t vm_l2_pro_fault_status;
uint32_t vm_l2_pro_fault_cntl;
- uint32_t (*get_invalidate_req)(unsigned int vm_id);
- uint32_t (*get_vm_protection_bits)(void);
};
/*
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
- const struct amdgpu_mc_funcs *mc_funcs;
};
/*
#define WREG32_FIELD(reg, field, val) \
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
+ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
+#define WREG32_FIELD15(ip, idx, reg, field, val) \
+ WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
/*
* BIOS helpers.
*/
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
+bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
+static inline bool amdgpu_has_atpx(void) { return false; }
#endif
/*
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ u8 frev, crev;
+
+ /* get any igp specific overrides */
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 8:
+ case 9:
+ return igp_info->info_8.ucUMAChannelNumber * 64;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id)
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev);
+
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id, u32 clock);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
- const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;
+ const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
struct drm_amdgpu_bo_list_entry *info;
struct amdgpu_bo_list *list;
}
/* get chunks */
- chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
+ chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
- chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+ chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+ cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
return r;
}
- offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
if (r < 0)
return r;
if (fences == NULL)
return -ENOMEM;
- fences_user = (void __user *)(unsigned long)(wait->in.fences);
+ fences_user = (void __user *)(uintptr_t)(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
continue;
list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->it.start > addr ||
- addr > mapping->it.last)
+ if (mapping->start > addr ||
+ addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;
}
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->it.start > addr ||
- addr > mapping->it.last)
+ if (mapping->start > addr ||
+ addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;
return (arg & (arg - 1)) == 0;
}
-static void amdgpu_get_block_size(struct amdgpu_device *adev)
-{
- /* from AI, asic starts to support multiple level VMPT */
- if (adev->asic_type >= CHIP_VEGA10) {
- if (amdgpu_vm_block_size != 9)
- dev_warn(adev->dev,
- "Multi-VMPT limits block size to one page!\n");
- amdgpu_vm_block_size = 9;
- return;
- }
+static void amdgpu_check_block_size(struct amdgpu_device *adev)
+{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (amdgpu_vm_block_size == -1) {
-
- /* Total bits covered by PD + PTs */
- unsigned bits = ilog2(amdgpu_vm_size) + 18;
-
- /* Make sure the PD is 4K in size up to 8GB address space.
- Above that split equal between PD and PTs */
- if (amdgpu_vm_size <= 8)
- amdgpu_vm_block_size = bits - 9;
- else
- amdgpu_vm_block_size = (bits + 3) / 2;
+ if (amdgpu_vm_block_size == -1)
+ return;
- } else if (amdgpu_vm_block_size < 9) {
+ if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
if (amdgpu_vm_block_size > 24 ||
(amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
dev_warn(adev->dev, "VM page table size (%d) too large\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
+
+ return;
+
+def_value:
+ amdgpu_vm_block_size = -1;
+}
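For scale: a page is 4 KiB (12 offset bits), so the minimum block size of 9 gives page tables that each span 2^(9+12) bytes = 2 MiB, while the 24-bit upper bound caps a single page table at 64 GiB.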
+
+static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+{
+ if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+ dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ if (amdgpu_vm_size < 1) {
+ dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ /*
+	 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
+ */
+ if (amdgpu_vm_size > 1024) {
+ dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ return;
+
+def_value:
+ amdgpu_vm_size = -1;
}
/**
}
}
- if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
- dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
+ amdgpu_check_vm_size(adev);
- if (amdgpu_vm_size < 1) {
- dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
-
- /*
- * Max GPUVM size for Cayman, SI and CI are 40 bits.
- */
- if (amdgpu_vm_size > 1024) {
- dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
-
- amdgpu_get_block_size(adev);
+ amdgpu_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
}
r = amdgpu_resume(adev);
- if (r)
+ if (r) {
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
-
+ return r;
+ }
amdgpu_fence_driver_resume(adev);
if (resume) {
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
-int amdgpu_vm_size = 64;
+int amdgpu_vm_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
- void __user *out = (void __user *)(long)args->value;
+ void __user *out = (void __user *)(uintptr_t)args->value;
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
+ if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
+ r = -EINVAL;
+ amdgpu_bo_unreserve(robj);
+ break;
+ }
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
amdgpu_bo_unreserve(robj);
return -EINVAL;
if (!adev->irq.client[client_id].sources) {
- adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
- sizeof(struct amdgpu_irq_src),
- GFP_KERNEL);
+ adev->irq.client[client_id].sources =
+ kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+ sizeof(struct amdgpu_irq_src *),
+ GFP_KERNEL);
if (!adev->irq.client[client_id].sources)
return -ENOMEM;
}
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx(void);
-#else
-static inline bool amdgpu_has_atpx(void) { return false; }
-#endif
-
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
*
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
- void __user *out = (void __user *)(long)info->return_pointer;
+ void __user *out = (void __user *)(uintptr_t)info->return_pointer;
uint32_t size = info->return_size;
struct drm_crtc *crtc;
uint32_t ui32 = 0;
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
#include <drm/drmP.h>
#include <drm/drm.h>
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- unsigned lpfn = 0;
-
- /* This forces a reallocation if the flag wasn't set before */
- if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
- places[c].lpfn = lpfn;
+ places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
+
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
c++;
}
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
+ /* A shared bo cannot be migrated to VRAM */
+ if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
+ return -EINVAL;
+
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
/* TODO: figure out how to map scattered VRAM to the CPU */
- if ((offset + size) <= adev->mc.visible_vram_size &&
- (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+ if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
return -EINVAL;
/* hurrah the memory is not visible ! */
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
while (*((unsigned int *)psp->fence_buf) != index) {
msleep(1);
- };
+ }
amdgpu_bo_free_kernel(&cmd_buf_bo,
&cmd_buf_mc_addr,
TP_fast_assign(
__entry->bo = bo_va ? bo_va->bo : NULL;
- __entry->start = mapping->it.start;
- __entry->last = mapping->it.last;
+ __entry->start = mapping->start;
+ __entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
TP_fast_assign(
__entry->bo = bo_va->bo;
- __entry->start = mapping->it.start;
- __entry->last = mapping->it.last;
+ __entry->start = mapping->start;
+ __entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
),
TP_fast_assign(
- __entry->soffset = mapping->it.start;
- __entry->eoffset = mapping->it.last + 1;
+ __entry->soffset = mapping->start;
+ __entry->eoffset = mapping->last + 1;
__entry->flags = mapping->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
case TTM_PL_TT:
break;
case TTM_PL_VRAM:
- if (mem->start == AMDGPU_BO_INVALID_OFFSET)
- return -EINVAL;
-
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
return -EINVAL;
mem->bus.base = adev->mc.aper_base;
mem->bus.is_iomem = true;
-#ifdef __alpha__
- /*
- * Alpha: use bus.addr to hold the ioremap() return,
- * so we can modify bus.base below.
- */
- if (mem->placement & TTM_PL_FLAG_WC)
- mem->bus.addr =
- ioremap_wc(mem->bus.base + mem->bus.offset,
- mem->bus.size);
- else
- mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
- mem->bus.size);
- if (!mem->bus.addr)
- return -ENOMEM;
-
- /*
- * Alpha: Use just the bus offset plus
- * the hose/domain memory base for bus.base.
- * It then can be used to build PTEs for VRAM
- * access, as done in ttm_bo_vm_fault().
- */
- mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
- adev->ddev->hose->dense_mem_base;
-#endif
break;
default:
return -EINVAL;
{
}
+static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ struct drm_mm_node *mm = bo->mem.mm_node;
+ uint64_t size = mm->size;
+ uint64_t offset = page_offset;
+
+ page_offset = do_div(offset, size);
+ mm += offset;
+ return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+}
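Note the helper assumes every drm_mm node backing the BO has the same size (it divides by the first node's size): with 256-page nodes, for example, fault page 600 lands in node 600 / 256 = 2 at offset 88, and that node's start is added to the aperture's base pfn.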
+
/*
* TTM backend functions.
*/
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
+ .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
start = amdgpu_bo_gpu_offset(bo);
- end = (mapping->it.last + 1 - mapping->it.start);
+ end = (mapping->last + 1 - mapping->start);
end = end * AMDGPU_GPU_PAGE_SIZE + start;
- addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
start += addr;
amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
}
if ((addr + (uint64_t)size) >
- ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
- addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
addr += amdgpu_bo_gpu_offset(bo);
addr -= ((uint64_t)size) * ((uint64_t)index);
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);
* Jerome Glisse
*/
#include <linux/dma-fence-array.h>
+#include <linux/interval_tree_generic.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
* SI supports 16.
*/
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
+ START, LAST, static, amdgpu_vm_it)
+
+#undef START
+#undef LAST
+
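INTERVAL_TREE_DEFINE() generates a static helper set named from the prefix — amdgpu_vm_it_insert(), amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and amdgpu_vm_it_iter_next() — keyed on the start/last fields now embedded directly in amdgpu_bo_va_mapping. A minimal sketch of the overlap scan they provide (mirroring the amdgpu_vm_bo_map() hunk further down):

	struct amdgpu_bo_va_mapping *m;

	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr))
		/* [m->start, m->last] intersects [saddr, eaddr] */;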
/* Local structure. Encapsulate some VM table update parameters to reduce
* the number of function parameters
*/
if (level == 0)
/* For the root directory */
return adev->vm_manager.max_pfn >>
- (amdgpu_vm_block_size * adev->vm_manager.num_level);
+ (adev->vm_manager.block_size *
+ adev->vm_manager.num_level);
else if (level == adev->vm_manager.num_level)
/* For the page tables on the leaves */
- return AMDGPU_VM_PTE_COUNT;
+ return AMDGPU_VM_PTE_COUNT(adev);
else
/* Everything in between */
- return 1 << amdgpu_vm_block_size;
+ return 1 << adev->vm_manager.block_size;
}
/**
unsigned level)
{
unsigned shift = (adev->vm_manager.num_level - level) *
- amdgpu_vm_block_size;
+ adev->vm_manager.block_size;
unsigned pt_idx, from, to;
int r;
return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}
-static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
- struct amdgpu_vm_id *id)
+/**
+ * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if GPU reset occurred since last use of the VMID.
+ */
+static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vm_id *id)
{
return id->current_gpu_reset_count !=
- atomic_read(&adev->gpu_reset_counter) ? true : false;
+ atomic_read(&adev->gpu_reset_counter);
}
/**
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
- if (amdgpu_vm_is_gpu_reset(adev, id))
+ if (amdgpu_vm_had_gpu_reset(adev, id))
continue;
if (atomic64_read(&id->owner) != vm->client_id)
if (r)
goto error;
- id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
if (r)
goto error;
- dma_fence_put(id->first);
- id->first = dma_fence_get(fence);
-
dma_fence_put(id->last_flush);
id->last_flush = NULL;
{
u64 addr = mc_addr;
- if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr)
- addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr);
+ if (adev->gart.gart_funcs->adjust_mc_addr)
+ addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
return addr;
}
id->gws_size != job->gws_size ||
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
+ bool vm_flush_needed = job->vm_needs_flush ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring);
+ unsigned patch_offset = 0;
int r;
- if (job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_is_gpu_reset(adev, id) ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)) {
- unsigned patch_offset = 0;
+ if (amdgpu_vm_had_gpu_reset(adev, id)) {
+ gds_switch_needed = true;
+ vm_flush_needed = true;
+ }
- if (ring->funcs->init_cond_exec)
- patch_offset = amdgpu_ring_init_cond_exec(ring);
+ if (!vm_flush_needed && !gds_switch_needed)
+ return 0;
- if (ring->funcs->emit_pipeline_sync &&
- (job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)))
- amdgpu_ring_emit_pipeline_sync(ring);
+ if (ring->funcs->init_cond_exec)
+ patch_offset = amdgpu_ring_init_cond_exec(ring);
- if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
- amdgpu_vm_is_gpu_reset(adev, id))) {
- struct dma_fence *fence;
- u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+ if (ring->funcs->emit_pipeline_sync)
+ amdgpu_ring_emit_pipeline_sync(ring);
- trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+ if (ring->funcs->emit_vm_flush && vm_flush_needed) {
+ u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+ struct dma_fence *fence;
- r = amdgpu_fence_emit(ring, &fence);
- if (r)
- return r;
+ trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
- mutex_lock(&adev->vm_manager.lock);
- dma_fence_put(id->last_flush);
- id->last_flush = fence;
- mutex_unlock(&adev->vm_manager.lock);
- }
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
- if (gds_switch_needed) {
- id->gds_base = job->gds_base;
- id->gds_size = job->gds_size;
- id->gws_base = job->gws_base;
- id->gws_size = job->gws_size;
- id->oa_base = job->oa_base;
- id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
- }
+ mutex_lock(&adev->vm_manager.lock);
+ dma_fence_put(id->last_flush);
+ id->last_flush = fence;
+ mutex_unlock(&adev->vm_manager.lock);
+ }
- if (ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ if (gds_switch_needed) {
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+ job->gds_size, job->gws_base,
+ job->gws_size, job->oa_base,
+ job->oa_size);
+ }
- /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
- if (ring->funcs->emit_switch_buffer) {
- amdgpu_ring_emit_switch_buffer(ring);
- amdgpu_ring_emit_switch_buffer(ring);
- }
+ if (ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+ /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+ if (ring->funcs->emit_switch_buffer) {
+ amdgpu_ring_emit_switch_buffer(ring);
+ amdgpu_ring_emit_switch_buffer(ring);
}
return 0;
}
unsigned idx, level = p->adev->vm_manager.num_level;
while (entry->entries) {
- idx = addr >> (amdgpu_vm_block_size * level--);
+ idx = addr >> (p->adev->vm_manager.block_size * level--);
idx %= amdgpu_bo_size(entry->bo) / 8;
entry = &entry->entries[idx];
}
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
- const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+ struct amdgpu_device *adev = params->adev;
+ const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
uint64_t cur_pe_start, cur_nptes, cur_dst;
uint64_t addr; /* next GPU address to be updated */
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
- nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+ nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
cur_pe_start = amdgpu_bo_gpu_offset(pt);
cur_pe_start += (addr & mask) * 8;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
- nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+ nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
next_pe_start = amdgpu_bo_gpu_offset(pt);
next_pe_start += (addr & mask) * 8;
* reserve space for one command every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
*/
- ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
+ ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
/* padding, etc. */
ndw = 64;
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
- uint64_t pfn, src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits, which go here
}
addr += pfn << PAGE_SHIFT;
- last = min((uint64_t)mapping->it.last, start + max_entries - 1);
+ last = min((uint64_t)mapping->last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
}
start = last + 1;
- } while (unlikely(start != mapping->it.last + 1));
+ } while (unlikely(start != mapping->last + 1));
return 0;
}
if (fence)
dma_fence_wait(fence, false);
- amdgpu_vm_prt_put(cb->adev);
+ amdgpu_vm_prt_put(adev);
} else {
cb->adev = adev;
if (!fence || dma_fence_add_callback(fence, &cb->cb,
uint64_t saddr, uint64_t offset,
uint64_t size, uint64_t flags)
{
- struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_vm *vm = bo_va->vm;
- struct interval_tree_node *it;
uint64_t eaddr;
/* validate the parameters */
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- if (it) {
- struct amdgpu_bo_va_mapping *tmp;
- tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
- "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
- tmp->it.start, tmp->it.last + 1);
+ "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+ tmp->start, tmp->last + 1);
return -EINVAL;
}
return -ENOMEM;
INIT_LIST_HEAD(&mapping->list);
- mapping->it.start = saddr;
- mapping->it.last = eaddr;
+ mapping->start = saddr;
+ mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
- interval_tree_insert(&mapping->it, &vm->va);
+ amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- mapping->it.start = saddr;
- mapping->it.last = eaddr;
+ mapping->start = saddr;
+ mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
- interval_tree_insert(&mapping->it, &vm->va);
+ amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
saddr /= AMDGPU_GPU_PAGE_SIZE;
list_for_each_entry(mapping, &bo_va->valids, list) {
- if (mapping->it.start == saddr)
+ if (mapping->start == saddr)
break;
}
valid = false;
list_for_each_entry(mapping, &bo_va->invalids, list) {
- if (mapping->it.start == saddr)
+ if (mapping->start == saddr)
break;
}
}
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
uint64_t saddr, uint64_t size)
{
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
- struct interval_tree_node *it;
LIST_HEAD(removed);
uint64_t eaddr;
INIT_LIST_HEAD(&after->list);
/* Now gather all removed mappings */
- it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- while (it) {
- tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
- it = interval_tree_iter_next(it, saddr, eaddr);
-
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ while (tmp) {
/* Remember mapping split at the start */
- if (tmp->it.start < saddr) {
- before->it.start = tmp->it.start;
- before->it.last = saddr - 1;
+ if (tmp->start < saddr) {
+ before->start = tmp->start;
+ before->last = saddr - 1;
before->offset = tmp->offset;
before->flags = tmp->flags;
list_add(&before->list, &tmp->list);
}
/* Remember mapping split at the end */
- if (tmp->it.last > eaddr) {
- after->it.start = eaddr + 1;
- after->it.last = tmp->it.last;
+ if (tmp->last > eaddr) {
+ after->start = eaddr + 1;
+ after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->it.start - tmp->it.start;
+ after->offset += after->start - tmp->start;
after->flags = tmp->flags;
list_add(&after->list, &tmp->list);
}
list_del(&tmp->list);
list_add(&tmp->list, &removed);
+
+ tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
}
/* And free them up */
list_for_each_entry_safe(tmp, next, &removed, list) {
- interval_tree_remove(&tmp->it, &vm->va);
+ amdgpu_vm_it_remove(tmp, &vm->va);
list_del(&tmp->list);
- if (tmp->it.start < saddr)
- tmp->it.start = saddr;
- if (tmp->it.last > eaddr)
- tmp->it.last = eaddr;
+ if (tmp->start < saddr)
+ tmp->start = saddr;
+ if (tmp->last > eaddr)
+ tmp->last = eaddr;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
- interval_tree_insert(&before->it, &vm->va);
+ amdgpu_vm_it_insert(before, &vm->va);
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
- interval_tree_insert(&after->it, &vm->va);
+ amdgpu_vm_it_insert(after, &vm->va);
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
amdgpu_vm_free_mapping(adev, vm, mapping,
bo_va->last_pt_update);
}
}
}
+static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
+{
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(vm_size) + 18;
+
+	/* Make sure the PD is 4K in size up to 8GB address space.
+	   Above that, split equally between PD and PTs */
+ if (vm_size <= 8)
+ return (bits - 9);
+ else
+ return ((bits + 3) / 2);
+}
+
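A worked example of the bit arithmetic above, with illustrative sizes only:

/*
 * vm_size = 64 (GB) -> bits = ilog2(64) + 18 = 24 page-address bits.
 * 64 > 8, so block_size = (24 + 3) / 2 = 13, leaving 24 - 13 = 11 bits
 * for the PD: 2^11 entries * 8 bytes = 16 KB.
 * vm_size = 8 (GB) -> bits = 21, block_size = 21 - 9 = 12, leaving
 * 9 bits for the PD: 512 entries * 8 bytes = exactly one 4 KB page.
 */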
+/**
+ * amdgpu_vm_adjust_size - adjust vm size and block size
+ *
+ * @adev: amdgpu_device pointer
+ * @vm_size: the default vm size to use if the module parameter is set to auto
+ */
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+{
+	/* adjust vm size first */
+ if (amdgpu_vm_size == -1)
+ adev->vm_manager.vm_size = vm_size;
+ else
+ adev->vm_manager.vm_size = amdgpu_vm_size;
+
+ /* block size depends on vm size */
+ if (amdgpu_vm_block_size == -1)
+ adev->vm_manager.block_size =
+ amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
+ else
+ adev->vm_manager.block_size = amdgpu_vm_block_size;
+
+ DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size);
+}
+
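Callers are expected to pair this helper with the pfn computation, as the gmc_v6/v7/v8 hunks later in this series do; a minimal sketch:

/*
 * amdgpu_vm_adjust_size(adev, 64);                    (64 GB default)
 * adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
 *                                             (1 GB = 2^18 4 KB pages)
 */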
/**
* amdgpu_vm_init - initialize a vm instance
*
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
- AMDGPU_VM_PTE_COUNT * 8);
+ AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
struct amdgpu_ring *ring;
struct amd_sched_rq *rq;
if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
- rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
+ rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
}
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush);
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
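To make the macro change concrete, assuming a block_size of 13 purely for illustration:

/*
 * AMDGPU_VM_PTE_COUNT(adev) = 1 << 13 = 8192 entries, so one PTB is
 * 8192 * 8 bytes = 64 KB and amdgpu_vm_init()'s align works out to
 * min(32768, 65536) = 32 KB.
 */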
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* max number of VMHUB */
+#define AMDGPU_MAX_VMHUBS 2
+#define AMDGPU_GFXHUB 0
+#define AMDGPU_MMHUB 1
+
+/* hardcode that limit for now */
+#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
struct amdgpu_vm_id {
struct list_head list;
- struct dma_fence *first;
struct amdgpu_sync active;
struct dma_fence *last_flush;
atomic64_t owner;
uint64_t max_pfn;
uint32_t num_level;
+ uint64_t vm_size;
+ uint32_t block_size;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
#endif
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
- struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
if (!lpfn)
lpfn = man->size;
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
- place->lpfn || amdgpu_vram_page_split == -1) {
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
+ amdgpu_vram_page_split == -1) {
pages_per_node = ~0ul;
num_nodes = 1;
} else {
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
+ mem->start = 0;
pages_left = mem->num_pages;
spin_lock(&mgr->lock);
for (i = 0; i < num_nodes; ++i) {
unsigned long pages = min(pages_left, pages_per_node);
uint32_t alignment = mem->page_alignment;
+ unsigned long start;
if (pages == pages_per_node)
alignment = pages_per_node;
if (unlikely(r))
goto error;
+ /* Calculate a virtual BO start address to easily check if
+ * everything is CPU accessible.
+ */
+ start = nodes[i].start + nodes[i].size;
+ if (start > mem->num_pages)
+ start -= mem->num_pages;
+ else
+ start = 0;
+ mem->start = max(mem->start, start);
pages_left -= pages;
}
spin_unlock(&mgr->lock);
- mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
mem->mm_node = nodes;
return 0;
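A hypothetical two-node allocation shows what the max() above buys:

/*
 * Nodes at pages [100, 160) and [300, 360), num_pages = 120:
 * mem->start = max(160 - 120, 360 - 120) = 240, so mem->start +
 * num_pages = 360 is the highest page any node touches, and a single
 * comparison against the CPU-visible limit covers the whole BO.
 */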
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
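Read plainly, the replacement computes the same bound in integer math (a paraphrase of the hunk; the identical change recurs in the dce_v11_0, dce_v6_0 and dce_v8_0 hunks below):

/*
 * tmp        = min(available_bandwidth / num_heads,
 *                  dmif_size * disp_clk / (mc_latency + 512))
 * lb_fill_bw = min(tmp, disp_clk * bytes_per_pixel / 1000)
 */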
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
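The precision gain is easiest to see with concrete, illustrative mode values:

/*
 * mode->clock = 148500 (kHz), crtc_hdisplay = 1920:
 *   old: pixel_period = 1000000 / 148500 = 6 (truncated),
 *        active_time  = 1920 * 6 = 11520
 *   new: active_time  = 1000000UL * 1920 / 148500 = 12929
 * The per-pixel truncation no longer compounds across the whole line.
 */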
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
}
static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int i;
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
}
static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int i;
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce6_wm_params wm_low, wm_high;
u32 dram_channels;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
}
static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int i;
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
}
static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int i;
}
static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, uint32_t size)
+ u16 *green, u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int i;
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.kiq.ring.ready = false;
}
udelay(50);
}
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
-
- if (ring->use_doorbell)
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 0);
+ tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
+ CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN,
+ ring->use_doorbell ? 1 : 0);
mqd->cp_hqd_pq_doorbell_control = tmp;
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
- uint32_t tmp;
int j;
/* disable wptr polling */
- tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+ WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
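WREG32_FIELD is a direct fold of the read-modify-write triplet it replaces; the one-liner above is equivalent to:

/*
 * tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
 * tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
 * WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
 */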
WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo);
WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
- if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+ if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
- if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+ if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
/* activate the queue */
WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
- if (ring->use_doorbell) {
- tmp = RREG32(mmCP_PQ_STATUS);
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(mmCP_PQ_STATUS, tmp);
- }
+ if (ring->use_doorbell)
+ WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
{
int i;
+ mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
- u32 tmp;
- tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
- tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
- DEQUEUE_REQ, 2);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+ WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, 2);
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
}
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v8_0_pre_soft_reset(void *handle)
static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
+ mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
WREG32(mmCP_HQD_PQ_RPTR, 0);
WREG32(mmCP_HQD_PQ_WPTR, 0);
vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
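Both hunks above add the same bracket around SRBM-banked register access; the pattern, sketched:

/*
 * mutex_lock(&adev->srbm_mutex);
 * vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
 * ... read/write per-queue CP_HQD_* registers ...
 * vi_srbm_select(adev, 0, 0, 0, 0);
 * mutex_unlock(&adev->srbm_mutex);
 */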
static int gfx_v8_0_post_soft_reset(void *handle)
unsigned int type,
enum amdgpu_interrupt_state state)
{
- uint32_t tmp, target;
struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
BUG_ON(ring->funcs->type != AMDGPU_RING_TYPE_KIQ);
- if (ring->me == 1)
- target = mmCP_ME1_PIPE0_INT_CNTL;
- else
- target = mmCP_ME2_PIPE0_INT_CNTL;
- target += ring->pipe;
-
switch (type) {
case AMDGPU_CP_KIQ_IRQ_DRIVER0:
- if (state == AMDGPU_IRQ_STATE_DISABLE) {
- tmp = RREG32(mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32(mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32(target, tmp);
- } else {
- tmp = RREG32(mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32(mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32(target, tmp);
- }
+ WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+ if (ring->me == 1)
+ WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
+ ring->pipe,
+ GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+ else
+ WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
+ ring->pipe,
+ GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
break;
default:
BUG(); /* kiq only support GENERIC2_INT now */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
- .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
u32 tmp;
int i;
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL));
- tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v9_0_tiling_mode_table_init(adev);
static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET));
-
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
/* carrizo do enable cp interrupt after cp inited */
if (!(adev->flags & AMD_IS_APU))
int i;
u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL));
- if (enable) {
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
+ if (!enable) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].ready = false;
}
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
- uint32_t tmp;
int j;
/* disable wptr polling */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
mqd->cp_hqd_eop_base_addr_lo);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE),
mqd->cp_hqd_active);
- if (ring->use_doorbell) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
- }
+ if (ring->use_doorbell)
+ WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
return -ETIMEDOUT;
}
-static void gfx_v9_0_print_status(void *handle)
-{
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "GFX 9.x registers\n");
- dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)));
- dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)));
- dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)));
- dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)));
- dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)));
- dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)));
- dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STAT)));
- dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)));
- dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)));
- dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)));
- dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)));
- dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)));
- dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)));
- dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT)));
- dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)));
- dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)));
-
- for (i = 0; i < 32; i++) {
- dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
- i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_TILE_MODE0 ) + i*4));
- }
- for (i = 0; i < 16; i++) {
- dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
- i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_MACROTILE_MODE0) + i*4));
- }
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- dev_info(adev->dev, " se: %d\n", i);
- gfx_v9_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG)));
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG_1)));
- }
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-
- dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)));
-
- dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEQ_THRESHOLDS)));
- dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSX_DEBUG_1)));
- dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX)));
- dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL)));
- dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG)));
- dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG)));
- dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)));
- dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG3)));
- dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL)));
- dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1)));
- dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FIFO_SIZE)));
- dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_NUM_INSTANCES)));
- dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PERFMON_CNTL)));
- dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FORCE_EOV_MAX_CNTS)));
- dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION)));
- dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_GS_VERTEX_REUSE)));
- dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE)));
- dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_CL_ENHANCE)));
- dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE)));
-
- dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)));
- dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MAX_CONTEXT)));
- dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_DEVICE_ID)));
-
- dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_SEM_WAIT_TIMER)));
-
- dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_DELAY)));
- dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_VMID)));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
- dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR)));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR)));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR_HI)));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
- dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE)));
- dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE_HI)));
- dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL)));
-
- dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_ADDR)));
- dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_UMSK)));
-
- dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
- dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL)));
- dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)));
- dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_INIT)));
- dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_MAX)));
- dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_INIT_CU_MASK)));
- dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_PARAMS)));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
- dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_UCODE_CNTL)));
-
- dev_info(adev->dev, " RLC_GPM_GENERAL_6=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_6)));
- dev_info(adev->dev, " RLC_GPM_GENERAL_12=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_12)));
- dev_info(adev->dev, " RLC_GPM_TIMER_INT_3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_TIMER_INT_3)));
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < 16; i++) {
- soc15_grbm_select(adev, 0, 0, 0, i);
- dev_info(adev->dev, " VM %d:\n", i);
- dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)));
- dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES)));
- }
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
static int gfx_v9_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0;
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- if (grbm_soft_reset ) {
- gfx_v9_0_print_status((void *)adev);
+ if (grbm_soft_reset) {
/* stop the rlc */
gfx_v9_0_rlc_stop(adev);
/* Wait a little for things to settle down */
udelay(50);
- gfx_v9_0_print_status((void *)adev);
}
return 0;
}
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
hub->ctx0_ptb_addr_lo32
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl =
- REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
default:
break;
}
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
- .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
ring->pipe,
ring->queue, 0);
/* disable wptr polling */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
/* write the EOP addr */
BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle the EOP address for other cases */
amdgpu_bo_kunmap(ring->mqd_obj);
amdgpu_bo_unreserve(ring->mqd_obj);
- if (use_doorbell) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
- }
+ if (use_doorbell)
+ WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
-static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vm_id*/
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
-}
-
-static uint32_t gfxhub_v1_0_get_vm_protection_bits(void)
-{
- return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
-}
-
static int gfxhub_v1_0_early_init(void *handle)
{
return 0;
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
- hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
- hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits;
-
return 0;
}
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
- ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+ ((adev->vm_manager.block_size - 9)
+ << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v6_0_set_fault_enable_default(adev, false);
else
if (r)
return r;
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_atombios.h"
+
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
*/
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
- int chansize, numchan;
-
- /* Get VRAM informations */
- tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
- chansize = 64;
- } else {
- chansize = 32;
- }
- tmp = RREG32(mmMC_SHARED_CHMAP);
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 4;
- break;
- case 3:
- numchan = 8;
- break;
- case 4:
- numchan = 3;
- break;
- case 5:
- numchan = 6;
- break;
- case 6:
- numchan = 10;
- break;
- case 7:
- numchan = 12;
- break;
- case 8:
- numchan = 16;
- break;
+ adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ u32 tmp;
+ int chansize, numchan;
+
+		/* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v7_0_set_fault_enable_default(adev, false);
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
#include "vid.h"
#include "vi.h"
+#include "amdgpu_atombios.h"
+
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
*/
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
- int chansize, numchan;
-
- /* Get VRAM informations */
- tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
- chansize = 64;
- } else {
- chansize = 32;
- }
- tmp = RREG32(mmMC_SHARED_CHMAP);
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 4;
- break;
- case 3:
- numchan = 8;
- break;
- case 4:
- numchan = 3;
- break;
- case 5:
- numchan = 6;
- break;
- case 6:
- numchan = 10;
- break;
- case 7:
- numchan = 12;
- break;
- case 8:
- numchan = 16;
- break;
+ adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ u32 tmp;
+ int chansize, numchan;
+
+		/* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v8_0_set_fault_enable_default(adev, false);
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
struct amdgpu_vmhub *hub;
u32 tmp, reg, bits, i;
+ bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct amdgpu_vmhub *gfxhub = &adev->vmhub[AMDGPU_GFXHUB];
- struct amdgpu_vmhub *mmhub = &adev->vmhub[AMDGPU_MMHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
uint32_t status = 0;
u64 addr;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (!amdgpu_sriov_vf(adev)) {
- if (entry->vm_id_src) {
- status = RREG32(mmhub->vm_l2_pro_fault_status);
- WREG32_P(mmhub->vm_l2_pro_fault_cntl, 1, ~1);
- } else {
- status = RREG32(gfxhub->vm_l2_pro_fault_status);
- WREG32_P(gfxhub->vm_l2_pro_fault_cntl, 1, ~1);
- }
+ status = RREG32(hub->vm_l2_pro_fault_status);
+ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
if (printk_ratelimit()) {
adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+{
+ u32 req = 0;
+
+	/* invalidate using legacy mode on vm_id */
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
/*
* GART
* VMID 0 maps the physical GPU addresses as used by the kernel.
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
- u32 tmp = hub->get_invalidate_req(vmid);
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
return pte_flag;
}
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
- .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
- .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags
-};
-
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
-{
- if (adev->gart.gart_funcs == NULL)
- adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
-}
-
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}
-static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = {
+static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
+ .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
+ .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
+ .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
+ .get_invalidate_req = gmc_v9_0_get_invalidate_req,
};
-static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
- adev->mc.mc_funcs = &gmc_v9_0_mc_funcs;
+ if (adev->gart.gart_funcs == NULL)
+ adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}
static int gmc_v9_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_set_gart_funcs(adev);
- gmc_v9_0_set_mc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
return 0;
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.num_level = 3;
+
+ /* TODO: fix num_level for APU when updating vm size and block size */
+ if (adev->flags & AMD_IS_APU)
+ adev->vm_manager.num_level = 1;
+ else
+ adev->vm_manager.num_level = 3;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ amdgpu_vm_adjust_size(adev, 64);
} else {
/* XXX Don't know how to get VRAM type yet. */
adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ /*
+ * To fulfill 4-level page support,
+ * vm size is 256TB (48bit), maximum size of Vega10,
+ * block size 512 (9bit)
+ */
+ adev->vm_manager.vm_size = 1U << 18;
+ adev->vm_manager.block_size = 9;
+ DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ adev->vm_manager.vm_size,
+ adev->vm_manager.block_size);
}
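A quick sanity check of those constants:

/*
 * vm_size = 1 << 18 GB = 256 TB; max_pfn = vm_size << 18 = 2^36
 * 4 KB pages = 2^48 bytes, i.e. the full 48-bit address range.
 */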
/* This interrupt is for VMC page faults. */
if (r)
return r;
- /* Because of four level VMPTs, vm size is at least 512GB.
- * The maximum size is 256TB (48bit).
- */
- if (amdgpu_vm_size < 512) {
- DRM_WARN("VM size is at least 512GB!\n");
- amdgpu_vm_size = 512;
- }
- adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18;
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
-static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vm_id*/
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
-}
-
-static uint32_t mmhub_v1_0_get_vm_protection_bits(void)
-{
- return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
-}
-
static int mmhub_v1_0_early_init(void *handle)
{
return 0;
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
- hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
- hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits;
-
return 0;
}
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
+#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"
return r;
}
-static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
if (req == IDH_REQ_GPU_INIT_ACCESS ||
req == IDH_REQ_GPU_FINI_ACCESS ||
req == IDH_REQ_GPU_RESET_ACCESS) {
- r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
if (r)
return r;
}
return 0;
}
+static int xgpu_ai_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
return r;
}
+static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ /* wait until RCV_MSG becomes 3 */
+ if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
+ pr_err("failed to receive FLR_CMPL\n");
+ return;
+ }
+
+ /* Trigger recovery due to world switch failure */
+ amdgpu_sriov_gpu_reset(adev, false);
+}
+
+static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int r;
+
+ /* see what event we get */
+ r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+ /* only handle FLR_NOTIFY now */
+ if (!r)
+ schedule_work(&adev->virt.flr_work);
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
+ .set = xgpu_ai_set_mailbox_ack_irq,
+ .process = xgpu_ai_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
+ .set = xgpu_ai_set_mailbox_rcv_irq,
+ .process = xgpu_ai_mailbox_rcv_irq,
+};
+
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
+ .reset_gpu = xgpu_ai_request_reset,
};
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 150000
+#define AI_MAILBOX_TIMEDOUT 5000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
+
#endif
ucode_size = ucode->ucode_size;
ucode_mem = (uint32_t *)ucode->kaddr;
- while (!ucode_size) {
+ while (ucode_size) {
fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
if (*ucode_mem != fw_sram_reg_val)
bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- uint32_t reg, reg_val;
+ uint32_t reg;
- reg_val = (smnMP1_FIRMWARE_FLAGS & 0xffffffff) | 0x03b00000;
- WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg_val);
+ reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
+ WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg);
reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2));
- if ((reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return true;
-
- return false;
+ return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+ else
+ BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+ else
+ BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
- amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
+ if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
+ amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_setting(adev);
+ xgpu_ai_mailbox_set_irq_funcs(adev);
}
/*
return 0;
}
+static int soc15_common_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_get_irq(adev);
+
+ return 0;
+}
+
static int soc15_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_add_irq_id(adev);
+
return 0;
}
/* disable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, false);
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_put_irq(adev);
return 0;
}
const struct amd_ip_funcs soc15_common_ip_funcs = {
.name = "soc15_common",
.early_init = soc15_common_early_init,
- .late_init = NULL,
+ .late_init = soc15_common_late_init,
.sw_init = soc15_common_sw_init,
.sw_fini = soc15_common_sw_fini,
.hw_init = soc15_common_hw_init,
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
+
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v4_2_resume(void *handle)
if (r)
return r;
- r = uvd_v4_2_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v4_2_hw_init(adev);
}
/**
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
return r;
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v5_0_resume(void *handle)
if (r)
return r;
- r = uvd_v5_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v5_0_hw_init(adev);
}
/**
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
return r;
/* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
- }
return r;
}
if (r)
return r;
}
- r = uvd_v6_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v6_0_hw_init(adev);
}
/**
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
return r;
/* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
- }
return r;
}
if (r)
return r;
}
- r = uvd_v7_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v7_0_hw_init(adev);
}
/**
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
unsigned eng = ring->idx;
unsigned i;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v2_0_hw_init(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v2_0_resume(void *handle)
if (r)
return r;
- r = vce_v2_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v2_0_hw_init(adev);
}
static int vce_v2_0_soft_reset(void *handle)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v3_0_hw_init(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v3_0_resume(void *handle)
if (r)
return r;
- r = vce_v3_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v3_0_hw_init(adev);
}
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v4_0_hw_init(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v4_0_resume(void *handle)
if (r)
return r;
- r = vce_v4_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v4_0_hw_init(adev);
}
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring,
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
#define PACKET3_SET_RESOURCES 0xA0
+/* 1. header
+ * 2. CONTROL
+ * 3. QUEUE_MASK_LO [31:0]
+ * 4. QUEUE_MASK_HI [31:0]
+ * 5. GWS_MASK_LO [31:0]
+ * 6. GWS_MASK_HI [31:0]
+ * 7. OAC_MASK [15:0]
+ * 8. GDS_HEAP_SIZE [16:11] | GDS_HEAP_BASE [5:0]
+ */
+# define PACKET3_SET_RESOURCES_VMID_MASK(x) ((x) << 0)
+# define PACKET3_SET_RESOURCES_UNMAP_LATENTY(x) ((x) << 16)
+# define PACKET3_SET_RESOURCES_QUEUE_TYPE(x) ((x) << 29)
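To show how these field macros compose into the eight-dword layout listed above, here is a hedged sketch of emitting SET_RESOURCES from a KIQ ring; PACKET3(), amdgpu_ring_write() and queue_mask are assumed from the surrounding amdgpu code, not defined by this hunk:

    amdgpu_ring_write(ring, PACKET3(PACKET3_SET_RESOURCES, 6));
    amdgpu_ring_write(ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
                            PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* CONTROL */
    amdgpu_ring_write(ring, lower_32_bits(queue_mask)); /* QUEUE_MASK_LO */
    amdgpu_ring_write(ring, upper_32_bits(queue_mask)); /* QUEUE_MASK_HI */
    amdgpu_ring_write(ring, 0); /* GWS_MASK_LO */
    amdgpu_ring_write(ring, 0); /* GWS_MASK_HI */
    amdgpu_ring_write(ring, 0); /* OAC_MASK */
    amdgpu_ring_write(ring, 0); /* GDS_HEAP_SIZE | GDS_HEAP_BASE */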
#define PACKET3_MAP_QUEUES 0xA2
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. MQD_ADDR_LO [31:0]
+ * 5. MQD_ADDR_HI [31:0]
+ * 6. WPTR_ADDR_LO [31:0]
+ * 7. WPTR_ADDR_HI [31:0]
+ */
+/* CONTROL */
+# define PACKET3_MAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
+# define PACKET3_MAP_QUEUES_VMID(x) ((x) << 8)
+# define PACKET3_MAP_QUEUES_QUEUE_TYPE(x) ((x) << 21)
+# define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24)
+# define PACKET3_MAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
+# define PACKET3_MAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
+/* CONTROL2 */
+# define PACKET3_MAP_QUEUES_CHECK_DISABLE(x) ((x) << 1)
+# define PACKET3_MAP_QUEUES_DOORBELL_OFFSET(x) ((x) << 2)
+# define PACKET3_MAP_QUEUES_QUEUE(x) ((x) << 26)
+# define PACKET3_MAP_QUEUES_PIPE(x) ((x) << 29)
+# define PACKET3_MAP_QUEUES_ME(x) ((x) << 31)
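Along the same lines, a doorbell-selected MAP_QUEUES request could be assembled as below (a sketch under the same assumptions; doorbell_index, mqd_addr and wptr_addr are hypothetical locals):

    amdgpu_ring_write(ring, PACKET3(PACKET3_MAP_QUEUES, 5));
    amdgpu_ring_write(ring, PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* select by doorbell */
                            PACKET3_MAP_QUEUES_VMID(0) |
                            PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* normal compute queue */
                            PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* compute engine */
                            PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* CONTROL */
    amdgpu_ring_write(ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_index)); /* CONTROL2 */
    amdgpu_ring_write(ring, lower_32_bits(mqd_addr));  /* MQD_ADDR_LO */
    amdgpu_ring_write(ring, upper_32_bits(mqd_addr));  /* MQD_ADDR_HI */
    amdgpu_ring_write(ring, lower_32_bits(wptr_addr)); /* WPTR_ADDR_LO */
    amdgpu_ring_write(ring, upper_32_bits(wptr_addr)); /* WPTR_ADDR_HI */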
+#define PACKET3_UNMAP_QUEUES 0xA3
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. CONTROL3
+ * 5. CONTROL4
+ * 6. CONTROL5
+ */
+/* CONTROL */
+# define PACKET3_UNMAP_QUEUES_ACTION(x) ((x) << 0)
+ /* 0 - PREEMPT_QUEUES
+ * 1 - RESET_QUEUES
+ * 2 - DISABLE_PROCESS_QUEUES
+ * 3 - PREEMPT_QUEUES_NO_UNMAP
+ */
+# define PACKET3_UNMAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
+# define PACKET3_UNMAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
+# define PACKET3_UNMAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
+/* CONTROL2a */
+# define PACKET3_UNMAP_QUEUES_PASID(x) ((x) << 0)
+/* CONTROL2b */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(x) ((x) << 2)
+/* CONTROL3a */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET1(x) ((x) << 2)
+/* CONTROL3b */
+# define PACKET3_UNMAP_QUEUES_RB_WPTR(x) ((x) << 0)
+/* CONTROL4 */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET2(x) ((x) << 2)
+/* CONTROL5 */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET3(x) ((x) << 2)
+#define PACKET3_QUERY_STATUS 0xA4
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. ADDR_LO [31:0]
+ * 5. ADDR_HI [31:0]
+ * 6. DATA_LO [31:0]
+ * 7. DATA_HI [31:0]
+ */
+/* CONTROL */
+# define PACKET3_QUERY_STATUS_CONTEXT_ID(x) ((x) << 0)
+# define PACKET3_QUERY_STATUS_INTERRUPT_SEL(x) ((x) << 28)
+# define PACKET3_QUERY_STATUS_COMMAND(x) ((x) << 30)
+/* CONTROL2a */
+# define PACKET3_QUERY_STATUS_PASID(x) ((x) << 0)
+/* CONTROL2b */
+# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
+# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
{
enum amd_pm_state_type ps;
- if (input == NULL)
- return -EINVAL;
+ if (input == NULL) {
+ ret = -EINVAL;
+ break;
+ }
ps = *(unsigned long *)input;
data.requested_ui_label = power_state_convert(ps);
switch (state->classification.ui_label) {
case PP_StateUILabel_Battery:
pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
case PP_StateUILabel_Balanced:
pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
case PP_StateUILabel_Performance:
pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
default:
if (state->classification.flags & PP_StateClassificationFlag_Boot)
pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
else
pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
}
mutex_unlock(&pp_handle->pp_lock);
mutex_lock(&pp_handle->pp_lock);
ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_lock(&pp_handle->pp_lock);
+ mutex_unlock(&pp_handle->pp_lock);
return ret;
}
USHORT usFanStartTemperature;
} ATOM_Vega10_Fan_Table;
+typedef struct _ATOM_Vega10_Fan_Table_V2 {
+ UCHAR ucRevId;
+ USHORT usFanOutputSensitivity;
+ USHORT usFanAcousticLimitRpm;
+ USHORT usThrottlingRPM;
+ USHORT usTargetTemperature;
+ USHORT usMinimumPWMLimit;
+ USHORT usTargetGfxClk;
+ USHORT usFanGainEdge;
+ USHORT usFanGainHotspot;
+ USHORT usFanGainLiquid;
+ USHORT usFanGainVrVddc;
+ USHORT usFanGainVrMvdd;
+ USHORT usFanGainPlx;
+ USHORT usFanGainHbm;
+ UCHAR ucEnableZeroRPM;
+ USHORT usFanStopTemperature;
+ USHORT usFanStartTemperature;
+ UCHAR ucFanParameters;
+ UCHAR ucFanMinRPM;
+ UCHAR ucFanMaxRPM;
+} ATOM_Vega10_Fan_Table_V2;
+
typedef struct _ATOM_Vega10_Thermal_Controller {
UCHAR ucRevId;
UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/
USHORT usTemperatureLimitTedge;
} ATOM_Vega10_PowerTune_Table;
+typedef struct _ATOM_Vega10_PowerTune_Table_V2
+{
+ UCHAR ucRevId;
+ USHORT usSocketPowerLimit;
+ USHORT usBatteryPowerLimit;
+ USHORT usSmallPowerLimit;
+ USHORT usTdcLimit;
+ USHORT usEdcLimit;
+ USHORT usSoftwareShutdownTemp;
+ USHORT usTemperatureLimitHotSpot;
+ USHORT usTemperatureLimitLiquid1;
+ USHORT usTemperatureLimitLiquid2;
+ USHORT usTemperatureLimitHBM;
+ USHORT usTemperatureLimitVrSoc;
+ USHORT usTemperatureLimitVrMem;
+ USHORT usTemperatureLimitPlx;
+ USHORT usLoadLineResistance;
+ UCHAR ucLiquid1_I2C_address;
+ UCHAR ucLiquid2_I2C_address;
+ UCHAR ucLiquid_I2C_Line;
+ UCHAR ucVr_I2C_address;
+ UCHAR ucVr_I2C_Line;
+ UCHAR ucPlx_I2C_address;
+ UCHAR ucPlx_I2C_Line;
+ USHORT usTemperatureLimitTedge;
+} ATOM_Vega10_PowerTune_Table_V2;
+
typedef struct _ATOM_Vega10_Hard_Limit_Record {
ULONG ulSOCCLKLimit;
ULONG ulGFXCLKLimit;
const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
{
const ATOM_Vega10_Thermal_Controller *thermal_controller;
- const ATOM_Vega10_Fan_Table *fan_table;
+ const Vega10_PPTable_Generic_SubTable_Header *header;
+ const ATOM_Vega10_Fan_Table *fan_table_v1;
+ const ATOM_Vega10_Fan_Table_V2 *fan_table_v2;
thermal_controller = (ATOM_Vega10_Thermal_Controller *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usThermalControllerOffset));
PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0),
- "Thermal controller table not set!", return -1);
+ "Thermal controller table not set!", return -EINVAL);
hwmgr->thermal_controller.ucType = thermal_controller->ucType;
hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
hwmgr->thermal_controller.fanInfo.ulMaxRPM =
thermal_controller->ucFanMaxRPM * 100UL;
+ hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
+ = 100000;
+
set_hw_cap(
hwmgr,
ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
if (!powerplay_table->usFanTableOffset)
return 0;
- fan_table = (const ATOM_Vega10_Fan_Table *)
+ header = (const Vega10_PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usFanTableOffset));
- PP_ASSERT_WITH_CODE((fan_table->ucRevId >= 8),
- "Invalid Input Fan Table!", return -1);
+ if (header->ucRevId == 10) {
+ fan_table_v1 = (ATOM_Vega10_Fan_Table *)header;
- hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
- = 100000;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- le16_to_cpu(fan_table->usFanOutputSensitivity);
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- le16_to_cpu(fan_table->usFanRPMMax);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
- le16_to_cpu(fan_table->usThrottlingRPM);
- hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
- le32_to_cpu((uint32_t)(fan_table->usFanAcousticLimit));
- hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
- le16_to_cpu(fan_table->usTargetTemperature);
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
- le16_to_cpu(fan_table->usMinimumPWMLimit);
- hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
- le32_to_cpu((uint32_t)(fan_table->usTargetGfxClk));
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
- le16_to_cpu(fan_table->usFanGainEdge);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
- le16_to_cpu(fan_table->usFanGainHotspot);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
- le16_to_cpu(fan_table->usFanGainLiquid);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
- le16_to_cpu(fan_table->usFanGainVrVddc);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
- le16_to_cpu(fan_table->usFanGainVrMvdd);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
- le16_to_cpu(fan_table->usFanGainPlx);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
- le16_to_cpu(fan_table->usFanGainHbm);
-
- hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
- fan_table->ucEnableZeroRPM;
- hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
- le16_to_cpu(fan_table->usFanStopTemperature);
- hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
- le16_to_cpu(fan_table->usFanStartTemperature);
+ PP_ASSERT_WITH_CODE((fan_table_v1->ucRevId >= 8),
+ "Invalid Input Fan Table!", return -EINVAL);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table_v1->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ le16_to_cpu(fan_table_v1->usFanRPMMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
+ le16_to_cpu(fan_table_v1->usThrottlingRPM);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
+ le16_to_cpu(fan_table_v1->usFanAcousticLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table_v1->usTargetTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table_v1->usMinimumPWMLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
+ le16_to_cpu(fan_table_v1->usTargetGfxClk);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
+ le16_to_cpu(fan_table_v1->usFanGainEdge);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
+ le16_to_cpu(fan_table_v1->usFanGainHotspot);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
+ le16_to_cpu(fan_table_v1->usFanGainLiquid);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
+ le16_to_cpu(fan_table_v1->usFanGainVrVddc);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
+ le16_to_cpu(fan_table_v1->usFanGainVrMvdd);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
+ le16_to_cpu(fan_table_v1->usFanGainPlx);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
+ le16_to_cpu(fan_table_v1->usFanGainHbm);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
+ fan_table_v1->ucEnableZeroRPM;
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
+ le16_to_cpu(fan_table_v1->usFanStopTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
+ le16_to_cpu(fan_table_v1->usFanStartTemperature);
+ } else if (header->ucRevId > 10) {
+ fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header;
+
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+ fan_table_v2->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+ hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v2->ucFanMinRPM * 100UL;
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v2->ucFanMaxRPM * 100UL;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table_v2->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ fan_table_v2->ucFanMaxRPM * 100UL;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
+ le16_to_cpu(fan_table_v2->usThrottlingRPM);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
+ le16_to_cpu(fan_table_v2->usFanAcousticLimitRpm);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table_v2->usTargetTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table_v2->usMinimumPWMLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
+ le16_to_cpu(fan_table_v2->usTargetGfxClk);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
+ le16_to_cpu(fan_table_v2->usFanGainEdge);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
+ le16_to_cpu(fan_table_v2->usFanGainHotspot);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
+ le16_to_cpu(fan_table_v2->usFanGainLiquid);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
+ le16_to_cpu(fan_table_v2->usFanGainVrVddc);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
+ le16_to_cpu(fan_table_v2->usFanGainVrMvdd);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
+ le16_to_cpu(fan_table_v2->usFanGainPlx);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
+ le16_to_cpu(fan_table_v2->usFanGainHbm);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
+ fan_table_v2->ucEnableZeroRPM;
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
+ le16_to_cpu(fan_table_v2->usFanStopTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
+ le16_to_cpu(fan_table_v2->usFanStartTemperature);
+ }
return 0;
}
return 0;
}
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case Vega10_I2CLineID_DDC1:
+ *scl = Vega10_I2C_DDC1CLK;
+ *sda = Vega10_I2C_DDC1DATA;
+ break;
+ case Vega10_I2CLineID_DDC2:
+ *scl = Vega10_I2C_DDC2CLK;
+ *sda = Vega10_I2C_DDC2DATA;
+ break;
+ case Vega10_I2CLineID_DDC3:
+ *scl = Vega10_I2C_DDC3CLK;
+ *sda = Vega10_I2C_DDC3DATA;
+ break;
+ case Vega10_I2CLineID_DDC4:
+ *scl = Vega10_I2C_DDC4CLK;
+ *sda = Vega10_I2C_DDC4DATA;
+ break;
+ case Vega10_I2CLineID_DDC5:
+ *scl = Vega10_I2C_DDC5CLK;
+ *sda = Vega10_I2C_DDC5DATA;
+ break;
+ case Vega10_I2CLineID_DDC6:
+ *scl = Vega10_I2C_DDC6CLK;
+ *sda = Vega10_I2C_DDC6DATA;
+ break;
+ case Vega10_I2CLineID_SCLSDA:
+ *scl = Vega10_I2C_SCL;
+ *sda = Vega10_I2C_SDA;
+ break;
+ case Vega10_I2CLineID_DDCVGA:
+ *scl = Vega10_I2C_DDCVGACLK;
+ *sda = Vega10_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
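Usage is straightforward; with the Vega10_I2C_* pin defines introduced further below, for example:

    uint8_t scl, sda;

    get_scl_sda_value(Vega10_I2CLineID_DDC1, &scl, &sda);
    /* scl == Vega10_I2C_DDC1CLK (1), sda == Vega10_I2C_DDC1DATA (0) */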
static int get_tdp_table(
struct pp_hwmgr *hwmgr,
struct phm_tdp_table **info_tdp_table,
{
uint32_t table_size;
struct phm_tdp_table *tdp_table;
-
- const ATOM_Vega10_PowerTune_Table *power_tune_table =
- (ATOM_Vega10_PowerTune_Table *)table;
-
- table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
- hwmgr->dyn_state.cac_dtp_table = (struct phm_cac_tdp_table *)
- kzalloc(table_size, GFP_KERNEL);
-
- if (!hwmgr->dyn_state.cac_dtp_table)
- return -ENOMEM;
+ uint8_t scl;
+ uint8_t sda;
+ const ATOM_Vega10_PowerTune_Table *power_tune_table;
+ const ATOM_Vega10_PowerTune_Table_V2 *power_tune_table_v2;
table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table);
+
tdp_table = kzalloc(table_size, GFP_KERNEL);
- if (!tdp_table) {
- kfree(hwmgr->dyn_state.cac_dtp_table);
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ if (!tdp_table)
return -ENOMEM;
- }
- tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit);
- tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit);
- tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit);
- tdp_table->usSoftwareShutdownTemp =
- le16_to_cpu(power_tune_table->usSoftwareShutdownTemp);
- tdp_table->usTemperatureLimitTedge =
- le16_to_cpu(power_tune_table->usTemperatureLimitTedge);
- tdp_table->usTemperatureLimitHotspot =
- le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot);
- tdp_table->usTemperatureLimitLiquid1 =
- le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1);
- tdp_table->usTemperatureLimitLiquid2 =
- le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2);
- tdp_table->usTemperatureLimitHBM =
- le16_to_cpu(power_tune_table->usTemperatureLimitHBM);
- tdp_table->usTemperatureLimitVrVddc =
- le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc);
- tdp_table->usTemperatureLimitVrMvdd =
- le16_to_cpu(power_tune_table->usTemperatureLimitVrMem);
- tdp_table->usTemperatureLimitPlx =
- le16_to_cpu(power_tune_table->usTemperatureLimitPlx);
- tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address;
- tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address;
- tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL;
- tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA;
- tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address;
- tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL;
- tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA;
- tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
- tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
- tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
-
- hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
+ if (table->ucRevId == 5) {
+ power_tune_table = (ATOM_Vega10_PowerTune_Table *)table;
+ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit);
+ tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit);
+ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit);
+ tdp_table->usSoftwareShutdownTemp =
+ le16_to_cpu(power_tune_table->usSoftwareShutdownTemp);
+ tdp_table->usTemperatureLimitTedge =
+ le16_to_cpu(power_tune_table->usTemperatureLimitTedge);
+ tdp_table->usTemperatureLimitHotspot =
+ le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot);
+ tdp_table->usTemperatureLimitLiquid1 =
+ le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 =
+ le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitHBM =
+ le16_to_cpu(power_tune_table->usTemperatureLimitHBM);
+ tdp_table->usTemperatureLimitVrVddc =
+ le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc);
+ tdp_table->usTemperatureLimitVrMvdd =
+ le16_to_cpu(power_tune_table->usTemperatureLimitVrMem);
+ tdp_table->usTemperatureLimitPlx =
+ le16_to_cpu(power_tune_table->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address;
+ tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL;
+ tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA;
+ tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address;
+ tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL;
+ tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA;
+ tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
+ tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
+ tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
+ hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
+ } else {
+ power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table;
+ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit);
+ tdp_table->usTDC = le16_to_cpu(power_tune_table_v2->usTdcLimit);
+ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v2->usEdcLimit);
+ tdp_table->usSoftwareShutdownTemp =
+ le16_to_cpu(power_tune_table_v2->usSoftwareShutdownTemp);
+ tdp_table->usTemperatureLimitTedge =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitTedge);
+ tdp_table->usTemperatureLimitHotspot =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitHotSpot);
+ tdp_table->usTemperatureLimitLiquid1 =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitHBM =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitHBM);
+ tdp_table->usTemperatureLimitVrVddc =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrSoc);
+ tdp_table->usTemperatureLimitVrMvdd =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrMem);
+ tdp_table->usTemperatureLimitPlx =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address = power_tune_table_v2->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address = power_tune_table_v2->ucLiquid2_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucLiquid_I2C_Line, &scl, &sda);
+
+ tdp_table->ucLiquid_I2C_Line = scl;
+ tdp_table->ucLiquid_I2C_LineSDA = sda;
+
+ tdp_table->ucVr_I2C_address = power_tune_table_v2->ucVr_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucVr_I2C_Line, &scl, &sda);
+
+ tdp_table->ucVr_I2C_Line = scl;
+ tdp_table->ucVr_I2C_LineSDA = sda;
+ tdp_table->ucPlx_I2C_address = power_tune_table_v2->ucPlx_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucPlx_I2C_Line, &scl, &sda);
+
+ tdp_table->ucPlx_I2C_Line = scl;
+ tdp_table->ucPlx_I2C_LineSDA = sda;
+
+ hwmgr->platform_descriptor.LoadLineSlope =
+ power_tune_table_v2->usLoadLineResistance;
+ }
*info_tdp_table = tdp_table;
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddc_lookup_table, vddc_table, 16);
+ &pp_table_info->vddc_lookup_table, vddc_table, 8);
}
if (powerplay_table->usVddmemLookupTableOffset) {
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddmemLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddmem_lookup_table, vdd_mem_table, 16);
+ &pp_table_info->vddmem_lookup_table, vdd_mem_table, 4);
}
if (powerplay_table->usVddciLookupTableOffset) {
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddciLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddci_lookup_table, vddci_table, 16);
+ &pp_table_info->vddci_lookup_table, vddci_table, 4);
}
return result;
#include "hwmgr.h"
+enum Vega10_I2CLineID {
+ Vega10_I2CLineID_DDC1 = 0x90,
+ Vega10_I2CLineID_DDC2 = 0x91,
+ Vega10_I2CLineID_DDC3 = 0x92,
+ Vega10_I2CLineID_DDC4 = 0x93,
+ Vega10_I2CLineID_DDC5 = 0x94,
+ Vega10_I2CLineID_DDC6 = 0x95,
+ Vega10_I2CLineID_SCLSDA = 0x96,
+ Vega10_I2CLineID_DDCVGA = 0x97
+};
+
+#define Vega10_I2C_DDC1DATA 0
+#define Vega10_I2C_DDC1CLK 1
+#define Vega10_I2C_DDC2DATA 2
+#define Vega10_I2C_DDC2CLK 3
+#define Vega10_I2C_DDC3DATA 4
+#define Vega10_I2C_DDC3CLK 5
+#define Vega10_I2C_SDA 40
+#define Vega10_I2C_SCL 41
+#define Vega10_I2C_DDC4DATA 65
+#define Vega10_I2C_DDC4CLK 66
+#define Vega10_I2C_DDC5DATA 0x48
+#define Vega10_I2C_DDC5CLK 0x49
+#define Vega10_I2C_DDC6DATA 0x4a
+#define Vega10_I2C_DDC6CLK 0x4b
+#define Vega10_I2C_DDCVGADATA 0x4c
+#define Vega10_I2C_DDCVGACLK 0x4d
+
extern const struct pp_table_func vega10_pptable_funcs;
extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
* SMU TEAM: Always increment the interface version if
* any structure is changed in this file
*/
-#define SMU9_DRIVER_IF_VERSION 0xa
+#define SMU9_DRIVER_IF_VERSION 0xB
+
+#define PPTABLE_V10_SMU_VERSION 1
#define NUM_GFXCLK_DPM_LEVELS 8
#define NUM_UVD_DPM_LEVELS 8
int32_t a0;
int32_t a1;
int32_t a2;
+
+ uint8_t a0_shift;
+ uint8_t a1_shift;
+ uint8_t a2_shift;
+ uint8_t padding;
} GbVdroopTable_t;
typedef struct {
uint16_t Platform_sigma;
uint16_t PSM_Age_CompFactor;
- uint32_t Reserved[20];
+ uint32_t DpmLevelPowerDelta;
+
+ uint32_t Reserved[19];
/* Padding - ignore */
uint32_t MmHubPadding[7]; /* SMU internal use */
typedef struct {
uint16_t avgPsmCount[30];
uint16_t minPsmCount[30];
- uint16_t avgPsmVoltage[30]; /* in mV with 2 fractional bits */
- uint16_t minPsmVoltage[30]; /* in mV with 2 fractional bits */
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
uint32_t MmHubPadding[7]; /* SMU internal use */
} AvfsDebugTable_t;
#define UCLK_SWITCH_SLOW 0
#define UCLK_SWITCH_FAST 1
+/* GFX DIDT Configuration */
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
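The masks and shifts pair up per block (SQ/TCP/TD/DB); for example, testing the SQ EDC enable bit in a hypothetical didt_config word:

    bool sq_edc_enabled = (didt_config & SQ_EDC_MASK) >> SQ_EDC_SHIFT;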
#endif
static int hdlcd_probe(struct platform_device *pdev)
{
- struct device_node *port, *ep;
+ struct device_node *port;
struct component_match *match = NULL;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
/* there is only one output port inside each device, find it */
- ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
- if (!ep)
- return -ENODEV;
-
- if (!of_device_is_available(ep)) {
- of_node_put(ep);
+ port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!port)
return -ENODEV;
- }
-
- /* add the remote encoder port as component */
- port = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
- if (!port || !of_device_is_available(port)) {
- of_node_put(port);
- return -EAGAIN;
- }
drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
of_node_put(port);
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <video/videomode.h>
#include "malidp_drv.h"
long rate, req_rate = mode->crtc_clock * 1000;
if (req_rate) {
- rate = clk_round_rate(hwdev->mclk, req_rate);
- if (rate < req_rate) {
- DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
- mode->crtc_clock);
- return false;
- }
-
rate = clk_round_rate(hwdev->pxlclk, req_rate);
if (rate != req_rate) {
DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
struct videomode vm;
+ int err = pm_runtime_get_sync(crtc->dev->dev);
- drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
+ if (err < 0) {
+ DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err);
+ return;
+ }
+ drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
clk_prepare_enable(hwdev->pxlclk);
/* We rely on firmware to set mclk to a sensible level. */
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
+ int err;
drm_crtc_vblank_off(crtc);
hwdev->enter_config_mode(hwdev);
clk_disable_unprepare(hwdev->pxlclk);
+
+ err = pm_runtime_put(crtc->dev->dev);
+ if (err < 0) {
+ DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err);
+ }
+}
+
+static const struct gamma_curve_segment {
+ u16 start;
+ u16 end;
+} segments[MALIDP_COEFFTAB_NUM_COEFFS] = {
+ /* sector 0 */
+ { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 },
+ { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
+ { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 },
+ { 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 },
+ /* sector 1 */
+ { 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 },
+ /* sector 2 */
+ { 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 },
+ /* sector 3 */
+ { 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 },
+ /* sector 4 */
+ { 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 },
+ /* sector 5 */
+ { 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 },
+ /* sector 6 */
+ { 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 },
+ { 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 },
+ { 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 },
+ { 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 },
+ { 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 },
+ { 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 },
+ { 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 },
+};
+
+#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff)))
+
+static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob,
+ u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS])
+{
+ struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data;
+ int i;
+
+ for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) {
+ u32 a, b, delta_in, out_start, out_end;
+
+ delta_in = segments[i].end - segments[i].start;
+ /* DP has 12-bit internal precision for its LUTs. */
+ out_start = drm_color_lut_extract(lut[segments[i].start].green,
+ 12);
+ out_end = drm_color_lut_extract(lut[segments[i].end].green, 12);
+ a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in;
+ b = out_start;
+ coeffs[i] = DE_COEFTAB_DATA(a, b);
+ }
+}
+
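For clarity, DE_COEFTAB_DATA packs a U8.8 slope (a) and a 12-bit start value (b) per segment; presumably the hardware then evaluates each segment as a linear ramp, along these lines (an assumption about the hardware, not code from the patch):

    /* out(x) = b + a * (x - start) / 256; at x == end this lands on out_end,
     * since a == (out_end - out_start) * 256 / delta_in. */
    static u32 eval_gamma_segment(u32 a, u32 b, u16 start, u16 x)
    {
        return b + ((a * (x - start)) >> 8);
    }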
+/*
+ * Check if there is a new gamma LUT and if it is of an acceptable size. Also,
+ * reject any LUTs that use distinct red, green, and blue curves.
+ */
+static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
+ struct drm_color_lut *lut;
+ size_t lut_size;
+ int i;
+
+ if (!state->color_mgmt_changed || !state->gamma_lut)
+ return 0;
+
+ if (crtc->state->gamma_lut &&
+ (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id))
+ return 0;
+
+ if (state->gamma_lut->length % sizeof(struct drm_color_lut))
+ return -EINVAL;
+
+ lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut);
+ if (lut_size != MALIDP_GAMMA_LUT_SIZE)
+ return -EINVAL;
+
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ for (i = 0; i < lut_size; ++i)
+ if (!((lut[i].red == lut[i].green) &&
+ (lut[i].red == lut[i].blue)))
+ return -EINVAL;
+
+ if (!state->mode_changed) {
+ int ret;
+
+ state->mode_changed = true;
+ /*
+ * Kerneldoc for drm_atomic_helper_check_modeset mandates that
+ * it be invoked when the driver sets ->mode_changed. Since
+ * changing the gamma LUT doesn't depend on any external
+ * resources, it is safe to call it only once.
+ */
+ ret = drm_atomic_helper_check_modeset(crtc->dev, state->state);
+ if (ret)
+ return ret;
+ }
+
+ malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs);
+ return 0;
+}
+
+/*
+ * Check if there is a new CTM and if it contains valid input. Valid here means
+ * that the number is inside the representable range for a Q3.12 number,
+ * not counting the truncation of the fractional part of the input data.
+ *
+ * The COLORADJ registers can be changed atomically.
+ */
+static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
+ struct drm_color_ctm *ctm;
+ int i;
+
+ if (!state->color_mgmt_changed)
+ return 0;
+
+ if (!state->ctm)
+ return 0;
+
+ if (crtc->state->ctm && (crtc->state->ctm->base.id ==
+ state->ctm->base.id))
+ return 0;
+
+ /*
+ * The size of the ctm is checked in
+ * drm_atomic_replace_property_blob_from_id.
+ */
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+ for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
+ /* Convert from S31.32 to Q3.12. */
+ s64 val = ctm->matrix[i];
+ u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) &
+ GENMASK_ULL(14, 0);
+
+ /*
+ * Convert to 2s complement and check the destination's top bit
+ * for overflow. NB: Can't check before converting or it'd
+ * incorrectly reject the case:
+ * sign == 1
+ * mag == 0x2000
+ */
+ if (val & BIT_ULL(63))
+ mag = ~mag + 1;
+ if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14)))
+ return -EINVAL;
+ mc->coloradj_coeffs[i] = mag;
+ }
+
+ return 0;
+}
+
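A worked example of the conversion above: an identity CTM entry of 1.0 arrives as 1 << 32 in S31.32; shifting right by 20 and masking to 15 bits yields 0x1000, which is exactly 1.0 in Q3.12, and bit 14 stays clear so the overflow check passes:

    s64 val = 1LL << 32; /* 1.0 in S31.32 */
    u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) & GENMASK_ULL(14, 0);
    /* mag == 0x1000 == 1.0 in Q3.12 */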
+static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_crtc_state *cs = to_malidp_crtc_state(state);
+ struct malidp_se_config *s = &cs->scaler_config;
+ struct drm_plane *plane;
+ struct videomode vm;
+ const struct drm_plane_state *pstate;
+ u32 h_upscale_factor = 0; /* U16.16 */
+ u32 v_upscale_factor = 0; /* U16.16 */
+ u8 scaling = cs->scaled_planes_mask;
+ int ret;
+
+ if (!scaling) {
+ s->scale_enable = false;
+ goto mclk_calc;
+ }
+
+ /* The scaling engine can only handle one plane at a time. */
+ if (scaling & (scaling - 1))
+ return -EINVAL;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct malidp_plane *mp = to_malidp_plane(plane);
+ u32 phase;
+
+ if (!(mp->layer->id & scaling))
+ continue;
+
+ /*
+ * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h]
+ * to get the U16.16 result.
+ */
+ h_upscale_factor = div_u64((u64)pstate->crtc_w << 32,
+ pstate->src_w);
+ v_upscale_factor = div_u64((u64)pstate->crtc_h << 32,
+ pstate->src_h);
+
+ s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
+ (v_upscale_factor >> 16) >= 2);
+
+ s->input_w = pstate->src_w >> 16;
+ s->input_h = pstate->src_h >> 16;
+ s->output_w = pstate->crtc_w;
+ s->output_h = pstate->crtc_h;
+
+#define SE_N_PHASE 4
+#define SE_SHIFT_N_PHASE 12
+ /* Calculate initial_phase and delta_phase for horizontal. */
+ phase = s->input_w;
+ s->h_init_phase =
+ ((phase << SE_N_PHASE) / s->output_w + 1) / 2;
+
+ phase = s->input_w;
+ phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
+ s->h_delta_phase = phase / s->output_w;
+
+ /* Same for vertical. */
+ phase = s->input_h;
+ s->v_init_phase =
+ ((phase << SE_N_PHASE) / s->output_h + 1) / 2;
+
+ phase = s->input_h;
+ phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
+ s->v_delta_phase = phase / s->output_h;
+#undef SE_N_PHASE
+#undef SE_SHIFT_N_PHASE
+ s->plane_src_id = mp->layer->id;
+ }
+
+ s->scale_enable = true;
+ s->hcoeff = malidp_se_select_coeffs(h_upscale_factor);
+ s->vcoeff = malidp_se_select_coeffs(v_upscale_factor);
+
+mclk_calc:
+ drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
+ ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
}
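To make the U16.16 handling concrete: upscaling a 1280-pixel-wide source (src_w arrives as U16.16) to 1920 CRTC pixels works out as follows:

    /* (1920 << 32) / (1280 << 16) == 0x18000, i.e. 1.5 in U16.16 */
    u32 h_upscale = div_u64((u64)1920 << 32, 1280u << 16);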
static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
const struct drm_plane_state *pstate;
u32 rot_mem_free, rot_mem_usable;
int rotated_planes = 0;
+ int ret;
/*
* check if there is enough rotation memory available for planes
}
}
- return 0;
+ ret = malidp_crtc_atomic_check_gamma(crtc, state);
+ ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
+ ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
+
+ return ret;
}
static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
.atomic_check = malidp_crtc_atomic_check,
};
+static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct malidp_crtc_state *state, *old_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ old_state = to_malidp_crtc_state(crtc->state);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+ memcpy(state->gamma_coeffs, old_state->gamma_coeffs,
+ sizeof(state->gamma_coeffs));
+ memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs,
+ sizeof(state->coloradj_coeffs));
+ memcpy(&state->scaler_config, &old_state->scaler_config,
+ sizeof(state->scaler_config));
+ state->scaled_planes_mask = 0;
+
+ return &state->base;
+}
+
+static void malidp_crtc_reset(struct drm_crtc *crtc)
+{
+ struct malidp_crtc_state *state = NULL;
+
+ if (crtc->state) {
+ state = to_malidp_crtc_state(crtc->state);
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ }
+
+ kfree(state);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+ }
+}
+
+static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_crtc_state *mali_state = NULL;
+
+ if (state) {
+ mali_state = to_malidp_crtc_state(state);
+ __drm_atomic_helper_crtc_destroy_state(state);
+ }
+
+ kfree(mali_state);
+}
+
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = malidp_crtc_reset,
+ .atomic_duplicate_state = malidp_crtc_duplicate_state,
+ .atomic_destroy_state = malidp_crtc_destroy_state,
.enable_vblank = malidp_crtc_enable_vblank,
.disable_vblank = malidp_crtc_disable_vblank,
};
ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
&malidp_crtc_funcs, NULL);
+ if (ret)
+ goto crtc_cleanup_planes;
- if (!ret) {
- drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
- return 0;
- }
+ drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
+ /* No inverse-gamma: it is per-plane. */
+ drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE);
+
+ malidp_se_set_enh_coeffs(malidp->dev);
+
+ return 0;
crtc_cleanup_planes:
malidp_de_planes_destroy(drm);
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/console.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#define MALIDP_CONF_VALID_TIMEOUT 250
+static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
+ u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
+{
+ int i;
+ /* Update all channels with a single gamma curve. */
+ const u32 gamma_write_mask = GENMASK(18, 16);
+ /*
+ * Always write an entire table, so the address field in
+ * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
+ * directly.
+ */
+ malidp_hw_write(hwdev, gamma_write_mask,
+ hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+ for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
+ malidp_hw_write(hwdev, data[i],
+ hwdev->map.coeffs_base +
+ MALIDP_COEF_TABLE_DATA);
+}
+
+static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ if (!crtc->state->color_mgmt_changed)
+ return;
+
+ if (!crtc->state->gamma_lut) {
+ malidp_hw_clearbits(hwdev,
+ MALIDP_DISP_FUNC_GAMMA,
+ MALIDP_DE_DISPLAY_FUNC);
+ } else {
+ struct malidp_crtc_state *mc =
+ to_malidp_crtc_state(crtc->state);
+
+ if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
+ old_state->gamma_lut->base.id))
+ malidp_write_gamma_table(hwdev, mc->gamma_coeffs);
+
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
+ MALIDP_DE_DISPLAY_FUNC);
+ }
+}
+
+static
+void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int i;
+
+ if (!crtc->state->color_mgmt_changed)
+ return;
+
+ if (!crtc->state->ctm) {
+ malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
+ MALIDP_DE_DISPLAY_FUNC);
+ } else {
+ struct malidp_crtc_state *mc =
+ to_malidp_crtc_state(crtc->state);
+
+ if (!old_state->ctm || (crtc->state->ctm->base.id !=
+ old_state->ctm->base.id))
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
+ malidp_hw_write(hwdev,
+ mc->coloradj_coeffs[i],
+ hwdev->map.coeffs_base +
+ MALIDP_COLOR_ADJ_COEF + 4 * i);
+
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
+ MALIDP_DE_DISPLAY_FUNC);
+ }
+}
+
+static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state);
+ struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_se_config *s = &cs->scaler_config;
+ struct malidp_se_config *old_s = &old_cs->scaler_config;
+ u32 se_control = hwdev->map.se_base +
+ ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ 0x10 : 0xC);
+ u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
+ u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
+ u32 val;
+
+ /* Set SE_CONTROL */
+ if (!s->scale_enable) {
+ val = malidp_hw_read(hwdev, se_control);
+ val &= ~MALIDP_SE_SCALING_EN;
+ malidp_hw_write(hwdev, val, se_control);
+ return;
+ }
+
+ hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+ val = malidp_hw_read(hwdev, se_control);
+ val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
+
+ val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
+ val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;
+
+ val |= MALIDP_SE_RGBO_IF_EN;
+ malidp_hw_write(hwdev, val, se_control);
+
+ /* Set IN_SIZE & OUT_SIZE. */
+ val = MALIDP_SE_SET_V_SIZE(s->input_h) |
+ MALIDP_SE_SET_H_SIZE(s->input_w);
+ malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
+ val = MALIDP_SE_SET_V_SIZE(s->output_h) |
+ MALIDP_SE_SET_H_SIZE(s->output_w);
+ malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);
+
+ /* Set phase regs. */
+ malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
+ malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
+ malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
+ malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
+}
+
/*
* set the "config valid" bit and wait until the hardware acts on it
*/
struct drm_pending_vblank_event *event;
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm->dev_private;
- int ret = malidp_set_and_wait_config_valid(drm);
- if (ret)
- DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ if (malidp->crtc.enabled) {
+ /* only set config_valid if the CRTC is enabled */
+ if (malidp_set_and_wait_config_valid(drm))
+ DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ }
event = malidp->crtc.state->event;
if (event) {
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ pm_runtime_get_sync(drm->dev);
drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_modeset_enables(drm, state);
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
+ malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
+ malidp_atomic_commit_se_config(crtc, old_crtc_state);
+ }
+
drm_atomic_helper_commit_planes(drm, state, 0);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
+
malidp_atomic_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(drm, state);
+ pm_runtime_put(drm->dev);
+
drm_atomic_helper_cleanup_planes(drm, state);
}
return true;
}
+static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
+}
+
+DEVICE_ATTR_RO(core_id);
+
+static int malidp_init_sysfs(struct device *dev)
+{
+ int ret = device_create_file(dev, &dev_attr_core_id);
+
+ if (ret)
+ DRM_ERROR("failed to create device file for core_id\n");
+
+ return ret;
+}
+
+static void malidp_fini_sysfs(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_core_id);
+}
+
#define MAX_OUTPUT_CHANNELS 3
+static int malidp_runtime_pm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ /* we can only suspend if the hardware is in config mode */
+ WARN_ON(!hwdev->in_config_mode(hwdev));
+
+ hwdev->pm_suspended = true;
+ clk_disable_unprepare(hwdev->mclk);
+ clk_disable_unprepare(hwdev->aclk);
+ clk_disable_unprepare(hwdev->pclk);
+
+ return 0;
+}
+
+static int malidp_runtime_pm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ clk_prepare_enable(hwdev->pclk);
+ clk_prepare_enable(hwdev->aclk);
+ clk_prepare_enable(hwdev->mclk);
+ hwdev->pm_suspended = false;
+
+ return 0;
+}
+
static int malidp_bind(struct device *dev)
{
struct resource *res;
struct drm_device *drm;
- struct device_node *ep;
struct malidp_drm *malidp;
struct malidp_hw_device *hwdev;
struct platform_device *pdev = to_platform_device(dev);
memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
malidp->dev = hwdev;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hwdev->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hwdev->regs))
goto alloc_fail;
}
- /* Enable APB clock in order to get access to the registers */
- clk_prepare_enable(hwdev->pclk);
- /*
- * Enable AXI clock and main clock so that prefetch can start once
- * the registers are set
- */
- clk_prepare_enable(hwdev->aclk);
- clk_prepare_enable(hwdev->mclk);
+ drm->dev_private = malidp;
+ dev_set_drvdata(dev, drm);
+
+ /* Enable power management */
+ pm_runtime_enable(dev);
+
+ /* Resume device to enable the clocks */
+ if (pm_runtime_enabled(dev))
+ pm_runtime_get_sync(dev);
+ else
+ malidp_runtime_pm_resume(dev);
dev_id = of_match_device(malidp_drm_of_match, dev);
if (!dev_id) {
DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
(version >> 12) & 0xf, (version >> 8) & 0xf);
+ malidp->core_id = version;
+
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
- drm->dev_private = malidp;
- dev_set_drvdata(dev, drm);
atomic_set(&malidp->config_valid, 0);
init_waitqueue_head(&malidp->wq);
ret = malidp_init(drm);
if (ret < 0)
+ goto query_hw_fail;
+
+ ret = malidp_init_sysfs(dev);
+ if (ret)
goto init_fail;
/* Set the CRTC's port so that the encoder component can find it */
- ep = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (!ep) {
- ret = -EINVAL;
- goto port_fail;
- }
- malidp->crtc.port = of_get_next_parent(ep);
+ malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
}
+ pm_runtime_put(dev);
drm_mode_config_reset(drm);
drm_fbdev_cma_fini(malidp->fbdev);
malidp->fbdev = NULL;
}
+ drm_kms_helper_poll_fini(drm);
fbdev_fail:
+ pm_runtime_get_sync(dev);
drm_vblank_cleanup(drm);
vblank_fail:
malidp_se_irq_fini(drm);
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
-port_fail:
- malidp_fini(drm);
init_fail:
+ malidp_fini_sysfs(dev);
+ malidp_fini(drm);
+query_hw_fail:
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
-query_hw_fail:
- clk_disable_unprepare(hwdev->mclk);
- clk_disable_unprepare(hwdev->aclk);
- clk_disable_unprepare(hwdev->pclk);
drm_dev_unref(drm);
alloc_fail:
of_reserved_mem_device_release(dev);
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
drm_dev_unregister(drm);
if (malidp->fbdev) {
malidp->fbdev = NULL;
}
drm_kms_helper_poll_fini(drm);
+ pm_runtime_get_sync(dev);
+ drm_vblank_cleanup(drm);
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
- drm_vblank_cleanup(drm);
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
+ malidp_fini_sysfs(dev);
malidp_fini(drm);
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- clk_disable_unprepare(hwdev->mclk);
- clk_disable_unprepare(hwdev->aclk);
- clk_disable_unprepare(hwdev->pclk);
drm_dev_unref(drm);
of_reserved_mem_device_release(dev);
}
static int malidp_platform_probe(struct platform_device *pdev)
{
- struct device_node *port, *ep;
+ struct device_node *port;
struct component_match *match = NULL;
if (!pdev->dev.of_node)
return -ENODEV;
/* there is only one output port inside each device, find it */
- ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
- if (!ep)
+ port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!port)
return -ENODEV;
- if (!of_device_is_available(ep)) {
- of_node_put(ep);
- return -ENODEV;
- }
-
- /* add the remote encoder port as component */
- port = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
- if (!port || !of_device_is_available(port)) {
- of_node_put(port);
- return -EAGAIN;
- }
-
drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
port);
of_node_put(port);
return 0;
}
+static int __maybe_unused malidp_pm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_kms_helper_poll_disable(drm);
+ console_lock();
+ drm_fbdev_cma_set_suspend(malidp->fbdev, 1);
+ console_unlock();
+ malidp->pm_state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(malidp->pm_state)) {
+ console_lock();
+ drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
+ console_unlock();
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(malidp->pm_state);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused malidp_pm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_atomic_helper_resume(drm, malidp->pm_state);
+ console_lock();
+ drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
+ console_unlock();
+ drm_kms_helper_poll_enable(drm);
+
+ return 0;
+}
+
+static const struct dev_pm_ops malidp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume)
+ SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
+};
+
static struct platform_driver malidp_platform_driver = {
.probe = malidp_platform_probe,
.remove = malidp_platform_remove,
.driver = {
.name = "mali-dp",
+ .pm = &malidp_pm_ops,
.of_match_table = malidp_drm_of_match,
},
};
struct drm_crtc crtc;
wait_queue_head_t wq;
atomic_t config_valid;
+ struct drm_atomic_state *pm_state;
+ u32 core_id;
};
#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
+struct malidp_crtc_state {
+ struct drm_crtc_state base;
+ u32 gamma_coeffs[MALIDP_COEFFTAB_NUM_COEFFS];
+ u32 coloradj_coeffs[MALIDP_COLORADJ_NUM_COEFFS];
+ struct malidp_se_config scaler_config;
+ /* Bitfield of all the planes that have requested a scaled output. */
+ u8 scaled_planes_mask;
+};
+
+#define to_malidp_crtc_state(x) container_of(x, struct malidp_crtc_state, base)
+
int malidp_de_planes_init(struct drm_device *drm);
void malidp_de_planes_destroy(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
* in an attempt to provide to the rest of the driver code a unified view
*/
+#include <linux/clk.h>
#include <linux/types.h>
#include <linux/io.h>
#include <drm/drmP.h>
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
};
+#define SE_N_SCALING_COEFFS 96
+static const u16 dp500_se_scaling_coeffs[][SE_N_SCALING_COEFFS] = {
+ [MALIDP_UPSCALING_COEFFS - 1] = {
+ 0x0000, 0x0001, 0x0007, 0x0011, 0x001e, 0x002e, 0x003f, 0x0052,
+ 0x0064, 0x0073, 0x007d, 0x0080, 0x007a, 0x006c, 0x0053, 0x002f,
+ 0x0000, 0x3fc6, 0x3f83, 0x3f39, 0x3eea, 0x3e9b, 0x3e4f, 0x3e0a,
+ 0x3dd4, 0x3db0, 0x3da2, 0x3db1, 0x3dde, 0x3e2f, 0x3ea5, 0x3f40,
+ 0x0000, 0x00e5, 0x01ee, 0x0315, 0x0456, 0x05aa, 0x0709, 0x086c,
+ 0x09c9, 0x0b15, 0x0c4a, 0x0d5d, 0x0e4a, 0x0f06, 0x0f91, 0x0fe5,
+ 0x1000, 0x0fe5, 0x0f91, 0x0f06, 0x0e4a, 0x0d5d, 0x0c4a, 0x0b15,
+ 0x09c9, 0x086c, 0x0709, 0x05aa, 0x0456, 0x0315, 0x01ee, 0x00e5,
+ 0x0000, 0x3f40, 0x3ea5, 0x3e2f, 0x3dde, 0x3db1, 0x3da2, 0x3db0,
+ 0x3dd4, 0x3e0a, 0x3e4f, 0x3e9b, 0x3eea, 0x3f39, 0x3f83, 0x3fc6,
+ 0x0000, 0x002f, 0x0053, 0x006c, 0x007a, 0x0080, 0x007d, 0x0073,
+ 0x0064, 0x0052, 0x003f, 0x002e, 0x001e, 0x0011, 0x0007, 0x0001
+ },
+ [MALIDP_DOWNSCALING_1_5_COEFFS - 1] = {
+ 0x0059, 0x004f, 0x0041, 0x002e, 0x0016, 0x3ffb, 0x3fd9, 0x3fb4,
+ 0x3f8c, 0x3f62, 0x3f36, 0x3f09, 0x3edd, 0x3eb3, 0x3e8d, 0x3e6c,
+ 0x3e52, 0x3e3f, 0x3e35, 0x3e37, 0x3e46, 0x3e61, 0x3e8c, 0x3ec5,
+ 0x3f0f, 0x3f68, 0x3fd1, 0x004a, 0x00d3, 0x0169, 0x020b, 0x02b8,
+ 0x036e, 0x042d, 0x04f2, 0x05b9, 0x0681, 0x0745, 0x0803, 0x08ba,
+ 0x0965, 0x0a03, 0x0a91, 0x0b0d, 0x0b75, 0x0bc6, 0x0c00, 0x0c20,
+ 0x0c28, 0x0c20, 0x0c00, 0x0bc6, 0x0b75, 0x0b0d, 0x0a91, 0x0a03,
+ 0x0965, 0x08ba, 0x0803, 0x0745, 0x0681, 0x05b9, 0x04f2, 0x042d,
+ 0x036e, 0x02b8, 0x020b, 0x0169, 0x00d3, 0x004a, 0x3fd1, 0x3f68,
+ 0x3f0f, 0x3ec5, 0x3e8c, 0x3e61, 0x3e46, 0x3e37, 0x3e35, 0x3e3f,
+ 0x3e52, 0x3e6c, 0x3e8d, 0x3eb3, 0x3edd, 0x3f09, 0x3f36, 0x3f62,
+ 0x3f8c, 0x3fb4, 0x3fd9, 0x3ffb, 0x0016, 0x002e, 0x0041, 0x004f
+ },
+ [MALIDP_DOWNSCALING_2_COEFFS - 1] = {
+ 0x3f19, 0x3f03, 0x3ef0, 0x3edf, 0x3ed0, 0x3ec5, 0x3ebd, 0x3eb9,
+ 0x3eb9, 0x3ebf, 0x3eca, 0x3ed9, 0x3eef, 0x3f0a, 0x3f2c, 0x3f52,
+ 0x3f7f, 0x3fb0, 0x3fe8, 0x0026, 0x006a, 0x00b4, 0x0103, 0x0158,
+ 0x01b1, 0x020d, 0x026c, 0x02cd, 0x032f, 0x0392, 0x03f4, 0x0455,
+ 0x04b4, 0x051e, 0x0585, 0x05eb, 0x064c, 0x06a8, 0x06fe, 0x074e,
+ 0x0796, 0x07d5, 0x080c, 0x0839, 0x085c, 0x0875, 0x0882, 0x0887,
+ 0x0881, 0x0887, 0x0882, 0x0875, 0x085c, 0x0839, 0x080c, 0x07d5,
+ 0x0796, 0x074e, 0x06fe, 0x06a8, 0x064c, 0x05eb, 0x0585, 0x051e,
+ 0x04b4, 0x0455, 0x03f4, 0x0392, 0x032f, 0x02cd, 0x026c, 0x020d,
+ 0x01b1, 0x0158, 0x0103, 0x00b4, 0x006a, 0x0026, 0x3fe8, 0x3fb0,
+ 0x3f7f, 0x3f52, 0x3f2c, 0x3f0a, 0x3eef, 0x3ed9, 0x3eca, 0x3ebf,
+ 0x3eb9, 0x3eb9, 0x3ebd, 0x3ec5, 0x3ed0, 0x3edf, 0x3ef0, 0x3f03
+ },
+ [MALIDP_DOWNSCALING_2_75_COEFFS - 1] = {
+ 0x3f51, 0x3f60, 0x3f71, 0x3f84, 0x3f98, 0x3faf, 0x3fc8, 0x3fe3,
+ 0x0000, 0x001f, 0x0040, 0x0064, 0x008a, 0x00b1, 0x00da, 0x0106,
+ 0x0133, 0x0160, 0x018e, 0x01bd, 0x01ec, 0x021d, 0x024e, 0x0280,
+ 0x02b2, 0x02e4, 0x0317, 0x0349, 0x037c, 0x03ad, 0x03df, 0x0410,
+ 0x0440, 0x0468, 0x048f, 0x04b3, 0x04d6, 0x04f8, 0x0516, 0x0533,
+ 0x054e, 0x0566, 0x057c, 0x0590, 0x05a0, 0x05ae, 0x05ba, 0x05c3,
+ 0x05c9, 0x05c3, 0x05ba, 0x05ae, 0x05a0, 0x0590, 0x057c, 0x0566,
+ 0x054e, 0x0533, 0x0516, 0x04f8, 0x04d6, 0x04b3, 0x048f, 0x0468,
+ 0x0440, 0x0410, 0x03df, 0x03ad, 0x037c, 0x0349, 0x0317, 0x02e4,
+ 0x02b2, 0x0280, 0x024e, 0x021d, 0x01ec, 0x01bd, 0x018e, 0x0160,
+ 0x0133, 0x0106, 0x00da, 0x00b1, 0x008a, 0x0064, 0x0040, 0x001f,
+ 0x0000, 0x3fe3, 0x3fc8, 0x3faf, 0x3f98, 0x3f84, 0x3f71, 0x3f60
+ },
+ [MALIDP_DOWNSCALING_4_COEFFS - 1] = {
+ 0x0094, 0x00a9, 0x00be, 0x00d4, 0x00ea, 0x0101, 0x0118, 0x012f,
+ 0x0148, 0x0160, 0x017a, 0x0193, 0x01ae, 0x01c8, 0x01e4, 0x01ff,
+ 0x021c, 0x0233, 0x024a, 0x0261, 0x0278, 0x028f, 0x02a6, 0x02bd,
+ 0x02d4, 0x02eb, 0x0302, 0x0319, 0x032f, 0x0346, 0x035d, 0x0374,
+ 0x038a, 0x0397, 0x03a3, 0x03af, 0x03bb, 0x03c6, 0x03d1, 0x03db,
+ 0x03e4, 0x03ed, 0x03f6, 0x03fe, 0x0406, 0x040d, 0x0414, 0x041a,
+ 0x0420, 0x041a, 0x0414, 0x040d, 0x0406, 0x03fe, 0x03f6, 0x03ed,
+ 0x03e4, 0x03db, 0x03d1, 0x03c6, 0x03bb, 0x03af, 0x03a3, 0x0397,
+ 0x038a, 0x0374, 0x035d, 0x0346, 0x032f, 0x0319, 0x0302, 0x02eb,
+ 0x02d4, 0x02bd, 0x02a6, 0x028f, 0x0278, 0x0261, 0x024a, 0x0233,
+ 0x021c, 0x01ff, 0x01e4, 0x01c8, 0x01ae, 0x0193, 0x017a, 0x0160,
+ 0x0148, 0x012f, 0x0118, 0x0101, 0x00ea, 0x00d4, 0x00be, 0x00a9
+ },
+};
+
#define MALIDP_DE_DEFAULT_PREFETCH_START 5
static int malidp500_query_hw(struct malidp_hw_device *hwdev)
return w * drm_format_plane_cpp(fmt, 0) * 8;
}
+static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
+ u32 direction,
+ u16 addr,
+ u8 coeffs_id)
+{
+ int i;
+ u16 scaling_control = MALIDP500_SE_CONTROL + MALIDP_SE_SCALING_CONTROL;
+
+ malidp_hw_write(hwdev,
+ direction | (addr & MALIDP_SE_COEFFTAB_ADDR_MASK),
+ scaling_control + MALIDP_SE_COEFFTAB_ADDR);
+ /* each table holds SE_N_SCALING_COEFFS entries; write them all */
+ for (i = 0; i < SE_N_SCALING_COEFFS; ++i)
+ malidp_hw_write(hwdev, MALIDP_SE_SET_COEFFTAB_DATA(
+ dp500_se_scaling_coeffs[coeffs_id][i]),
+ scaling_control + MALIDP_SE_COEFFTAB_DATA);
+}
+
+static int malidp500_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct malidp_se_config *old_config)
+{
+ /* Get array indices into dp500_se_scaling_coeffs. */
+ u8 h = (u8)se_config->hcoeff - 1;
+ u8 v = (u8)se_config->vcoeff - 1;
+
+ if (WARN_ON(h >= ARRAY_SIZE(dp500_se_scaling_coeffs) ||
+ v >= ARRAY_SIZE(dp500_se_scaling_coeffs)))
+ return -EINVAL;
+
+ if ((h == v) && (se_config->hcoeff != old_config->hcoeff ||
+ se_config->vcoeff != old_config->vcoeff)) {
+ malidp500_se_write_pp_coefftab(hwdev,
+ (MALIDP_SE_V_COEFFTAB |
+ MALIDP_SE_H_COEFFTAB),
+ 0, v);
+ } else {
+ if (se_config->vcoeff != old_config->vcoeff)
+ malidp500_se_write_pp_coefftab(hwdev,
+ MALIDP_SE_V_COEFFTAB,
+ 0, v);
+ if (se_config->hcoeff != old_config->hcoeff)
+ malidp500_se_write_pp_coefftab(hwdev,
+ MALIDP_SE_H_COEFFTAB,
+ 0, h);
+ }
+
+ return 0;
+}
+
+static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct videomode *vm)
+{
+ unsigned long mclk;
+ unsigned long pxlclk = vm->pixelclock; /* Hz */
+ unsigned long htotal = vm->hactive + vm->hfront_porch +
+ vm->hback_porch + vm->hsync_len;
+ unsigned long input_size = se_config->input_w * se_config->input_h;
+ unsigned long a = 10;
+ long ret;
+
+ /*
+ * mclk = max(a, 1.5) * pxlclk
+ *
+ * To avoid floating point calculation, use 15 instead of 1.5 and
+ * divide by 10 to get mclk.
+ */
+ if (se_config->scale_enable) {
+ a = 15 * input_size / (htotal * se_config->output_h);
+ if (a < 15)
+ a = 15;
+ }
+ mclk = a * pxlclk / 10;
+ ret = clk_get_rate(hwdev->mclk);
+ if (ret < mclk) {
+ DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
+ mclk / 1000);
+ return -EINVAL;
+ }
+ return ret;
+}
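A worked example of the x10 fixed-point trick, with illustrative numbers that are not taken from the patch: downscaling a 1920x1080 source to a 540-line output on a mode with htotal = 2200 gives

    a    = 15 * (1920 * 1080) / (2200 * 540)
         = 31104000 / 1188000 = 26        /* ~2.6, scaled by 10 */
    mclk = 26 * pxlclk / 10               /* i.e. 2.6 x the pixel clock */

With scaling disabled, a keeps its default value of 10 and mclk simply equals pxlclk.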
+
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
return w * bytes_per_col;
}
+static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct malidp_se_config *old_config)
+{
+ u32 mask = MALIDP550_SE_CTL_VCSEL(MALIDP550_SE_CTL_SEL_MASK) |
+ MALIDP550_SE_CTL_HCSEL(MALIDP550_SE_CTL_SEL_MASK);
+ u32 new_value = MALIDP550_SE_CTL_VCSEL(se_config->vcoeff) |
+ MALIDP550_SE_CTL_HCSEL(se_config->hcoeff);
+
+ malidp_hw_clearbits(hwdev, mask, MALIDP550_SE_CONTROL);
+ malidp_hw_setbits(hwdev, new_value, MALIDP550_SE_CONTROL);
+ return 0;
+}
+
+static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct videomode *vm)
+{
+ unsigned long mclk;
+ unsigned long pxlclk = vm->pixelclock;
+ unsigned long htotal = vm->hactive + vm->hfront_porch +
+ vm->hback_porch + vm->hsync_len;
+ unsigned long numerator = 1, denominator = 1;
+ long ret;
+
+ if (se_config->scale_enable) {
+ numerator = max(se_config->input_w, se_config->output_w) *
+ se_config->input_h;
+ numerator += se_config->output_w *
+ (se_config->output_h -
+ min(se_config->input_h, se_config->output_h));
+ denominator = (htotal - 2) * se_config->output_h;
+ }
+
+ /* mclk can't be slower than pxlclk. */
+ if (numerator < denominator)
+ numerator = denominator = 1;
+ mclk = (pxlclk * numerator) / denominator;
+ ret = clk_get_rate(hwdev->mclk);
+ if (ret < mclk) {
+ DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
+ mclk / 1000);
+ return -EINVAL;
+ }
+ return ret;
+}
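The DP550 bound can be made concrete the same way (again with illustrative numbers): halving 1920x1080 to 960x540 on a mode with htotal = 2200 gives

    numerator   = max(1920, 960) * 1080
                  + 960 * (540 - min(1080, 540)) = 2073600
    denominator = (2200 - 2) * 540               = 1186920
    mclk        = pxlclk * 2073600 / 1186920     /* ~1.75 x pxlclk */

so mclk has to run roughly 75% faster than the pixel clock for this scaling job.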
+
static int malidp650_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
[MALIDP_500] = {
.map = {
+ .coeffs_base = MALIDP500_COEFFS_BASE,
.se_base = MALIDP500_SE_BASE,
.dc_base = MALIDP500_DC_BASE,
.out_depth_base = MALIDP500_OUTPUT_DEPTH,
.set_config_valid = malidp500_set_config_valid,
.modeset = malidp500_modeset,
.rotmem_required = malidp500_rotmem_required,
+ .se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
+ .se_calc_mclk = malidp500_se_calc_mclk,
.features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
},
[MALIDP_550] = {
.map = {
+ .coeffs_base = MALIDP550_COEFFS_BASE,
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp550_rotmem_required,
+ .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
+ .se_calc_mclk = malidp550_se_calc_mclk,
.features = 0,
},
[MALIDP_650] = {
.map = {
+ .coeffs_base = MALIDP550_COEFFS_BASE,
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp550_rotmem_required,
+ .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
+ .se_calc_mclk = malidp550_se_calc_mclk,
.features = 0,
},
};
u16 stride_offset; /* Offset to the first stride register. */
};
+enum malidp_scaling_coeff_set {
+ MALIDP_UPSCALING_COEFFS = 1,
+ MALIDP_DOWNSCALING_1_5_COEFFS = 2,
+ MALIDP_DOWNSCALING_2_COEFFS = 3,
+ MALIDP_DOWNSCALING_2_75_COEFFS = 4,
+ MALIDP_DOWNSCALING_4_COEFFS = 5,
+};
+
+struct malidp_se_config {
+ u8 scale_enable : 1;
+ u8 enhancer_enable : 1;
+ u8 hcoeff : 3;
+ u8 vcoeff : 3;
+ u8 plane_src_id;
+ u16 input_w, input_h;
+ u16 output_w, output_h;
+ u32 h_init_phase, h_delta_phase;
+ u32 v_init_phase, v_delta_phase;
+};
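For illustration, a 2x down-scale would be described by a configuration along these lines (the values are hypothetical, plane_src_id is omitted because the layer IDs are driver-internal, and the phase fields are left at zero for brevity):

    static const struct malidp_se_config example_cfg = {
            .scale_enable = 1,
            .hcoeff   = MALIDP_DOWNSCALING_2_COEFFS,
            .vcoeff   = MALIDP_DOWNSCALING_2_COEFFS,
            .input_w  = 1920, .input_h  = 1080,
            .output_w = 960,  .output_h = 540,
    };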
+
/* regmap features */
#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
struct malidp_hw_regmap {
/* address offset of the DE register bank */
/* is always 0x0000 */
+ /* address offset of the DE coefficients registers */
+ const u16 coeffs_base;
/* address offset of the SE registers bank */
const u16 se_base;
/* address offset of the DC registers bank */
*/
int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+ int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct malidp_se_config *old_config);
+
+ long (*se_calc_mclk)(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct videomode *vm);
+
u8 features;
u8 min_line_size;
u16 max_line_size;
+ /* track the device PM state */
+ bool pm_suspended;
+
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
{
+ WARN_ON(hwdev->pm_suspended);
return readl(hwdev->regs + reg);
}
static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
u32 value, u32 reg)
{
+ WARN_ON(hwdev->pm_suspended);
writel(value, hwdev->regs + reg);
}
return !(pitch & (hwdev->map.bus_align_bytes - 1));
}
+/* U16.16 */
+#define FP_1_00000 0x00010000 /* 1.0 */
+#define FP_0_66667 0x0000AAAA /* 0.6667 = 1/1.5 */
+#define FP_0_50000 0x00008000 /* 0.5 = 1/2 */
+#define FP_0_36363 0x00005D17 /* 0.36363 = 1/2.75 */
+#define FP_0_25000 0x00004000 /* 0.25 = 1/4 */
+
+static inline enum malidp_scaling_coeff_set
+malidp_se_select_coeffs(u32 upscale_factor)
+{
+ return (upscale_factor >= FP_1_00000) ? MALIDP_UPSCALING_COEFFS :
+ (upscale_factor >= FP_0_66667) ? MALIDP_DOWNSCALING_1_5_COEFFS :
+ (upscale_factor >= FP_0_50000) ? MALIDP_DOWNSCALING_2_COEFFS :
+ (upscale_factor >= FP_0_36363) ? MALIDP_DOWNSCALING_2_75_COEFFS :
+ MALIDP_DOWNSCALING_4_COEFFS;
+}
+
+#undef FP_0_25000
+#undef FP_0_36363
+#undef FP_0_50000
+#undef FP_0_66667
+#undef FP_1_00000
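As a sketch of how the selector is meant to be fed (the ratio computation here is illustrative, not the driver's actual code): the scale factor is destination size over source size in U16.16, so any ratio >= 1.0 picks the upscaling table and smaller ratios fall through to the matching down-scaling one:

    static inline enum malidp_scaling_coeff_set
    example_pick_coeffs(u32 src_w, u32 dst_w)
    {
            /* dst/src as U16.16; sizes are 13-bit, so the shift can't overflow */
            u32 upscale_factor = (dst_w << 16) / src_w;

            return malidp_se_select_coeffs(upscale_factor);
    }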
+
+static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
+{
+ static const s32 enhancer_coeffs[] = {
+ -8, -8, -8, -8, 128, -8, -8, -8, -8
+ };
+ u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
+ MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
+ u32 image_enh = hwdev->map.se_base +
+ ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
+ u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
+ int i;
+
+ malidp_hw_write(hwdev, val, image_enh);
+ for (i = 0; i < ARRAY_SIZE(enhancer_coeffs); ++i)
+ malidp_hw_write(hwdev, enhancer_coeffs[i], enh_coeffs + i * 4);
+}
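Read as a 3x3 kernel — an inference from the array shape, not from anything stated in the patch — the enhancer coefficients form a mild sharpening filter whose weights sum to 64:

    -8   -8   -8
    -8  128   -8
    -8   -8   -8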
+
/*
* background color components are defined as 12bits values,
* they will be shifted right when stored on hardware that
#define MALIDP_BGND_COLOR_G 0x000
#define MALIDP_BGND_COLOR_B 0x000
+#define MALIDP_COLORADJ_NUM_COEFFS 12
+#define MALIDP_COEFFTAB_NUM_COEFFS 64
+
+#define MALIDP_GAMMA_LUT_SIZE 4096
+
#endif /* __MALIDP_HW_H__ */
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "malidp_hw.h"
#include "malidp_drv.h"
#define MALIDP_LAYER_FORMAT 0x000
#define MALIDP_LAYER_CONTROL 0x004
#define LAYER_ENABLE (1 << 0)
+#define LAYER_FLOWCFG_MASK 7
+#define LAYER_FLOWCFG(x) (((x) & LAYER_FLOWCFG_MASK) << 1)
+#define LAYER_FLOWCFG_SCALE_SE 3
#define LAYER_ROT_OFFSET 8
#define LAYER_H_FLIP (1 << 10)
#define LAYER_V_FLIP (1 << 11)
devm_kfree(plane->dev->dev, mp);
}
+/*
+ * Replicate what the default ->reset hook does: free the state pointer and
+ * allocate a new empty object. We just need enough space to store
+ * a malidp_plane_state instead of a drm_plane_state.
+ */
+static void malidp_plane_reset(struct drm_plane *plane)
+{
+ struct malidp_plane_state *state = to_malidp_plane_state(plane->state);
+
+ if (state)
+ __drm_atomic_helper_plane_destroy_state(&state->base);
+ kfree(state);
+ plane->state = NULL;
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.plane = plane;
+ state->base.rotation = DRM_ROTATE_0;
+ plane->state = &state->base;
+ }
+}
+
static struct
drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
kfree(m_state);
}
+static void malidp_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct malidp_plane_state *ms = to_malidp_plane_state(state);
+
+ drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
+ drm_printf(p, "\tformat_id=%u\n", ms->format);
+ drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
+}
+
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
- .reset = drm_atomic_helper_plane_reset,
+ .reset = malidp_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
.atomic_destroy_state = malidp_destroy_plane_state,
+ .atomic_print_state = malidp_plane_atomic_print_state,
};
+static int malidp_se_check_scaling(struct malidp_plane *mp,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ struct malidp_crtc_state *mc;
+ struct drm_rect clip = { 0 };
+ u32 src_w, src_h;
+ int ret;
+
+ if (!crtc_state)
+ return -EINVAL;
+
+ mc = to_malidp_crtc_state(crtc_state);
+
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ ret = drm_plane_helper_check_state(state, &clip, 0, INT_MAX, true, true);
+ if (ret)
+ return ret;
+
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
+ /* Scaling not necessary for this plane. */
+ mc->scaled_planes_mask &= ~(mp->layer->id);
+ return 0;
+ }
+
+ if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
+ return -EINVAL;
+
+ mc->scaled_planes_mask |= mp->layer->id;
+ /* Defer scaling requirements calculation to the crtc check. */
+ return 0;
+}
+
static int malidp_de_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
- struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
- struct drm_rect clip = { 0 };
int i, ret;
- u32 src_w, src_h;
if (!state->crtc || !state->fb)
return 0;
}
}
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
-
if ((state->crtc_w > mp->hwdev->max_line_size) ||
(state->crtc_h > mp->hwdev->max_line_size) ||
(state->crtc_w < mp->hwdev->min_line_size) ||
(state->fb->pitches[1] != state->fb->pitches[2]))
return -EINVAL;
+ ret = malidp_se_check_scaling(mp, state);
+ if (ret)
+ return ret;
+
/* packed RGB888 / BGR888 can't be rotated or flipped */
if (state->rotation != DRM_ROTATE_0 &&
(fb->format->format == DRM_FORMAT_RGB888 ||
fb->format->format == DRM_FORMAT_BGR888))
return -EINVAL;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
- ret = drm_plane_helper_check_state(state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
val &= ~LAYER_COMP_MASK;
val |= LAYER_COMP_PIXEL;
+ val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
+ if (plane->state->crtc) {
+ struct malidp_crtc_state *m =
+ to_malidp_crtc_state(plane->state->crtc->state);
+
+ if (m->scaler_config.scale_enable &&
+ m->scaler_config.plane_src_id == mp->layer->id)
+ val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
+ }
+
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
{
struct malidp_plane *mp = to_malidp_plane(plane);
- malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE,
+ malidp_hw_clearbits(mp->hwdev,
+ LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
mp->layer->base + MALIDP_LAYER_CONTROL);
}
/* bit masks that are common between products */
#define MALIDP_CFG_VALID (1 << 0)
+#define MALIDP_DISP_FUNC_GAMMA (1 << 0)
+#define MALIDP_DISP_FUNC_CADJ (1 << 4)
#define MALIDP_DISP_FUNC_ILACED (1 << 8)
/* register offsets for IRQ management */
#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16)
+/* register offsets relative to MALIDP5x0_COEFFS_BASE */
+#define MALIDP_COLOR_ADJ_COEF 0x00000
+#define MALIDP_COEF_TABLE_ADDR 0x00030
+#define MALIDP_COEF_TABLE_DATA 0x00034
+
+/* Scaling engine registers and masks. */
+#define MALIDP_SE_SCALING_EN (1 << 0)
+#define MALIDP_SE_ALPHA_EN (1 << 1)
+#define MALIDP_SE_ENH_MASK 3
+#define MALIDP_SE_ENH(x) (((x) & MALIDP_SE_ENH_MASK) << 2)
+#define MALIDP_SE_RGBO_IF_EN (1 << 4)
+#define MALIDP550_SE_CTL_SEL_MASK 7
+#define MALIDP550_SE_CTL_VCSEL(x) \
+ (((x) & MALIDP550_SE_CTL_SEL_MASK) << 20)
+#define MALIDP550_SE_CTL_HCSEL(x) \
+ (((x) & MALIDP550_SE_CTL_SEL_MASK) << 16)
+
+/* Blocks with offsets from SE_CONTROL register. */
+#define MALIDP_SE_LAYER_CONTROL 0x14
+#define MALIDP_SE_L0_IN_SIZE 0x00
+#define MALIDP_SE_L0_OUT_SIZE 0x04
+#define MALIDP_SE_SET_V_SIZE(x) (((x) & 0x1fff) << 16)
+#define MALIDP_SE_SET_H_SIZE(x) (((x) & 0x1fff) << 0)
+#define MALIDP_SE_SCALING_CONTROL 0x24
+#define MALIDP_SE_H_INIT_PH 0x00
+#define MALIDP_SE_H_DELTA_PH 0x04
+#define MALIDP_SE_V_INIT_PH 0x08
+#define MALIDP_SE_V_DELTA_PH 0x0c
+#define MALIDP_SE_COEFFTAB_ADDR 0x10
+#define MALIDP_SE_COEFFTAB_ADDR_MASK 0x7f
+#define MALIDP_SE_V_COEFFTAB (1 << 8)
+#define MALIDP_SE_H_COEFFTAB (1 << 9)
+#define MALIDP_SE_SET_V_COEFFTAB_ADDR(x) \
+ (MALIDP_SE_V_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
+#define MALIDP_SE_SET_H_COEFFTAB_ADDR(x) \
+ (MALIDP_SE_H_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
+#define MALIDP_SE_COEFFTAB_DATA 0x14
+#define MALIDP_SE_COEFFTAB_DATA_MASK 0x3fff
+#define MALIDP_SE_SET_COEFFTAB_DATA(x) \
+ ((x) & MALIDP_SE_COEFFTAB_DATA_MASK)
+/* Image enhancement coefficients register offset */
+#define MALIDP_SE_IMAGE_ENH 0x3C
+/* ENH_LIMITS offset 0x0 */
+#define MALIDP_SE_ENH_LOW_LEVEL 24
+#define MALIDP_SE_ENH_HIGH_LEVEL 63
+#define MALIDP_SE_ENH_LIMIT_MASK 0xfff
+#define MALIDP_SE_SET_ENH_LIMIT_LOW(x) \
+ ((x) & MALIDP_SE_ENH_LIMIT_MASK)
+#define MALIDP_SE_SET_ENH_LIMIT_HIGH(x) \
+ (((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
+#define MALIDP_SE_ENH_COEFF0 0x04
+
/* register offsets and bits specific to DP500 */
#define MALIDP500_ADDR_SPACE_SIZE 0x01000
#define MALIDP500_DC_BASE 0x00000
#define MALIDP500_COLOR_ADJ_COEF 0x00078
#define MALIDP500_COEF_TABLE_ADDR 0x000a8
#define MALIDP500_COEF_TABLE_DATA 0x000ac
+
+/*
+ * The YUV2RGB coefficients on the DP500 are not in the video layer's register
+ * block. They belong in a separate block above the layer's registers, hence
+ * the negative offset.
+ */
+#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
+/*
+ * To match DP550/650, the start of the coeffs registers is
+ * at COLORADJ_COEFF0 instead of at YUV_RGB_COEF1.
+ */
+#define MALIDP500_COEFFS_BASE 0x00078
#define MALIDP500_DE_LV_BASE 0x00100
#define MALIDP500_DE_LV_PTR_BASE 0x00124
#define MALIDP500_DE_LG1_BASE 0x00200
#define MALIDP500_DE_LG2_BASE 0x00300
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
#define MALIDP500_SE_BASE 0x00c00
+#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_PTR_BASE 0x00e0c
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP550_DE_DISP_SIDEBAND 0x00040
#define MALIDP550_DE_BGND_COLOR 0x00044
#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
-#define MALIDP550_DE_COLOR_COEF 0x00050
-#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080
-#define MALIDP550_DE_COEF_TABLE_DATA 0x00084
+#define MALIDP550_COEFFS_BASE 0x00050
#define MALIDP550_DE_LV1_BASE 0x00100
#define MALIDP550_DE_LV1_PTR_BASE 0x00124
#define MALIDP550_DE_LV2_BASE 0x00200
#define MALIDP550_DE_LS_PTR_BASE 0x0042c
#define MALIDP550_DE_PERF_BASE 0x00500
#define MALIDP550_SE_BASE 0x08000
+#define MALIDP550_SE_CONTROL 0x08010
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
#define MALIDP550_DC_CONFIG_REQ (1 << 16)
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .kmap_atomic = armada_gem_dmabuf_no_kmap,
- .kunmap_atomic = armada_gem_dmabuf_no_kunmap,
- .kmap = armada_gem_dmabuf_no_kmap,
- .kunmap = armada_gem_dmabuf_no_kunmap,
+ .map_atomic = armada_gem_dmabuf_no_kmap,
+ .unmap_atomic = armada_gem_dmabuf_no_kunmap,
+ .map = armada_gem_dmabuf_no_kmap,
+ .unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
};
}
static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
int i;
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int ast_mm_init(struct ast_private *ast)
#include <linux/of_graph.h>
#include <drm/drmP.h>
-#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
#include "atmel_hlcdc_dc.h"
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int atmel_hlcdc_check_endpoint(struct drm_device *dev,
- const struct of_endpoint *ep)
-{
- struct device_node *np;
- void *obj;
-
- np = of_graph_get_remote_port_parent(ep->local_node);
-
- obj = of_drm_find_panel(np);
- if (!obj)
- obj = of_drm_find_bridge(np);
-
- of_node_put(np);
-
- return obj ? 0 : -EPROBE_DEFER;
-}
-
static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
- const struct of_endpoint *ep)
+ const struct device_node *np)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct atmel_hlcdc_rgb_output *output;
- struct device_node *np;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
output->encoder.possible_crtcs = 0x1;
- np = of_graph_get_remote_port_parent(ep->local_node);
-
- ret = -EPROBE_DEFER;
+ ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
+ if (ret)
+ return ret;
- panel = of_drm_find_panel(np);
if (panel) {
- of_node_put(np);
output->connector.dpms = DRM_MODE_DPMS_OFF;
output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_helper_add(&output->connector,
return 0;
}
- bridge = of_drm_find_bridge(np);
- of_node_put(np);
-
if (bridge) {
ret = drm_bridge_attach(&output->encoder, bridge, NULL);
if (!ret)
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
- struct device_node *ep_np = NULL;
- struct of_endpoint ep;
- int ret;
-
- for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
- ret = of_graph_parse_endpoint(ep_np, &ep);
- if (!ret)
- ret = atmel_hlcdc_check_endpoint(dev, &ep);
-
- if (ret) {
- of_node_put(ep_np);
- return ret;
- }
- }
-
- for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
- ret = of_graph_parse_endpoint(ep_np, &ep);
- if (!ret)
- ret = atmel_hlcdc_attach_endpoint(dev, &ep);
-
- if (ret) {
- of_node_put(ep_np);
+ struct device_node *remote;
+ int ret = -ENODEV;
+ int endpoint = 0;
+
+ while (true) {
+ /* Loop through the possibly multiple connections to the output */
+ remote = of_graph_get_remote_node(dev->dev->of_node, 0,
+ endpoint++);
+ if (!remote)
+ break;
+
+ ret = atmel_hlcdc_attach_endpoint(dev, remote);
+ of_node_put(remote);
+ if (ret)
return ret;
- }
}
- return 0;
+ return ret;
}
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int bochs_mm_init(struct bochs_device *bochs)
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
{
u32 num_lanes;
- struct device_node *endpoint;
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
adv->num_dsi_lanes = num_lanes;
- endpoint = of_graph_get_next_endpoint(np, NULL);
- if (!endpoint)
+ adv->host_node = of_graph_get_remote_node(np, 0, 0);
+ if (!adv->host_node)
return -ENODEV;
- adv->host_node = of_graph_get_remote_port_parent(endpoint);
- if (!adv->host_node) {
- of_node_put(endpoint);
- return -ENODEV;
- }
-
- of_node_put(endpoint);
of_node_put(adv->host_node);
adv->use_timing_gen = !of_property_read_bool(np,
struct analogix_dp_device *dp = dev_get_drvdata(dev);
analogix_dp_bridge_disable(dp->bridge);
+ dp->connector.funcs->destroy(&dp->connector);
+ dp->encoder->funcs->destroy(dp->encoder);
if (dp->plat_data->panel) {
if (drm_panel_unprepare(dp->plat_data->panel))
DRM_ERROR("failed to turnoff the panel\n");
+ if (drm_panel_detach(dp->plat_data->panel))
+ DRM_ERROR("failed to detach the panel\n");
}
+ drm_dp_aux_unregister(&dp->aux);
pm_runtime_disable(dev);
+ clk_disable_unprepare(dp->clock);
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
{
- struct device_node *end_node, *phandle, *remote;
+ struct device_node *phandle, *remote;
struct i2c_adapter *ddc;
- end_node = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
- if (!end_node) {
- dev_err(dev, "Missing connector endpoint\n");
- return ERR_PTR(-ENODEV);
- }
-
- remote = of_graph_get_remote_port_parent(end_node);
- of_node_put(end_node);
- if (!remote) {
- dev_err(dev, "Enable to parse remote node\n");
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
return ERR_PTR(-EINVAL);
- }
phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
of_node_put(remote);
return -ENOMEM;
}
- ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
- ge_b850v3_lvds_ptr->bridge.of_node = dev->of_node;
- drm_bridge_add(&ge_b850v3_lvds_ptr->bridge);
-
success:
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
return 0;
ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
+ /* drm bridge initialization */
+ ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
+ ge_b850v3_lvds_ptr->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ge_b850v3_lvds_ptr->bridge);
+
/* Clear pending interrupts since power up. */
i2c_smbus_write_word_data(stdp4028_i2c,
STDP4028_DPTX_IRQ_STS_REG,
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <linux/of_graph.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include "drm_crtc.h"
{
struct device *dev = &client->dev;
struct ptn3460_bridge *ptn_bridge;
- struct device_node *endpoint, *panel_node;
int ret;
ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
return -ENOMEM;
}
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (endpoint) {
- panel_node = of_graph_get_remote_port_parent(endpoint);
- if (panel_node) {
- ptn_bridge->panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
- if (!ptn_bridge->panel)
- return -EPROBE_DEFER;
- }
- }
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &ptn_bridge->panel, NULL);
+ if (ret)
+ return ret;
ptn_bridge->client = client;
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_graph.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include "drmP.h"
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct device_node *endpoint, *panel_node;
struct ps8622_bridge *ps8622;
int ret;
if (!ps8622)
return -ENOMEM;
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (endpoint) {
- panel_node = of_graph_get_remote_port_parent(endpoint);
- if (panel_node) {
- ps8622->panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
- if (!ps8622->panel)
- return -EPROBE_DEFER;
- }
- }
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &ps8622->panel, NULL);
+ if (ret)
+ return ret;
ps8622->client = client;
#include <drm/drm_encoder_slave.h>
#include <drm/bridge/dw_hdmi.h>
+#include <uapi/linux/media-bus-format.h>
+#include <uapi/linux/videodev2.h>
+
#include "dw-hdmi.h"
#include "dw-hdmi-audio.h"
#define DDC_SEGMENT_ADDR 0x30
#define HDMI_EDID_LEN 512
-#define RGB 0
-#define YCBCR444 1
-#define YCBCR422_16BITS 2
-#define YCBCR422_8BITS 3
-#define XVYCC444 4
-
enum hdmi_datamap {
RGB444_8B = 0x01,
RGB444_10B = 0x03,
};
struct hdmi_data_info {
- unsigned int enc_in_format;
- unsigned int enc_out_format;
- unsigned int enc_color_depth;
- unsigned int colorimetry;
+ unsigned int enc_in_bus_format;
+ unsigned int enc_out_bus_format;
+ unsigned int enc_in_encoding;
+ unsigned int enc_out_encoding;
unsigned int pix_repet_factor;
unsigned int hdcp_enable;
struct hdmi_vmode video_mode;
}
EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable);
+static bool hdmi_bus_fmt_is_rgb(unsigned int bus_format)
+{
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool hdmi_bus_fmt_is_yuv444(unsigned int bus_format)
+{
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool hdmi_bus_fmt_is_yuv422(unsigned int bus_format)
+{
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static int hdmi_bus_fmt_color_depth(unsigned int bus_format)
+{
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ return 8;
+
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ return 10;
+
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ return 12;
+
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ return 16;
+
+ default:
+ return 0;
+ }
+}
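To make the classification concrete, running MEDIA_BUS_FMT_UYVY10_1X20 (YUV 4:2:2 at 10 bits per component) through the helpers above yields:

    hdmi_bus_fmt_is_rgb(MEDIA_BUS_FMT_UYVY10_1X20)      /* false */
    hdmi_bus_fmt_is_yuv444(MEDIA_BUS_FMT_UYVY10_1X20)   /* false */
    hdmi_bus_fmt_is_yuv422(MEDIA_BUS_FMT_UYVY10_1X20)   /* true  */
    hdmi_bus_fmt_color_depth(MEDIA_BUS_FMT_UYVY10_1X20) /* 10    */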
+
/*
* this submodule is responsible for the video data synchronization.
* for example, for RGB 4:4:4 input, the data map is defined as
int color_format = 0;
u8 val;
- if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.enc_color_depth == 8)
- color_format = 0x01;
- else if (hdmi->hdmi_data.enc_color_depth == 10)
- color_format = 0x03;
- else if (hdmi->hdmi_data.enc_color_depth == 12)
- color_format = 0x05;
- else if (hdmi->hdmi_data.enc_color_depth == 16)
- color_format = 0x07;
- else
- return;
- } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) {
- if (hdmi->hdmi_data.enc_color_depth == 8)
- color_format = 0x09;
- else if (hdmi->hdmi_data.enc_color_depth == 10)
- color_format = 0x0B;
- else if (hdmi->hdmi_data.enc_color_depth == 12)
- color_format = 0x0D;
- else if (hdmi->hdmi_data.enc_color_depth == 16)
- color_format = 0x0F;
- else
- return;
- } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) {
- if (hdmi->hdmi_data.enc_color_depth == 8)
- color_format = 0x16;
- else if (hdmi->hdmi_data.enc_color_depth == 10)
- color_format = 0x14;
- else if (hdmi->hdmi_data.enc_color_depth == 12)
- color_format = 0x12;
- else
- return;
+ switch (hdmi->hdmi_data.enc_in_bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ color_format = 0x01;
+ break;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ color_format = 0x03;
+ break;
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ color_format = 0x05;
+ break;
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ color_format = 0x07;
+ break;
+
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ color_format = 0x09;
+ break;
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ color_format = 0x0B;
+ break;
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ color_format = 0x0D;
+ break;
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ color_format = 0x0F;
+ break;
+
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ color_format = 0x16;
+ break;
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ color_format = 0x14;
+ break;
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ color_format = 0x12;
+ break;
+
+ default:
+ return;
}
val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
static int is_color_space_conversion(struct dw_hdmi *hdmi)
{
- return hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format;
+ return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format;
}
static int is_color_space_decimation(struct dw_hdmi *hdmi)
{
- if (hdmi->hdmi_data.enc_out_format != YCBCR422_8BITS)
+ if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
return 0;
- if (hdmi->hdmi_data.enc_in_format == RGB ||
- hdmi->hdmi_data.enc_in_format == YCBCR444)
+
+ if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format) ||
+ hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_in_bus_format))
return 1;
+
return 0;
}
static int is_color_space_interpolation(struct dw_hdmi *hdmi)
{
- if (hdmi->hdmi_data.enc_in_format != YCBCR422_8BITS)
+ if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_in_bus_format))
return 0;
- if (hdmi->hdmi_data.enc_out_format == RGB ||
- hdmi->hdmi_data.enc_out_format == YCBCR444)
+
+ if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) ||
+ hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
return 1;
+
return 0;
}
u32 csc_scale = 1;
if (is_color_space_conversion(hdmi)) {
- if (hdmi->hdmi_data.enc_out_format == RGB) {
- if (hdmi->hdmi_data.colorimetry ==
- HDMI_COLORIMETRY_ITU_601)
+ if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+ if (hdmi->hdmi_data.enc_out_encoding ==
+ V4L2_YCBCR_ENC_601)
csc_coeff = &csc_coeff_rgb_out_eitu601;
else
csc_coeff = &csc_coeff_rgb_out_eitu709;
- } else if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.colorimetry ==
- HDMI_COLORIMETRY_ITU_601)
+ } else if (hdmi_bus_fmt_is_rgb(
+ hdmi->hdmi_data.enc_in_bus_format)) {
+ if (hdmi->hdmi_data.enc_out_encoding ==
+ V4L2_YCBCR_ENC_601)
csc_coeff = &csc_coeff_rgb_in_eitu601;
else
csc_coeff = &csc_coeff_rgb_in_eitu709;
else if (is_color_space_decimation(hdmi))
decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
- if (hdmi->hdmi_data.enc_color_depth == 8)
+ switch (hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format)) {
+ case 8:
color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
- else if (hdmi->hdmi_data.enc_color_depth == 10)
+ break;
+ case 10:
color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
- else if (hdmi->hdmi_data.enc_color_depth == 12)
+ break;
+ case 12:
color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
- else if (hdmi->hdmi_data.enc_color_depth == 16)
+ break;
+ case 16:
color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
- else
+ break;
+
+ default:
return;
+ }
/* Configure the CSC registers */
hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG);
struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
u8 val, vp_conf;
- if (hdmi_data->enc_out_format == RGB ||
- hdmi_data->enc_out_format == YCBCR444) {
- if (!hdmi_data->enc_color_depth) {
- output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
- } else if (hdmi_data->enc_color_depth == 8) {
+ if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) ||
+ hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (hdmi_bus_fmt_color_depth(
+ hdmi->hdmi_data.enc_out_bus_format)) {
+ case 8:
color_depth = 4;
output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
- } else if (hdmi_data->enc_color_depth == 10) {
+ break;
+ case 10:
color_depth = 5;
- } else if (hdmi_data->enc_color_depth == 12) {
+ break;
+ case 12:
color_depth = 6;
- } else if (hdmi_data->enc_color_depth == 16) {
+ break;
+ case 16:
color_depth = 7;
- } else {
- return;
+ break;
+ default:
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
}
- } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
- if (!hdmi_data->enc_color_depth ||
- hdmi_data->enc_color_depth == 8)
+ } else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (hdmi_bus_fmt_color_depth(
+ hdmi->hdmi_data.enc_out_bus_format)) {
+ case 0:
+ case 8:
remap_size = HDMI_VP_REMAP_YCC422_16bit;
- else if (hdmi_data->enc_color_depth == 10)
+ break;
+ case 10:
remap_size = HDMI_VP_REMAP_YCC422_20bit;
- else if (hdmi_data->enc_color_depth == 12)
+ break;
+ case 12:
remap_size = HDMI_VP_REMAP_YCC422_24bit;
- else
+ break;
+
+ default:
return;
+ }
output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
} else {
return;
connector_status_connected : connector_status_disconnected;
}
+static void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
+ bool force, bool disabled, bool rxsense)
+{
+ u8 old_mask = hdmi->phy_mask;
+
+ if (force || disabled || !rxsense)
+ hdmi->phy_mask |= HDMI_PHY_RX_SENSE;
+ else
+ hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE;
+
+ if (old_mask != hdmi->phy_mask)
+ hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
+}
+
+static void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
+{
+ /*
+ * Configure the PHY RX SENSE and HPD interrupts polarities and clear
+ * any pending interrupt.
+ */
+ hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0);
+ hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
+ HDMI_IH_PHY_STAT0);
+
+ /* Enable cable hot plug irq. */
+ hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
+
+ /* Clear and unmute interrupts. */
+ hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
+ HDMI_IH_PHY_STAT0);
+ hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
+ HDMI_IH_MUTE_PHY_STAT0);
+}
+
static const struct dw_hdmi_phy_ops dw_hdmi_synopsys_phy_ops = {
.init = dw_hdmi_phy_init,
.disable = dw_hdmi_phy_disable,
.read_hpd = dw_hdmi_phy_read_hpd,
+ .update_hpd = dw_hdmi_phy_update_hpd,
+ .setup_hpd = dw_hdmi_phy_setup_hpd,
};
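Both new hooks are called only when non-NULL, so a platform with its own PHY can leave them unset and report plug state itself. A hypothetical ops table (the example_* callbacks are invented names):

    static const struct dw_hdmi_phy_ops example_vendor_phy_ops = {
            .init     = example_phy_init,
            .disable  = example_phy_disable,
            .read_hpd = example_phy_read_hpd,
            /* .update_hpd and .setup_hpd left NULL: hot-plug is handled
             * by the platform glue via dw_hdmi_setup_rx_sense() */
    };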
/* -----------------------------------------------------------------------------
/* Initialise info frame from DRM mode */
drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
- if (hdmi->hdmi_data.enc_out_format == YCBCR444)
+ if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
- else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
+ else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV422;
else
frame.colorspace = HDMI_COLORSPACE_RGB;
/* Set up colorimetry */
- if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
- frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
- if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
- frame.extended_colorimetry =
+ switch (hdmi->hdmi_data.enc_out_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+ frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+ else
+ frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
- else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
- frame.extended_colorimetry =
+ break;
+ case V4L2_YCBCR_ENC_709:
+ if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+ frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+ else
+ frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+ frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
- } else if (hdmi->hdmi_data.enc_out_format != RGB) {
- frame.colorimetry = hdmi->hdmi_data.colorimetry;
- frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
- } else { /* Carries no data */
- frame.colorimetry = HDMI_COLORIMETRY_NONE;
- frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ break;
+ default: /* Carries no data */
+ frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ frame.extended_colorimetry =
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ break;
}
frame.scan_mode = HDMI_SCAN_MODE_NONE;
(hdmi->vic == 21) || (hdmi->vic == 22) ||
(hdmi->vic == 2) || (hdmi->vic == 3) ||
(hdmi->vic == 17) || (hdmi->vic == 18))
- hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_601;
else
- hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+ hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_709;
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
- /* TODO: Get input format from IPU (via FB driver interface) */
- hdmi->hdmi_data.enc_in_format = RGB;
+ /* TOFIX: Get input format from plat data or fall back to RGB888 */
+ if (hdmi->plat_data->input_bus_format)
+ hdmi->hdmi_data.enc_in_bus_format =
+ hdmi->plat_data->input_bus_format;
+ else
+ hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ /* TOFIX: Get input encoding from plat data or fall back to none */
+ if (hdmi->plat_data->input_bus_encoding)
+ hdmi->hdmi_data.enc_in_encoding =
+ hdmi->plat_data->input_bus_encoding;
+ else
+ hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
- hdmi->hdmi_data.enc_out_format = RGB;
+ /* TOFIX: Default to RGB888 output format */
+ hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- hdmi->hdmi_data.enc_color_depth = 8;
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
return 0;
}
-/* Wait until we are registered to enable interrupts */
-static int dw_hdmi_fb_registered(struct dw_hdmi *hdmi)
+static void dw_hdmi_setup_i2c(struct dw_hdmi *hdmi)
{
hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
HDMI_PHY_I2CM_INT_ADDR);
hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
HDMI_PHY_I2CM_CTLINT_ADDR);
-
- /* enable cable hot plug irq */
- hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
-
- /* Clear Hotplug interrupts */
- hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
- HDMI_IH_PHY_STAT0);
-
- return 0;
}
static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
*/
static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi)
{
- u8 old_mask = hdmi->phy_mask;
-
- if (hdmi->force || hdmi->disabled || !hdmi->rxsense)
- hdmi->phy_mask |= HDMI_PHY_RX_SENSE;
- else
- hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE;
-
- if (old_mask != hdmi->phy_mask)
- hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
+ if (hdmi->phy.ops->update_hpd)
+ hdmi->phy.ops->update_hpd(hdmi, hdmi->phy.data,
+ hdmi->force, hdmi->disabled,
+ hdmi->rxsense);
}
static enum drm_connector_status
return 0;
}
+static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+ struct drm_connector *connector = &hdmi->connector;
+ enum drm_mode_status status;
+
+ status = dw_hdmi_connector_mode_valid(connector, mode);
+ return status == MODE_OK;
+}
+
static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
+ .mode_fixup = dw_hdmi_bridge_mode_fixup,
};
static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
return ret;
}
+void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
+{
+ mutex_lock(&hdmi->mutex);
+
+ if (!hdmi->force) {
+ /*
+ * If the RX sense status indicates we're disconnected,
+ * clear the software rxsense status.
+ */
+ if (!rx_sense)
+ hdmi->rxsense = false;
+
+ /*
+ * Only set the software rxsense status when both
+ * rxsense and hpd indicate we're connected.
+ * This avoids what seems to be bad behaviour in
+ * at least iMX6S versions of the phy.
+ */
+ if (hpd)
+ hdmi->rxsense = true;
+
+ dw_hdmi_update_power(hdmi);
+ dw_hdmi_update_phy_mask(hdmi);
+ }
+ mutex_unlock(&hdmi->mutex);
+}
+
+void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
+{
+ struct dw_hdmi *hdmi = dev_get_drvdata(dev);
+
+ __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
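A sketch of how the new export is meant to be used: glue code that fields its own HPD interrupt forwards the state to dw-hdmi. The example_read_*() helpers are placeholders for whatever the platform actually provides:

    static irqreturn_t example_glue_hpd_irq(int irq, void *dev_id)
    {
            struct device *dev = dev_id;
            bool hpd = example_read_hpd_pin(dev);        /* hypothetical */
            bool rx_sense = example_read_rx_sense(dev);  /* hypothetical */

            dw_hdmi_setup_rx_sense(dev, hpd, rx_sense);

            return IRQ_HANDLED;
    }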
+
static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
{
struct dw_hdmi *hdmi = dev_id;
* ask the source to re-read the EDID.
*/
if (intr_stat &
- (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
- mutex_lock(&hdmi->mutex);
- if (!hdmi->force) {
- /*
- * If the RX sense status indicates we're disconnected,
- * clear the software rxsense status.
- */
- if (!(phy_stat & HDMI_PHY_RX_SENSE))
- hdmi->rxsense = false;
-
- /*
- * Only set the software rxsense status when both
- * rxsense and hpd indicates we're connected.
- * This avoids what seems to be bad behaviour in
- * at least iMX6S versions of the phy.
- */
- if (phy_stat & HDMI_PHY_HPD)
- hdmi->rxsense = true;
-
- dw_hdmi_update_power(hdmi);
- dw_hdmi_update_phy_mask(hdmi);
- }
- mutex_unlock(&hdmi->mutex);
- }
+ (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD))
+ __dw_hdmi_setup_rx_sense(hdmi,
+ phy_stat & HDMI_PHY_HPD,
+ phy_stat & HDMI_PHY_RX_SENSE);
if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
dev_dbg(hdmi->dev, "EVENT=%s\n",
hdmi->ddc = NULL;
}
- /*
- * Configure registers related to HDMI interrupt
- * generation before registering IRQ.
- */
- hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0);
-
- /* Clear Hotplug interrupts */
- hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
- HDMI_IH_PHY_STAT0);
-
hdmi->bridge.driver_private = hdmi;
hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
#ifdef CONFIG_OF
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
- ret = dw_hdmi_fb_registered(hdmi);
- if (ret)
- goto err_iahb;
-
- /* Unmute interrupts */
- hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
- HDMI_IH_MUTE_PHY_STAT0);
+ dw_hdmi_setup_i2c(hdmi);
+ if (hdmi->phy.ops->setup_hpd)
+ hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data);
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct device_node *ep;
struct tc_data *tc;
int ret;
tc->dev = dev;
/* port@2 is the output port */
- ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1);
- if (ep) {
- struct device_node *remote;
-
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- dev_warn(dev, "endpoint %s not connected\n",
- ep->full_name);
- of_node_put(ep);
- return -ENODEV;
- }
- of_node_put(ep);
- tc->panel = of_drm_find_panel(remote);
- if (tc->panel) {
- dev_dbg(dev, "found panel %s\n", remote->full_name);
- } else {
- dev_dbg(dev, "waiting for panel %s\n",
- remote->full_name);
- of_node_put(remote);
- return -EPROBE_DEFER;
- }
- of_node_put(remote);
- }
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
+ if (ret)
+ return ret;
/* Shut down GPIO is optional */
tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
static int tfp410_get_connector_properties(struct tfp410 *dvi)
{
- struct device_node *ep = NULL, *connector_node = NULL;
- struct device_node *ddc_phandle = NULL;
+ struct device_node *connector_node, *ddc_phandle;
int ret = 0;
/* port@1 is the connector node */
- ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 1, -1);
- if (!ep)
- goto fail;
-
- connector_node = of_graph_get_remote_port_parent(ep);
+ connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
if (!connector_node)
- goto fail;
+ return -ENODEV;
dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
"hpd-gpios", 0, GPIOD_IN, "hpd");
else
ret = -EPROBE_DEFER;
+ of_node_put(ddc_phandle);
+
fail:
- of_node_put(ep);
of_node_put(connector_node);
- of_node_put(ddc_phandle);
return ret;
}
* but it's a requirement that we provide the function
*/
static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
int i;
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- unsigned crtc_mask = 0;
- struct drm_crtc *crtc;
int ret;
bool global = false;
- drm_for_each_crtc(crtc, dev) {
- if (crtc->acquire_ctx != state->acquire_ctx)
- continue;
-
- crtc_mask |= drm_crtc_mask(crtc);
- crtc->acquire_ctx = NULL;
- }
-
if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
global = true;
if (ret)
goto retry;
- drm_for_each_crtc(crtc, dev)
- if (drm_crtc_mask(crtc) & crtc_mask)
- crtc->acquire_ctx = state->acquire_ctx;
-
if (global)
dev->mode_config.acquire_ctx = state->acquire_ctx;
}
drm_atomic_connector_print_state(&p, connector_state);
}
-/**
- * drm_state_dump - dump entire device atomic state
- * @dev: the drm device
- * @p: where to print the state to
- *
- * Just for debugging. Drivers might want an option to dump state
- * to dmesg in case of error irq's. (Hint, you probably want to
- * ratelimit this!)
- *
- * The caller must drm_modeset_lock_all(), or if this is called
- * from error irq handler, it should not be enabled by default.
- * (Ie. if you are debugging errors you might not care that this
- * is racey. But calling this without all modeset locks held is
- * not inherently safe.)
- */
-void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
+static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
+ bool take_locks)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return;
- list_for_each_entry(plane, &config->plane_list, head)
+ list_for_each_entry(plane, &config->plane_list, head) {
+ if (take_locks)
+ drm_modeset_lock(&plane->mutex, NULL);
drm_atomic_plane_print_state(p, plane->state);
+ if (take_locks)
+ drm_modeset_unlock(&plane->mutex);
+ }
- list_for_each_entry(crtc, &config->crtc_list, head)
+ list_for_each_entry(crtc, &config->crtc_list, head) {
+ if (take_locks)
+ drm_modeset_lock(&crtc->mutex, NULL);
drm_atomic_crtc_print_state(p, crtc->state);
+ if (take_locks)
+ drm_modeset_unlock(&crtc->mutex);
+ }
drm_connector_list_iter_begin(dev, &conn_iter);
+ if (take_locks)
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
drm_for_each_connector_iter(connector, &conn_iter)
drm_atomic_connector_print_state(p, connector->state);
+ if (take_locks)
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
drm_connector_list_iter_end(&conn_iter);
}
+
+/**
+ * drm_state_dump - dump entire device atomic state
+ * @dev: the drm device
+ * @p: where to print the state to
+ *
+ * Just for debugging. Drivers might want an option to dump state
+ * to dmesg in case of error IRQs. (Hint, you probably want to
+ * ratelimit this!)
+ *
+ * The caller must hold all modeset locks, e.g. via drm_modeset_lock_all();
+ * or, if this is called from an error IRQ handler, it should not be enabled
+ * by default. (I.e. if you are debugging errors you might not care that this
+ * is racy. But calling this without all modeset locks held is not inherently
+ * safe.)
+ */
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
+{
+ __drm_state_dump(dev, p, false);
+}
EXPORT_SYMBOL(drm_state_dump);
#ifdef CONFIG_DEBUG_FS
struct drm_device *dev = node->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
- drm_modeset_lock_all(dev);
- drm_state_dump(dev, &p);
- drm_modeset_unlock_all(dev);
+ __drm_state_dump(dev, &p, true);
return 0;
}
kfree(fence_state);
}
-int drm_atomic_remove_fb(struct drm_framebuffer *fb)
-{
- struct drm_modeset_acquire_ctx ctx;
- struct drm_device *dev = fb->dev;
- struct drm_atomic_state *state;
- struct drm_plane *plane;
- struct drm_connector *conn;
- struct drm_connector_state *conn_state;
- int i, ret = 0;
- unsigned plane_mask;
-
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
-
- drm_modeset_acquire_init(&ctx, 0);
- state->acquire_ctx = &ctx;
-
-retry:
- plane_mask = 0;
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret)
- goto unlock;
-
- drm_for_each_plane(plane, dev) {
- struct drm_plane_state *plane_state;
-
- if (plane->state->fb != fb)
- continue;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- ret = PTR_ERR(plane_state);
- goto unlock;
- }
-
- if (plane_state->crtc->primary == plane) {
- struct drm_crtc_state *crtc_state;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
-
- ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
- if (ret)
- goto unlock;
-
- crtc_state->active = false;
- ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
- if (ret)
- goto unlock;
- }
-
- drm_atomic_set_fb_for_plane(plane_state, NULL);
- ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
- if (ret)
- goto unlock;
-
- plane_mask |= BIT(drm_plane_index(plane));
-
- plane->old_fb = plane->fb;
- }
-
- for_each_connector_in_state(state, conn, conn_state, i) {
- ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
-
- if (ret)
- goto unlock;
- }
-
- if (plane_mask)
- ret = drm_atomic_commit(state);
-
-unlock:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
*
* Check the state object to see if the requested state is physically possible.
* This does all the crtc and connector related computations for an atomic
- * update and adds any additional connectors needed for full modesets and calls
- * down into &drm_crtc_helper_funcs.mode_fixup and
- * &drm_encoder_helper_funcs.mode_fixup or
- * &drm_encoder_helper_funcs.atomic_check functions of the driver backend.
+ * update and adds any additional connectors needed for full modesets. It calls
+ * the various per-object callbacks in the following order:
+ *
+ * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
+ * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
+ * 3. If it's determined a modeset is needed then all connectors on the affected
+ *    crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
+ * 4. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
+ * 5. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
+ * This function is only called when the encoder will be part of a configured crtc,
+ * it must not be used for implementing connector property validation.
+ *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
+ * instead.
+ * 6. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
*
* &drm_crtc_state.mode_changed is set when the input mode is changed.
* &drm_crtc_state.connectors_changed is set when a connector is added or
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i, ret;
+ unsigned connectors_mask = 0;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ bool has_connectors =
+ !!new_crtc_state->connector_mask;
+
if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
crtc->base.id, crtc->name);
new_crtc_state->mode_changed = true;
new_crtc_state->connectors_changed = true;
}
+
+ if (old_crtc_state->active != new_crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
+ crtc->base.id, crtc->name);
+ new_crtc_state->active_changed = true;
+ }
+
+ if (new_crtc_state->enable != has_connectors) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
+ crtc->base.id, crtc->name);
+
+ return -EINVAL;
+ }
}
- ret = handle_conflicting_encoders(state, state->legacy_set_config);
+ ret = handle_conflicting_encoders(state, false);
if (ret)
return ret;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+
/*
* This only sets crtc->connectors_changed for routing changes,
* drivers must set crtc->connectors_changed themselves when
new_connector_state->link_status)
new_crtc_state->connectors_changed = true;
}
+
+ if (funcs->atomic_check)
+ ret = funcs->atomic_check(connector, new_connector_state);
+ if (ret)
+ return ret;
+
+ connectors_mask += BIT(i);
}
/*
* crtc only changed its mode but has the same set of connectors.
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- bool has_connectors =
- !!new_crtc_state->connector_mask;
-
- /*
- * We must set ->active_changed after walking connectors for
- * otherwise an update that only changes active would result in
- * a full modeset because update_connector_routing force that.
- */
- if (old_crtc_state->active != new_crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
- crtc->base.id, crtc->name);
- new_crtc_state->active_changed = true;
- }
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
+ }
- if (new_crtc_state->enable != has_connectors) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
- crtc->base.id, crtc->name);
+ /*
+ * Iterate over all connectors again, to make sure atomic_check()
+ * has been called on them when a modeset is forced.
+ */
+ for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
- return -EINVAL;
- }
+ if (connectors_mask & BIT(i))
+ continue;
+
+ if (funcs->atomic_check)
+ ret = funcs->atomic_check(connector, new_connector_state);
+ if (ret)
+ return ret;
}
return mode_fixup(state);
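For illustration, a driver-side &drm_connector_helper_funcs.atomic_check implementation slotted into the order documented above might look like this; my_needs_modeset() is a hypothetical stand-in for whatever driver-private connector state can only be applied with a full modeset:

	static int my_connector_atomic_check(struct drm_connector *connector,
					     struct drm_connector_state *new_state)
	{
		struct drm_crtc_state *crtc_state;

		if (!new_state->crtc)
			return 0;

		crtc_state = drm_atomic_get_crtc_state(new_state->state,
						       new_state->crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force a full modeset when state can't be applied live. */
		if (my_needs_modeset(connector, new_state))
			crtc_state->mode_changed = true;

		return 0;
	}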
if (!state)
return -ENOMEM;
- state->legacy_set_config = true;
state->acquire_ctx = ctx;
ret = __drm_atomic_helper_set_config(set, state);
if (ret != 0)
goto fail;
+	ret = handle_conflicting_encoders(state, true);
+	if (ret)
+		goto fail;
+
ret = drm_atomic_commit(state);
fail:
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx ctx;
int err;
drm_mode_config_reset(dev);
- drm_modeset_lock_all(dev);
- err = drm_atomic_helper_commit_duplicated_state(state, config->acquire_ctx);
- drm_modeset_unlock_all(dev);
+ drm_modeset_acquire_init(&ctx, 0);
+ while (1) {
+ err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+ if (err != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
return err;
}
if (!state)
return -ENOMEM;
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
* @green: green correction table
* @blue: blue correction table
* @size: size of the tables
+ * @ctx: lock acquire context
*
* Implements support for legacy gamma correction table for drivers
* that support color management through the DEGAMMA_LUT/GAMMA_LUT
*/
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
- uint32_t size)
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
blob_data[i].blue = blue[i];
}
- state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
-retry:
+ state->acquire_ctx = ctx;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
+fail:
drm_atomic_state_put(state);
drm_property_blob_put(blob);
return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
+ if (!crtc)
+ return -ENOENT;
- if (crtc->funcs->gamma_set == NULL) {
- ret = -ENOSYS;
- goto out;
- }
+ if (crtc->funcs->gamma_set == NULL)
+ return -ENOSYS;
/* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
+ if (crtc_lut->gamma_size != crtc->gamma_size)
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret)
goto out;
- }
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
goto out;
}
- ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+ ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
+ crtc->gamma_size, &ctx);
out:
- drm_modeset_unlock_all(dev);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return ret;
}
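This is the same open-coded acquire/backoff dance that most of the conversions in this series repeat, so it is worth spelling out once in isolation; do_work() is a placeholder for the ioctl body:

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (!ret)
		ret = do_work(dev, &ctx);	/* placeholder ioctl body */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;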
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
+ if (!crtc)
+ return -ENOENT;
/* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
+ if (crtc_lut->gamma_size != crtc->gamma_size)
+ return -EINVAL;
+ drm_modeset_lock(&crtc->mutex, NULL);
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
goto out;
}
out:
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock(&crtc->mutex);
return ret;
}
}
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ mutex_lock(&crtc->dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
return ret;
}
struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_atomic_remove_fb(struct drm_framebuffer *fb);
/* drm_plane.c */
for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \
i__ = 0; i__ < (fbh)->connector_count; i__++)
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_fb_helper_connector **temp;
+ unsigned int count;
+
+ if (!drm_fbdev_emulation)
+ return 0;
+
+ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+
+ count = fb_helper->connector_count + 1;
+
+ if (count > fb_helper->connector_info_alloc_count) {
+ size_t size = count * sizeof(fb_conn);
+
+ temp = krealloc(fb_helper->connector_info, size, GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ fb_helper->connector_info_alloc_count = count;
+ fb_helper->connector_info = temp;
+ }
+
+ fb_conn = kzalloc(sizeof(*fb_conn), GFP_KERNEL);
+ if (!fb_conn)
+ return -ENOMEM;
+
+ drm_connector_get(connector);
+ fb_conn->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_conn;
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+
/**
* drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
* emulation helper
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
-int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
-{
- struct drm_fb_helper_connector **temp;
- struct drm_fb_helper_connector *fb_helper_connector;
-
- if (!drm_fbdev_emulation)
- return 0;
-
- WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
- if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
- temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
- if (!temp)
- return -ENOMEM;
-
- fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
- fb_helper->connector_info = temp;
- }
-
-
- fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!fb_helper_connector)
- return -ENOMEM;
-
- drm_connector_get(connector);
- fb_helper_connector->connector = connector;
- fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
-
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
fb_helper_connector = fb_helper->connector_info[i];
drm_connector_put(fb_helper_connector->connector);
- for (j = i + 1; j < fb_helper->connector_count; j++) {
+ for (j = i + 1; j < fb_helper->connector_count; j++)
fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
- }
+
fb_helper->connector_count--;
kfree(fb_helper_connector);
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
+ crtc->gamma_size, NULL);
}
/**
if (funcs->mode_set_base_atomic == NULL)
continue;
+ if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev))
+ continue;
+
drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
for (i = 0; i < helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+
crtc = mode_set->crtc;
funcs = crtc->helper_private;
fb = drm_mode_config_fb(crtc);
if (funcs->mode_set_base_atomic == NULL)
continue;
+ if (drm_drv_uses_atomic_modeset(crtc->dev))
+ continue;
+
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
- unsigned plane_mask;
+ unsigned int plane_mask;
state = drm_atomic_state_alloc(dev);
if (!state)
goto fail;
}
- for(i = 0; i < fb_helper->crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
ret = __drm_atomic_helper_set_config(mode_set, state);
goto retry;
}
-static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
int i;
- drm_warn_on_modeset_not_all_locked(dev);
-
- if (drm_drv_uses_atomic_modeset(dev))
- return restore_fbdev_mode_atomic(fb_helper);
-
drm_for_each_plane(plane, dev) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
return 0;
}
+static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+
+ drm_warn_on_modeset_not_all_locked(dev);
+
+ if (drm_drv_uses_atomic_modeset(dev))
+ return restore_fbdev_mode_atomic(fb_helper);
+ else
+ return restore_fbdev_mode_legacy(fb_helper);
+}
+
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: fbcon to restore
struct drm_crtc *crtc;
int bound = 0, crtcs_bound = 0;
- /* Sometimes user space wants everything disabled, so don't steal the
- * display if there's a master. */
+ /*
+ * Sometimes user space wants everything disabled, so don't steal the
+ * display if there's a master.
+ */
if (READ_ONCE(dev->master))
return false;
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
bool ret;
+
ret = drm_fb_helper_force_kernel_mode();
if (ret == true)
DRM_ERROR("Failed to restore crtc configuration\n");
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
+ if (list_empty(&kernel_fb_helper_list))
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
}
mutex_unlock(&kernel_fb_helper_lock);
(blue << info->var.blue.offset);
if (info->var.transp.length > 0) {
u32 mask = (1 << info->var.transp.length) - 1;
+
mask <<= info->var.transp.offset;
value |= mask;
}
struct drm_atomic_state *state;
struct drm_plane *plane;
int i, ret;
- unsigned plane_mask;
+ unsigned int plane_mask;
state = drm_atomic_state_alloc(dev);
if (!state)
state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
plane_mask = 0;
- for(i = 0; i < fb_helper->crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set;
mode_set = &fb_helper->crtc_info[i].mode_set;
goto retry;
}
-/**
- * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display
- * @var: updated screen information
- * @info: fbdev registered by the helper
- */
-int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+static int pan_display_legacy(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *modeset;
int ret = 0;
int i;
- if (oops_in_progress)
- return -EBUSY;
-
- drm_modeset_lock_all(dev);
- if (!drm_fb_helper_is_bound(fb_helper)) {
- drm_modeset_unlock_all(dev);
- return -EBUSY;
- }
-
- if (drm_drv_uses_atomic_modeset(dev)) {
- ret = pan_display_atomic(var, info);
- goto unlock;
- }
-
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
}
}
}
-unlock:
+
+ return ret;
+}
+
+/**
+ * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display
+ * @var: updated screen information
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ int ret;
+
+ if (oops_in_progress)
+ return -EBUSY;
+
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = pan_display_atomic(var, info);
+ else
+ ret = pan_display_legacy(var, info);
drm_modeset_unlock_all(dev);
+
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
sizes.surface_depth = 24;
sizes.surface_bpp = 32;
- sizes.fb_width = (unsigned)-1;
- sizes.fb_height = (unsigned)-1;
+ sizes.fb_width = (u32)-1;
+ sizes.fb_height = (u32)-1;
- /* if driver picks 8 or 16 by default use that
- for both depth/bpp */
+ /* if driver picks 8 or 16 by default use that for both depth/bpp */
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
+
if (connector->has_tile) {
lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
}
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
- /* hmm everyone went away - assume VGA cable just fell out
- and will come back later. */
+ /*
+ * hmm everyone went away - assume VGA cable just fell out
+ * and will come back later.
+ */
DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
sizes.fb_width = sizes.surface_width = 1024;
sizes.fb_height = sizes.surface_height = 768;
info->fix.accel = FB_ACCEL_NONE;
info->fix.line_length = pitch;
- return;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
+
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
continue;
} else {
- if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
+ if (fb_helper_conn->connector->tile_h_loc != tile_pass - 1 &&
fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
/* if this tile_pass doesn't cover any of the tiles - keep going */
continue;
- /* find the tile offsets for this pass - need
- to find all tiles left and above */
+ /*
+ * find the tile offsets for this pass - need to find
+ * all tiles left and above
+ */
drm_get_tile_offsets(fb_helper, modes, offsets,
i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
}
if (!encoder)
goto out;
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
+ /*
+ * select a crtc for this connector and then attempt to configure
+ * remaining connectors
+ */
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_atomic.h>
#include "drm_crtc_internal.h"
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
+static int atomic_remove_fb(struct drm_framebuffer *fb)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_device *dev = fb->dev;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ int i, ret = 0;
+ unsigned plane_mask;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ drm_modeset_acquire_init(&ctx, 0);
+ state->acquire_ctx = &ctx;
+
+retry:
+ plane_mask = 0;
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret)
+ goto unlock;
+
+ drm_for_each_plane(plane, dev) {
+ struct drm_plane_state *plane_state;
+
+ if (plane->state->fb != fb)
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto unlock;
+ }
+
+ if (plane_state->crtc->primary == plane) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+
+ ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
+ if (ret)
+ goto unlock;
+
+ crtc_state->active = false;
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ if (ret)
+ goto unlock;
+ }
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ goto unlock;
+
+ plane_mask |= BIT(drm_plane_index(plane));
+
+ plane->old_fb = plane->fb;
+ }
+
+ for_each_connector_in_state(state, conn, conn_state, i) {
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+
+ if (ret)
+ goto unlock;
+ }
+
+ if (plane_mask)
+ ret = drm_atomic_commit(state);
+
+unlock:
+ if (plane_mask)
+ drm_atomic_clean_old_fb(dev, plane_mask, ret);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static void legacy_remove_fb(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->primary->fb == fb) {
+ /* should turn off the crtc */
+ if (drm_crtc_force_disable(crtc))
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->fb == fb)
+ drm_plane_force_disable(plane);
+ }
+ drm_modeset_unlock_all(dev);
+}
+
/**
* drm_framebuffer_remove - remove and unreference a framebuffer object
* @fb: framebuffer to remove
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
struct drm_device *dev;
- struct drm_crtc *crtc;
- struct drm_plane *plane;
if (!fb)
return;
*/
if (drm_framebuffer_read_refcount(fb) > 1) {
if (drm_drv_uses_atomic_modeset(dev)) {
- int ret = drm_atomic_remove_fb(fb);
+ int ret = atomic_remove_fb(fb);
WARN(ret, "atomic remove_fb failed with %i\n", ret);
- goto out;
- }
-
- drm_modeset_lock_all(dev);
- /* remove from any CRTC */
- drm_for_each_crtc(crtc, dev) {
- if (crtc->primary->fb == fb) {
- /* should turn off the crtc */
- if (drm_crtc_force_disable(crtc))
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
- }
- }
-
- drm_for_each_plane(plane, dev) {
- if (plane->fb == fb)
- drm_plane_force_disable(plane);
- }
- drm_modeset_unlock_all(dev);
+ } else
+ legacy_remove_fb(fb);
}
-out:
drm_framebuffer_put(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
-/**
+/*
* \file drm_ioc32.c
*
* 32-bit ioctl compatibility routines for the DRM.
#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
typedef struct drm_version_32 {
- int version_major; /**< Major version */
- int version_minor; /**< Minor version */
- int version_patchlevel; /**< Patch level */
- u32 name_len; /**< Length of name buffer */
- u32 name; /**< Name of driver */
- u32 date_len; /**< Length of date buffer */
- u32 date; /**< User-space buffer to hold date */
- u32 desc_len; /**< Length of desc buffer */
- u32 desc; /**< User-space buffer to hold desc */
+ int version_major; /* Major version */
+ int version_minor; /* Minor version */
+ int version_patchlevel; /* Patch level */
+ u32 name_len; /* Length of name buffer */
+ u32 name; /* Name of driver */
+ u32 date_len; /* Length of date buffer */
+ u32 date; /* User-space buffer to hold date */
+ u32 desc_len; /* Length of desc buffer */
+ u32 desc; /* User-space buffer to hold desc */
} drm_version32_t;
static int compat_drm_version(struct file *file, unsigned int cmd,
}
typedef struct drm_unique32 {
- u32 unique_len; /**< Length of unique */
- u32 unique; /**< Unique name for driver instantiation */
+ u32 unique_len; /* Length of unique */
+ u32 unique; /* Unique name for driver instantiation */
} drm_unique32_t;
static int compat_drm_getunique(struct file *file, unsigned int cmd,
}
typedef struct drm_map32 {
- u32 offset; /**< Requested physical address (0 for SAREA)*/
- u32 size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- u32 handle; /**< User-space: "Handle" to pass to mmap() */
- int mtrr; /**< MTRR slot used */
+ u32 offset; /* Requested physical address (0 for SAREA) */
+ u32 size; /* Requested physical size (bytes) */
+ enum drm_map_type type; /* Type of memory to map */
+ enum drm_map_flags flags; /* Flags */
+ u32 handle; /* User-space: "Handle" to pass to mmap() */
+ int mtrr; /* MTRR slot used */
} drm_map32_t;
static int compat_drm_getmap(struct file *file, unsigned int cmd,
}
typedef struct drm_client32 {
- int idx; /**< Which client desired? */
- int auth; /**< Is client authenticated? */
- u32 pid; /**< Process ID */
- u32 uid; /**< User ID */
- u32 magic; /**< Magic */
- u32 iocs; /**< Ioctl count */
+ int idx; /* Which client desired? */
+ int auth; /* Is client authenticated? */
+ u32 pid; /* Process ID */
+ u32 uid; /* User ID */
+ u32 magic; /* Magic */
+ u32 iocs; /* Ioctl count */
} drm_client32_t;
static int compat_drm_getclient(struct file *file, unsigned int cmd,
}
typedef struct drm_buf_desc32 {
- int count; /**< Number of buffers of this size */
- int size; /**< Size in bytes */
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
+ int count; /* Number of buffers of this size */
+ int size; /* Size in bytes */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
int flags;
- u32 agp_start; /**< Start address in the AGP aperture */
+ u32 agp_start; /* Start address in the AGP aperture */
} drm_buf_desc32_t;
static int compat_drm_addbufs(struct file *file, unsigned int cmd,
};
/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/drm.
+ * drm_compat_ioctl - 32bit IOCTL compatibility handler for DRM drivers
+ * @filp: file this ioctl is called on
+ * @cmd: ioctl cmd number
+ * @arg: user argument
+ *
+ * Compatibility handler for 32 bit userspace running on 64 bit kernels. All
+ * actual IOCTL handling is forwarded to drm_ioctl(), while marshalling
+ * structures as appropriate. Note that this only handles DRM core IOCTLs; if
+ * the driver has botched 32 bit IOCTLs of its own, it must handle those by
+ * wrapping this function.
*
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * Returns:
+ * Zero on success, negative error code on failure.
*/
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
return ret;
}
-
EXPORT_SYMBOL(drm_compat_ioctl);
case DRM_CAP_ADDFB2_MODIFIERS:
req->value = dev->mode_config.allow_fb_modifiers;
break;
+ case DRM_CAP_CRTC_IN_VBLANK_EVENT:
+ req->value = 1;
+ break;
default:
return -EINVAL;
}
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+/**
+ * DOC: driver specific ioctls
+ *
+ * First things first, driver private IOCTLs should only be needed for drivers
+ * supporting rendering. Kernel modesetting is all standardized, and extended
+ * through properties. There are a few exceptions in some existing drivers,
+ * which define IOCTLs for use by the display DRM master, but they all predate
+ * properties.
+ *
+ * Now if you do have a render driver you always have to support it through
+ * driver private IOCTLs. There are a few steps needed to wire all the things
+ * up.
+ *
+ * First you need to define the structure for your IOCTL in your driver private
+ * UAPI header in ``include/uapi/drm/my_driver_drm.h``::
+ *
+ * struct my_driver_operation {
+ * u32 some_thing;
+ * u32 another_thing;
+ * };
+ *
+ * Please make sure that you follow all the best practices from
+ * ``Documentation/ioctl/botching-up-ioctls.txt``. Note that drm_ioctl()
+ * automatically zero-extends structures, hence make sure you can add more stuff
+ * at the end, i.e. don't put a variable sized array there.
+ *
+ * Then you need to define your IOCTL number, using one of DRM_IO(), DRM_IOR(),
+ * DRM_IOW() or DRM_IOWR(). It must start with the DRM_IOCTL\_ prefix::
+ *
+ * #define DRM_IOCTL_MY_DRIVER_OPERATION \
+ * DRM_IOW(DRM_COMMAND_BASE, struct my_driver_operation)
+ *
+ * DRM driver private IOCTLs must be in the range from DRM_COMMAND_BASE to
+ * DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire
+ * up the handlers and set the access rights::
+ *
+ * static const struct drm_ioctl_desc my_driver_ioctls[] = {
+ * DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation,
+ * DRM_AUTH|DRM_RENDER_ALLOW),
+ * };
+ *
+ * And then assign this to the &drm_driver.ioctls field in your driver
+ * structure.
+ */
+
/**
* drm_ioctl - ioctl callback implementation for DRM drivers
* @filp: file this ioctl is called on
* @cmd: ioctl cmd number
* @arg: user argument
*
- * Looks up the ioctl function in the ::ioctls table, checking for root
- * previleges if so required, and dispatches to the respective function.
+ * Looks up the ioctl function in the DRM core and the driver dispatch table,
+ * stored in &drm_driver.ioctls. It checks for necessary permission by calling
+ * drm_ioctl_permit(), and dispatches to the respective function.
*
* Returns:
* Zero on success, negative error code on failure.
e->pipe = pipe;
e->event.sequence = drm_vblank_count(dev, pipe);
+ e->event.crtc_id = crtc->base.id;
list_add_tail(&e->base.link, &dev->vblank_event_list);
}
EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
now = get_drm_timestamp();
}
e->pipe = pipe;
+ e->event.crtc_id = crtc->base.id;
send_vblank_event(dev, e, seq, &now);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
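With the new DRM_CAP_CRTC_IN_VBLANK_EVENT capability and the crtc_id field now filled in on every vblank event, userspace can attribute events to a CRTC directly. A minimal sketch, assuming the matching uapi header update from this series; report_vblank() itself is illustrative and reads one event per read() for simplicity:

	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <xf86drm.h>

	static void report_vblank(int fd)
	{
		struct drm_event_vblank ev;
		uint64_t cap = 0;

		/* Only trust ev.crtc_id when the kernel advertises the cap. */
		drmGetCap(fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap);

		if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
		    cap && ev.base.type == DRM_EVENT_VBLANK)
			printf("vblank on crtc %u, seq %u\n",
			       ev.crtc_id, ev.sequence);
	}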
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
-/**
- * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
- * @crtc: DRM CRTC
- * @plane: DRM plane to be updated on @crtc
- *
- * This function locks the given crtc and plane (which should be either the
- * primary or cursor plane) using a hidden acquire context. This is necessary so
- * that drivers internally using the atomic interfaces can grab further locks
- * with the lock acquire context.
- *
- * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
- * converted to universal planes yet.
- */
-void drm_modeset_lock_crtc(struct drm_crtc *crtc,
- struct drm_plane *plane)
-{
- struct drm_modeset_acquire_ctx *ctx;
- int ret;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (WARN_ON(!ctx))
- return;
-
- drm_modeset_acquire_init(ctx, 0);
-
-retry:
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- goto fail;
-
- if (plane) {
- ret = drm_modeset_lock(&plane->mutex, ctx);
- if (ret)
- goto fail;
-
- if (plane->crtc) {
- ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
- if (ret)
- goto fail;
- }
- }
-
- WARN_ON(crtc->acquire_ctx);
-
- /* now we hold the locks, so now that it is safe, stash the
- * ctx for drm_modeset_unlock_crtc():
- */
- crtc->acquire_ctx = ctx;
-
- return;
-
-fail:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(ctx);
- goto retry;
- }
-}
-EXPORT_SYMBOL(drm_modeset_lock_crtc);
-
-/**
- * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
- * @crtc: drm crtc
- *
- * Legacy ioctl operations like cursor updates or page flips only have per-crtc
- * locking, and store the acquire ctx in the corresponding crtc. All other
- * legacy operations take all locks and use a global acquire context. This
- * function grabs the right one.
- */
-struct drm_modeset_acquire_ctx *
-drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
-{
- if (crtc->acquire_ctx)
- return crtc->acquire_ctx;
-
- WARN_ON(!crtc->dev->mode_config.acquire_ctx);
-
- return crtc->dev->mode_config.acquire_ctx;
-}
-EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
-
-/**
- * drm_modeset_unlock_crtc - drop crtc lock
- * @crtc: drm crtc
- *
- * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
- * locks acquired through the hidden context.
- */
-void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
-{
- struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
-
- if (WARN_ON(!ctx))
- return;
-
- crtc->acquire_ctx = NULL;
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
-
- kfree(ctx);
-}
-EXPORT_SYMBOL(drm_modeset_unlock_crtc);
-
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
#include <linux/list.h>
#include <linux/of_graph.h>
#include <drm/drmP.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_panel.h>
#include <drm/drm_of.h>
static void drm_release_of(struct device *dev, void *data)
return -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
+
+/*
+ * drm_of_find_panel_or_bridge - return connected panel or bridge device
+ * @np: device tree node containing encoder output ports
+ * @port: port index in @np to look at
+ * @endpoint: endpoint index within @port
+ * @panel: pointer to hold returned drm_panel
+ * @bridge: pointer to hold returned drm_bridge
+ *
+ * Given a DT node's port and endpoint number, find the connected node and
+ * return either the associated struct drm_panel or drm_bridge device. At
+ * least one of @panel and @bridge must be non-NULL.
+ *
+ * Returns zero if successful, or one of the standard error codes if it fails.
+ */
+int drm_of_find_panel_or_bridge(const struct device_node *np,
+ int port, int endpoint,
+ struct drm_panel **panel,
+ struct drm_bridge **bridge)
+{
+ int ret = -EPROBE_DEFER;
+ struct device_node *remote;
+
+ if (!panel && !bridge)
+ return -EINVAL;
+
+ remote = of_graph_get_remote_node(np, port, endpoint);
+ if (!remote)
+ return -ENODEV;
+
+ if (panel) {
+ *panel = of_drm_find_panel(remote);
+ if (*panel)
+ ret = 0;
+ }
+
+ /* No panel found yet, check for a bridge next. */
+ if (bridge) {
+ if (ret) {
+ *bridge = of_drm_find_bridge(remote);
+ if (*bridge)
+ ret = 0;
+ } else {
+ *bridge = NULL;
+ }
+ }
+
+ of_node_put(remote);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
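Probe code that previously open-coded the OF-graph walk (as in the tc358767 and tfp410 hunks above) collapses to a single call. A sketch under stated assumptions: priv, my_attach_panel() and my_attach_bridge() are illustrative, only the helper itself is from this series:

	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	/* port 1, endpoint 0: accept whichever of panel/bridge is wired up */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge);
	if (ret)
		return ret;	/* -EPROBE_DEFER until the remote device binds */

	return panel ? my_attach_panel(priv, panel)
		     : my_attach_bridge(priv, bridge);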
static int drm_mode_cursor_universal(struct drm_crtc *crtc,
struct drm_mode_cursor2 *req,
- struct drm_file *file_priv)
+ struct drm_file *file_priv,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct drm_framebuffer *fb = NULL;
int32_t crtc_x, crtc_y;
uint32_t crtc_w = 0, crtc_h = 0;
uint32_t src_w = 0, src_h = 0;
- struct drm_modeset_acquire_ctx ctx;
int ret = 0;
BUG_ON(!crtc->cursor);
WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret)
- goto fail;
- ret = drm_modeset_lock(&crtc->cursor->mutex, &ctx);
- if (ret)
- goto fail;
-
/*
* Obtain fb we'll be using (either new or existing) and take an extra
* reference to it if fb != null. setplane will take care of dropping
*/
ret = __setplane_internal(crtc->cursor, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
- 0, 0, src_w, src_h, &ctx);
+ 0, 0, src_w, src_h, ctx);
/* Update successful; save new cursor position, if necessary */
if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
crtc->cursor_y = req->y;
}
-fail:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
return ret;
}
struct drm_file *file_priv)
{
struct drm_crtc *crtc;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENOENT;
}
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret)
+ goto out;
/*
* If this crtc has a universal cursor plane, call that plane's update
* handler rather than using legacy cursor handlers.
*/
- if (crtc->cursor)
- return drm_mode_cursor_universal(crtc, req, file_priv);
+ if (crtc->cursor) {
+ ret = drm_modeset_lock(&crtc->cursor->mutex, &ctx);
+ if (ret)
+ goto out;
+
+ ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
+ goto out;
+ }
- drm_modeset_lock_crtc(crtc, crtc->cursor);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
}
}
out:
- drm_modeset_unlock_crtc(crtc);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
return ret;
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .kmap = drm_gem_dmabuf_kmap,
- .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
- .kunmap = drm_gem_dmabuf_kunmap,
- .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .map = drm_gem_dmabuf_kmap,
+ .map_atomic = drm_gem_dmabuf_kmap_atomic,
+ .unmap = drm_gem_dmabuf_kunmap,
+ .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
*
* This library provides some helper code for output probing. It provides an
* implementation of the core &drm_connector_funcs.fill_modes interface with
- * drm_helper_probe_single_connector_modes.
+ * drm_helper_probe_single_connector_modes().
*
* It also provides support for polling connectors with a work item and for
* generic hotplug interrupt handling where the driver doesn't or cannot keep
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
static enum drm_connector_status
-drm_connector_detect(struct drm_connector *connector, bool force)
+drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
- return connector->funcs->detect ?
- connector->funcs->detect(connector, force) :
- connector_status_connected;
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex, &ctx);
+ if (!ret) {
+ if (funcs->detect_ctx)
+ ret = funcs->detect_ctx(connector, &ctx, force);
+ else if (connector->funcs->detect)
+ ret = connector->funcs->detect(connector, force);
+ else
+ ret = connector_status_connected;
+ }
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ if (WARN_ON(ret < 0))
+ ret = connector_status_unknown;
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+/**
+ * drm_helper_probe_detect - probe connector status
+ * @connector: connector to probe
+ * @ctx: acquire_ctx, or NULL to let this function handle locking.
+ * @force: Whether destructive probe operations should be performed.
+ *
+ * This function calls the connector's detect callbacks and returns the
+ * resulting &drm_connector_status value; if @ctx is set, it may also
+ * return -EDEADLK.
+ */
+int
+drm_helper_probe_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+ if (!ctx)
+ return drm_helper_probe_detect_ctx(connector, force);
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ if (funcs->detect_ctx)
+ return funcs->detect_ctx(connector, ctx, force);
+ else if (connector->funcs->detect)
+ return connector->funcs->detect(connector, force);
+ else
+ return connector_status_connected;
}
+EXPORT_SYMBOL(drm_helper_probe_detect);
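A driver-side sketch of the new context-aware detect hook: a hypothetical driver that needs a crtc for load detection can now take its lock through the caller's acquire context and simply return -EDEADLK to trigger a backoff. my_pick_load_detect_crtc() and my_analog_load_detect() are illustrative:

	static int my_connector_detect_ctx(struct drm_connector *connector,
					   struct drm_modeset_acquire_ctx *ctx,
					   bool force)
	{
		struct drm_crtc *crtc;
		int ret;

		crtc = my_pick_load_detect_crtc(connector->dev);

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;	/* typically -EDEADLK; core backs off */

		return my_analog_load_detect(connector, crtc, force) ?
			connector_status_connected :
			connector_status_disconnected;
	}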
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
struct drm_display_mode *mode;
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
- int count = 0;
+ int count = 0, ret;
int mode_flags = 0;
bool verbose_prune = true;
enum drm_connector_status old_status;
+ struct drm_modeset_acquire_ctx ctx;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ drm_modeset_acquire_init(&ctx, 0);
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
connector->name);
+
+retry:
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ } else
+ WARN_ON(ret < 0);
+
/* set all old modes to the stale state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_STALE;
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
- connector->status = drm_connector_detect(connector, true);
+ ret = drm_helper_probe_detect(connector, &ctx, true);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ } else if (WARN(ret < 0, "Invalid return value %i for connector detection\n", ret))
+ ret = connector_status_unknown;
+
+ connector->status = ret;
}
/*
prune:
drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
if (list_empty(&connector->modes))
return 0;
repoll = true;
- connector->status = drm_connector_detect(connector, false);
+ connector->status = drm_helper_probe_detect(connector, NULL, false);
if (old_status != connector->status) {
const char *old, *new;
old_status = connector->status;
- connector->status = drm_connector_detect(connector, false);
+ connector->status = drm_helper_probe_detect(connector, NULL, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
struct drm_property *property;
int enum_count = 0;
int value_count = 0;
- int ret = 0, i;
- int copied;
+ int i, copied;
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
uint64_t __user *values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
property = drm_property_find(dev, out_resp->prop_id);
- if (!property) {
- ret = -ENOENT;
- goto done;
- }
-
- if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
- drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- list_for_each_entry(prop_enum, &property->enum_list, head)
- enum_count++;
- }
-
- value_count = property->num_values;
+ if (!property)
+ return -ENOENT;
strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
out_resp->flags = property->flags;
- if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
- for (i = 0; i < value_count; i++) {
- if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
+ value_count = property->num_values;
+ values_ptr = u64_to_user_ptr(out_resp->values_ptr);
+
+ for (i = 0; i < value_count; i++) {
+ if (i < out_resp->count_values &&
+ put_user(property->values[i], values_ptr + i)) {
+ return -EFAULT;
}
}
out_resp->count_values = value_count;
+ copied = 0;
+ enum_ptr = u64_to_user_ptr(out_resp->enum_blob_ptr);
+
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
- drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
- copied = 0;
- enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_list, head) {
-
- if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
-
- if (copy_to_user(&enum_ptr[copied].name,
- &prop_enum->name, DRM_PROP_NAME_LEN)) {
- ret = -EFAULT;
- goto done;
- }
- copied++;
- }
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
+ enum_count++;
+ if (out_resp->count_enum_blobs < enum_count)
+ continue;
+
+ if (copy_to_user(&enum_ptr[copied].value,
+ &prop_enum->value, sizeof(uint64_t)))
+ return -EFAULT;
+
+ if (copy_to_user(&enum_ptr[copied].name,
+ &prop_enum->name, DRM_PROP_NAME_LEN))
+ return -EFAULT;
+ copied++;
}
out_resp->count_enum_blobs = enum_count;
}
*/
if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
out_resp->count_enum_blobs = 0;
-done:
- drm_modeset_unlock_all(dev);
- return ret;
+
+ return 0;
}
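The rewrite preserves the usual two-pass contract of DRM discovery ioctls: call once with zeroed counts to learn the sizes, allocate, then call again. From userspace the flow is roughly as follows (a sketch with error handling elided; dump_property() is illustrative):

	#include <stdint.h>
	#include <stdlib.h>
	#include <xf86drm.h>

	static void dump_property(int fd, uint32_t prop_id)
	{
		struct drm_mode_get_property prop = { .prop_id = prop_id };

		/* Pass 1: buffers NULL/zero, kernel fills in the counts. */
		drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);

		/* Pass 2: allocate to the reported sizes and fetch the data. */
		prop.values_ptr = (uintptr_t)calloc(prop.count_values,
						    sizeof(uint64_t));
		prop.enum_blob_ptr = (uintptr_t)
			calloc(prop.count_enum_blobs,
			       sizeof(struct drm_mode_property_enum));
		drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);
	}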
static void drm_property_free_blob(struct kref *kref)
#define to_drm_minor(d) dev_get_drvdata(d)
#define to_drm_connector(d) dev_get_drvdata(d)
+/**
+ * DOC: overview
+ *
+ * DRM provides very little additional support to drivers for sysfs
+ * interactions, beyond just all the standard stuff. Drivers who want to expose
+ * additional sysfs properties and property groups can attach them at either
+ * &drm_device.dev or &drm_connector.kdev.
+ *
+ * Registration is automatically handled when calling drm_dev_register(), or
+ * drm_connector_register() in case of hot-plugged connectors. Unregistration is
+ * also automatically handled by drm_dev_unregister() and
+ * drm_connector_unregister().
+ */
+
static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
};
NULL
};
-/**
- * drm_sysfs_connector_add - add a connector to sysfs
- * @connector: connector to add
- *
- * Create a connector device in sysfs, along with its associated connector
- * properties (so far, connection status, dpms, mode list and edid) and
- * generate a hotplug event so userspace knows there's a new connector
- * available.
- */
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return 0;
}
-/**
- * drm_sysfs_connector_remove - remove an connector device from sysfs
- * @connector: connector to remove
- *
- * Remove @connector and its associated attributes from sysfs. Note that
- * the device model core will take care of sending the "remove" uevent
- * at this time, so we don't need to do it.
- *
- * Note:
- * This routine should only be called if the connector was previously
- * successfully registered. If @connector hasn't been registered yet,
- * you'll likely see a panic somewhere deep in sysfs code when called.
- */
void drm_sysfs_connector_remove(struct drm_connector *connector)
{
if (!connector->kdev)
kfree(dev);
}
-/**
- * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor
- * @minor: minor to allocate sysfs device for
- *
- * This allocates a new sysfs device for @minor and returns it. The device is
- * not registered nor linked. The caller has to use device_add() and
- * device_del() to register and unregister it.
- *
- * Note that dev_get_drvdata() on the new device will return the minor.
- * However, the device does not hold a ref-count to the minor nor to the
- * underlying drm_device. This is unproblematic as long as you access the
- * private data only in sysfs callbacks. device_del() disables those
- * synchronously, so they cannot be called after you cleanup a minor.
- */
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
}
/**
- * drm_class_device_register - Register a struct device in the drm class.
+ * drm_class_device_register - register new device with the DRM sysfs class
+ * @dev: device to register
*
- * @dev: pointer to struct device to register.
- *
- * @dev should have all relevant members pre-filled with the exception
- * of the class member. In particular, the device_type member must
- * be set.
+ * Registers a new &struct device within the DRM sysfs class. Essentially only
+ * used by ttm to have a place for its global settings. Drivers should never use
+ * this.
*/
-
int drm_class_device_register(struct device *dev)
{
if (!drm_class || IS_ERR(drm_class))
}
EXPORT_SYMBOL_GPL(drm_class_device_register);
+/**
+ * drm_class_device_unregister - unregister device with the DRM sysfs class
+ * @dev: device to unregister
+ *
+ * Unregisters a &struct device from the DRM sysfs class. Essentially only used
+ * by ttm to have a place for its global settings. Drivers should never use
+ * this.
+ */
void drm_class_device_unregister(struct device *dev)
{
return device_unregister(dev);
depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
depends on MMU
select SHMEM
+ select SYNC_FILE
select TMPFS
select IOMMU_API
select IOMMU_SUPPORT
return 0;
}
-static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx = file->driver_priv;
DRIVER_PRIME |
DRIVER_RENDER,
.open = etnaviv_open,
- .preclose = etnaviv_preclose,
+ .postclose = etnaviv_postclose,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 0,
+ .minor = 1,
};
/*
#include <linux/reservation.h>
#include "etnaviv_drv.h"
+struct dma_fence;
struct etnaviv_gem_ops;
struct etnaviv_gem_object;
struct drm_device *dev;
struct etnaviv_gpu *gpu;
struct ww_acquire_ctx ticket;
- u32 fence;
+ struct dma_fence *fence;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
+ u32 flags;
};
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
+#include <linux/sync_file.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+ bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
- ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+ ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
+ explicit);
if (ret)
break;
}
}
ww_acquire_fini(&submit->ticket);
+ dma_fence_put(submit->fence);
kfree(submit);
}
struct etnaviv_gem_submit *submit;
struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gpu *gpu;
+ struct dma_fence *in_fence = NULL;
+ struct sync_file *sync_file = NULL;
+ int out_fence_fd = -1;
void *stream;
int ret;
return -EINVAL;
}
+ if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+ DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
goto err_submit_cmds;
}
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto err_submit_cmds;
+ }
+ }
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
goto err_submit_cmds;
}
+ submit->flags = args->flags;
+
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
goto err_submit_objects;
goto err_submit_objects;
}
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+ in_fence = sync_file_get_fence(args->fence_fd);
+ if (!in_fence) {
+ ret = -EINVAL;
+ goto err_submit_objects;
+ }
+
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
+ */
+ if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
+ ret = dma_fence_wait(in_fence, true);
+ if (ret)
+ goto err_submit_objects;
+ }
+ }
+
ret = submit_fence_sync(submit);
if (ret)
goto err_submit_objects;
if (ret == 0)
cmdbuf = NULL;
- args->fence = submit->fence;
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ /*
+ * This can be improved: ideally we want to allocate the sync
+ * file before kicking off the GPU job and just attach the
+ * fence to the sync file here, eliminating the ENOMEM
+ * possibility at this stage.
+ */
+ sync_file = sync_file_create(submit->fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fd_install(out_fence_fd, sync_file->file);
+ }
+
+ args->fence_fd = out_fence_fd;
+ args->fence = submit->fence->seqno;
out:
submit_unpin_objects(submit);
flush_workqueue(priv->wq);
err_submit_objects:
+ if (in_fence)
+ dma_fence_put(in_fence);
submit_cleanup(submit);
err_submit_cmds:
+ if (ret && (out_fence_fd >= 0))
+ put_unused_fd(out_fence_fd);
/* if we still own the cmdbuf */
if (cmdbuf)
etnaviv_cmdbuf_free(cmdbuf);
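Taken together this gives etnaviv explicit fencing: FENCE_FD_IN makes the kernel wait on a caller-supplied sync file, FENCE_FD_OUT hands the job's fence back as a file descriptor, and NO_IMPLICIT skips the reservation-object waits. A minimal userspace sketch of chaining two jobs, assuming the drm_etnaviv_gem_submit field names from the etnaviv UAPI header (bos/relocs/stream setup elided):

	#include <sys/ioctl.h>
	#include <drm/etnaviv_drm.h>	/* UAPI header, e.g. via libdrm */

	struct drm_etnaviv_gem_submit req = {
		.pipe  = ETNA_PIPE_3D,
		.flags = ETNA_SUBMIT_FENCE_FD_OUT,
		/* .bos, .relocs, .stream filled in as usual */
	};
	int in_fd;

	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &req);
	in_fd = req.fence_fd;	/* sync file FD created by the kernel */

	/* Second job: wait on the first one explicitly, skip implicit sync */
	req.flags    = ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_NO_IMPLICIT;
	req.fence_fd = in_fd;
	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &req);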
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
+#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ u32 clock;
+
+ clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+ etnaviv_gpu_load_clock(gpu, clock);
+}
+
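The shift means freq_scale selects a 6-bit fractional divider: 0 yields FSCALE_VAL 64 (the old hard-coded 0x40, full speed) and 6 yields 1, roughly 1/64 of the core clock. A standalone sketch of the assumed mapping:

	#include <stdio.h>

	int main(void)
	{
		/* freq_scale 0 (no throttling) .. 6 (deepest throttle) */
		for (unsigned int s = 0; s <= 6; s++)
			printf("freq_scale %u -> FSCALE_VAL %2u (~%2u/64 of full clock)\n",
			       s, 1u << (6 - s), 1u << (6 - s));
		return 0;
	}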
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
u32 control, idle;
timeout = jiffies + msecs_to_jiffies(1000);
while (time_is_after_jiffies(timeout)) {
- control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
/* enable clock */
- etnaviv_gpu_load_clock(gpu, control);
+ etnaviv_gpu_update_clock(gpu);
+
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
/* Wait for stable clock. Vivante's code waited for 1ms */
usleep_range(1000, 10000);
}
/* We rely on the GPU running, so program the clock */
- control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
- /* enable clock */
- etnaviv_gpu_load_clock(gpu, control);
+ etnaviv_gpu_update_clock(gpu);
return 0;
}
{
struct etnaviv_fence *f;
+ /*
+ * GPU lock must already be held, otherwise fence completion order might
+ * not match the seqno order assigned here.
+ */
+ lockdep_assert_held(&gpu->lock);
+
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
}
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive)
+ unsigned int context, bool exclusive, bool explicit)
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
return ret;
}
+ if (explicit)
+ return 0;
+
/*
* If we have any shared fences, then the exclusive fence
* should be ignored as it will already have been signalled.
goto out_pm_put;
}
+ mutex_lock(&gpu->lock);
+
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
- goto out_pm_put;
+ goto out_unlock;
}
- mutex_lock(&gpu->lock);
-
gpu->event[event].fence = fence;
- submit->fence = fence->seqno;
- gpu->active_fence = submit->fence;
+ submit->fence = dma_fence_get(fence);
+ gpu->active_fence = submit->fence->seqno;
if (gpu->lastctx != cmdbuf->ctx) {
gpu->mmu->need_flush = true;
hangcheck_timer_reset(gpu);
ret = 0;
+out_unlock:
mutex_unlock(&gpu->lock);
out_pm_put:
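Moving the fence allocation under gpu->lock is what makes the new lockdep assertion in etnaviv_gpu_fence_alloc() hold: seqnos are now handed out in the same order the jobs reach the ring, so completion order matches seqno order.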
#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
- u32 clock;
int ret;
ret = mutex_lock_killable(&gpu->lock);
if (ret)
return ret;
- clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
- etnaviv_gpu_load_clock(gpu, clock);
+ etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
gpu->switch_context = true;
}
#endif
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
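+ /*
+ * Seven cooling states: state N is written straight to gpu->freq_scale,
+ * so state 6 runs the core clock at roughly 1/64 of full speed (see
+ * etnaviv_gpu_update_clock()).
+ */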
+ *state = 6;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
+ *state = gpu->freq_scale;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
+ mutex_lock(&gpu->lock);
+ gpu->freq_scale = state;
+ if (!pm_runtime_suspended(gpu->dev))
+ etnaviv_gpu_update_clock(gpu);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+ .get_max_state = etnaviv_gpu_cooling_get_max_state,
+ .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+ .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
void *data)
{
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
+ gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+ (char *)dev_name(dev), gpu, &cooling_ops);
+ if (IS_ERR(gpu->cooling))
+ return PTR_ERR(gpu->cooling);
+
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
#else
ret = etnaviv_gpu_clk_enable(gpu);
#endif
- if (ret < 0)
+ if (ret < 0) {
+ thermal_cooling_device_unregister(gpu->cooling);
return ret;
+ }
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
}
gpu->drm = NULL;
+
+ thermal_cooling_device_unregister(gpu->cooling);
+ gpu->cooling = NULL;
}
static const struct component_ops gpu_ops = {
struct etnaviv_gpu {
struct drm_device *drm;
+ struct thermal_cooling_device *cooling;
struct device *dev;
struct mutex lock;
struct etnaviv_chip_identity identity;
u32 hangcheck_fence;
u32 hangcheck_dma_addr;
struct work_struct recover_work;
+ unsigned int freq_scale;
};
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
#endif
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive);
+ unsigned int context, bool exclusive, bool explicit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/bridge/analogix_dp.h>
static int exynos_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = NULL, *endpoint = NULL;
+ struct device_node *np;
struct exynos_dp_device *dp;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ int ret;
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
goto out;
}
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (endpoint) {
- np = of_graph_get_remote_port_parent(endpoint);
- if (np) {
- /* The remote port can be either a panel or a bridge */
- dp->plat_data.panel = of_drm_find_panel(np);
- if (!dp->plat_data.panel) {
- dp->ptn_bridge = of_drm_find_bridge(np);
- if (!dp->ptn_bridge) {
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- }
- of_node_put(np);
- } else {
- DRM_ERROR("no remote endpoint device node found.\n");
- return -EINVAL;
- }
- } else {
- DRM_ERROR("no port endpoint subnode found.\n");
- return -EINVAL;
- }
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, &bridge);
+ if (ret)
+ return ret;
+
+ /* The remote port can be either a panel or a bridge */
+ dp->plat_data.panel = panel;
+ dp->ptn_bridge = bridge;
out:
return component_add(&pdev->dev, &exynos_dp_ops);
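All of the conversions in this series lean on the same helper contract: drm_of_find_panel_or_bridge() resolves the remote device on a given port/endpoint of the OF graph and fills in exactly one of the two out-pointers. A sketch of the calling pattern, with the return values as this era of the helper documents them (an assumption worth checking against drm_of.c):

	struct drm_panel *panel = NULL;
	struct drm_bridge *bridge = NULL;
	int ret;

	ret = drm_of_find_panel_or_bridge(dev->of_node, 0 /* port */,
					  0 /* endpoint */, &panel, &bridge);
	if (ret)	/* -ENODEV: no remote device, -EPROBE_DEFER: not bound yet */
		return ret;

	if (panel)
		attach_panel(panel);	/* hypothetical driver hook */
	else
		attach_bridge(bridge);	/* hypothetical driver hook */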
FIMD_PORT_WRB,
};
-static struct device_node *exynos_dpi_of_find_panel_node(struct device *dev)
-{
- struct device_node *np, *ep;
-
- ep = of_graph_get_endpoint_by_regs(dev->of_node, FIMD_PORT_RGB, 0);
- if (!ep)
- return NULL;
-
- np = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
-
- return np;
-}
-
static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
{
struct device *dev = ctx->dev;
struct device_node *dn = dev->of_node;
struct device_node *np;
- ctx->panel_node = exynos_dpi_of_find_panel_node(dev);
+ ctx->panel_node = of_graph_get_remote_node(dn, FIMD_PORT_RGB, 0);
np = of_get_child_by_name(dn, "display-timings");
if (np) {
of_node_put(ep);
- ep = of_graph_get_next_endpoint(node, NULL);
- if (!ep) {
- ret = -EINVAL;
- goto end;
- }
+ dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
+ if (!dsi->bridge_node)
+ return -EINVAL;
- dsi->bridge_node = of_graph_get_remote_port_parent(ep);
- if (!dsi->bridge_node) {
- ret = -EINVAL;
- goto end;
- }
end:
of_node_put(ep);
writel(reg, mic->reg + MIC_OP);
}
-static struct device_node *get_remote_node(struct device_node *from, int reg)
-{
- struct device_node *endpoint = NULL, *remote_node = NULL;
-
- endpoint = of_graph_get_endpoint_by_regs(from, reg, -1);
- if (!endpoint) {
- DRM_ERROR("mic: Failed to find remote port from %s",
- from->full_name);
- goto exit;
- }
-
- remote_node = of_graph_get_remote_port_parent(endpoint);
- if (!remote_node) {
- DRM_ERROR("mic: Failed to find remote port parent from %s",
- from->full_name);
- goto exit;
- }
-
-exit:
- of_node_put(endpoint);
- return remote_node;
-}
-
static int parse_dt(struct exynos_mic *mic)
{
int ret = 0, i, j;
* The first node must be for decon and the second one must be for dsi.
*/
for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) {
- remote_node = get_remote_node(mic->dev->of_node, i);
+ remote_node = of_graph_get_remote_node(mic->dev->of_node, i, 0);
if (!remote_node) {
ret = -EPIPE;
goto exit;
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include "fsl_dcu_drm_drv.h"
return ret;
}
-static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
- const struct of_endpoint *ep)
-{
- struct drm_bridge *bridge;
- struct device_node *np;
-
- np = of_graph_get_remote_port_parent(ep->local_node);
-
- fsl_dev->connector.panel = of_drm_find_panel(np);
- if (fsl_dev->connector.panel) {
- of_node_put(np);
- return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
- }
-
- bridge = of_drm_find_bridge(np);
- of_node_put(np);
- if (!bridge)
- return -ENODEV;
-
- return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
-}
-
int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
{
- struct of_endpoint ep;
- struct device_node *ep_node, *panel_node;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
int ret;
/* This is for backward compatibility */
return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
}
- ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL);
- if (!ep_node)
- return -ENODEV;
-
- ret = of_graph_parse_endpoint(ep_node, &ep);
- of_node_put(ep_node);
+ ret = drm_of_find_panel_or_bridge(fsl_dev->np, 0, 0, &panel, &bridge);
if (ret)
- return -ENODEV;
+ return ret;
+
+ if (panel) {
+ fsl_dev->connector.panel = panel;
+ return fsl_dcu_attach_panel(fsl_dev, panel);
+ }
- return fsl_dcu_attach_endpoint(fsl_dev, &ep);
+ return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
}
}
int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
- u32 size)
+ u32 size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int i;
extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
extern void gma_crtc_load_lut(struct drm_crtc *crtc);
extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, u32 size);
+ u16 *blue, u32 size,
+ struct drm_modeset_acquire_ctx *ctx);
extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_graph.h>
#include <drm/drm_of.h>
#include <drm/drm_crtc_helper.h>
{
struct dsi_hw_ctx *ctx = dsi->ctx;
struct device_node *np = pdev->dev.of_node;
- struct device_node *endpoint, *bridge_node;
- struct drm_bridge *bridge;
struct resource *res;
+ int ret;
/*
* Get the endpoint node. In our case, dsi has one output port (port 1)
* to which the external HDMI bridge is connected.
*/
- endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
- if (!endpoint) {
- DRM_ERROR("no valid endpoint node\n");
- return -ENODEV;
- }
- of_node_put(endpoint);
-
- bridge_node = of_graph_get_remote_port_parent(endpoint);
- if (!bridge_node) {
- DRM_ERROR("no valid bridge node\n");
- return -ENODEV;
- }
- of_node_put(bridge_node);
-
- bridge = of_drm_find_bridge(bridge_node);
- if (!bridge) {
- DRM_INFO("wait for external HDMI bridge driver.\n");
- return -EPROBE_DEFER;
- }
- dsi->bridge = bridge;
+ ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+ if (ret)
+ return ret;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
.unbind = kirin_drm_unbind,
};
-static struct device_node *kirin_get_remote_node(struct device_node *np)
-{
- struct device_node *endpoint, *remote;
-
- /* get the first endpoint, in our case only one remote node
- * is connected to display controller.
- */
- endpoint = of_graph_get_next_endpoint(np, NULL);
- if (!endpoint) {
- DRM_ERROR("no valid endpoint node\n");
- return ERR_PTR(-ENODEV);
- }
-
- remote = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!remote) {
- DRM_ERROR("no valid remote node\n");
- return ERR_PTR(-ENODEV);
- }
-
- if (!of_device_is_available(remote)) {
- DRM_ERROR("not available for remote node\n");
- return ERR_PTR(-ENODEV);
- }
-
- return remote;
-}
-
static int kirin_drm_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
return -EINVAL;
}
- remote = kirin_get_remote_node(np);
+ remote = of_graph_get_remote_node(np, 0, 0);
if (IS_ERR(remote))
return PTR_ERR(remote);
{
int ret;
- if (vgpu->failsafe)
- return 0;
-
if (WARN_ON(bytes > 4))
return -EINVAL;
{
struct decode_info *d_info;
- if (ring_id >= I915_NUM_ENGINES)
- return INVALID_OP;
-
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
struct decode_info *d_info;
int i;
- if (ring_id >= I915_NUM_ENGINES)
- return;
-
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_check_mi_display_flip(s, info);
return -ENODEV;
}
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_update_plane_mmio_from_mi_display_flip(s, info);
return -ENODEV;
}
{
struct intel_gvt *gvt = s->vgpu->gvt;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
return 0;
t1 = get_cycles();
- memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+ s_before_advance_custom = *s;
if (info->handler) {
ret = info->handler(s);
unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
struct parser_exec_state s;
int ret = 0;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
- s.vgpu = wa_ctx->workload->vgpu;
- s.ring_id = wa_ctx->workload->ring_id;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
s.ring_tail = gma_tail;
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
- s.workload = wa_ctx->workload;
+ s.workload = workload;
ret = ip_gma_set(&s, gma_head);
if (ret)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
- obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
goto unmap_src;
}
- ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
- wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ ret = copy_gma_to_hva(workload->vgpu,
+ workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
if (ret < 0) {
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
#define DPCD_HEADER_SIZE 0xb
+/* Let the virtual display support DP 1.2 */
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
- 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
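Decoded, byte 0 is the DPCD revision (0x12 = DP 1.2) and byte 1 the max link rate (0x014 is just 0x14, i.e. 5.4 Gbps HBR2), which is what lets the guest drive the virtual display as a DP 1.2 sink.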
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
+ vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+ SKL_FUSE_DOWNLOAD_STATUS |
+ SKL_FUSE_PG0_DIST_STATUS |
+ SKL_FUSE_PG1_DIST_STATUS |
+ SKL_FUSE_PG2_DIST_STATUS;
+ vgpu_vreg(vgpu, LCPLL1_CTL) |=
+ LCPLL_PLL_ENABLE |
+ LCPLL_PLL_LOCK;
+ vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+
+ }
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK);
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_B << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK);
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_C << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK);
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_D << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
- if (IS_SKYLAKE(dev_priv) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
intel_vgpu_init_i2c_edid(vgpu);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else
unsigned char val = edid_get_byte(vgpu);
aux_data_for_write = (val << 16);
- }
+ } else
+ aux_data_for_write = (0xff << 16);
}
/* write the return value in AUX_CH_DATA reg which includes:
* ACK of I2C_WRITE
static int ring_id_to_context_switch_event(int ring_id)
{
- if (WARN_ON(ring_id < RCS && ring_id >
- ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(ring_id < RCS ||
+ ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
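The old check combined the two range tests with &&, so it could never fire (ring_id cannot be below RCS and above the array at once); the corrected || form, together with >=, also catches the off-by-one at the upper bound.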
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- int ring_id = wa_ctx->workload->ring_id;
- struct i915_gem_context *shadow_ctx =
- wa_ctx->workload->vgpu->shadow_ctx;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
- workload->wa_ctx.workload = workload;
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
}
if (emulate_schedule_in)
- memcpy(&workload->elsp_dwords,
- &vgpu->execlist[ring_id].elsp_dwords,
- sizeof(workload->elsp_dwords));
+ workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);
_EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
- ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+ ctx_status_ptr.read_ptr = 0;
+ ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
struct gvt_firmware_header *h;
void *firmware;
void *p;
- unsigned long size;
+ unsigned long size, crc32_start;
int i;
int ret;
- size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+ size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
memcpy(gvt->firmware.mmio, p, info->mmio_size);
+ crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
firmware_attr.size = size;
firmware_attr.private = firmware;
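The checksum deliberately covers everything after the crc32 field itself, so the value being written never feeds its own input. A consumer can verify a blob the same way; a sketch assuming the same gvt_firmware_header layout:

	/* fw_size is a hypothetical name for the total blob size */
	unsigned long crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
	bool valid = h->crc32 == crc32_le(0, (char *)h + crc32_start,
					  fw_size - crc32_start);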
firmware->mmio = mem;
- sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+ sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
pdev->revision);
ret = gtt_entry_p2m(vgpu, &e, &m);
if (ret) {
gvt_vgpu_err("fail to translate guest gtt entry\n");
- return ret;
+ /* The guest driver may read/write the entry during a partial
+ * update; in that situation p2m can fail, so set the shadow
+ * entry to point to a scratch page instead.
+ */
+ ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
}
} else {
m = e;
- m.val64 = 0;
+ ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
}
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
gvt_dbg_core("init gtt\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
u32 index;
u32 offset;
u32 num_entries;
struct intel_gvt_gtt_entry e;
+ intel_runtime_pm_get(dev_priv);
+
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
e.type = GTT_TYPE_GGTT_PTE;
ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+ intel_runtime_pm_put(dev_priv);
}
/**
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
+ .vgpu_activate = intel_gvt_activate_vgpu,
+ .vgpu_deactivate = intel_gvt_deactivate_vgpu,
};
/**
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
intel_gvt_emulate_vblank(gvt);
mutex_unlock(&gvt->lock);
}
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+ (void *)&gvt->service_request)) {
+ intel_gvt_schedule(gvt);
+ }
}
return 0;
idr_destroy(&gvt->vgpu_idr);
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
struct intel_gvt *gvt;
+ struct intel_vgpu *vgpu;
int ret;
/*
goto out_clean_types;
}
+ vgpu = intel_gvt_create_idle_vgpu(gvt);
+ if (IS_ERR(vgpu)) {
+ ret = PTR_ERR(vgpu);
+ gvt_err("failed to create idle vgpu\n");
+ goto out_clean_types;
+ }
+ gvt->idle_vgpu = vgpu;
+
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
struct intel_vgpu_sbi sbi;
};
+struct vgpu_sched_ctl {
+ int weight;
+};
+
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
bool failsafe;
bool resetting;
void *sched_data;
+ struct vgpu_sched_ctl sched_ctl;
struct intel_vgpu_fence fence;
struct intel_vgpu_gm gm;
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
+ ktime_t last_ctx_submit_time;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
unsigned int low_gm_size;
unsigned int high_gm_size;
unsigned int fence;
+ unsigned int weight;
enum intel_vgpu_edid resolution;
};
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
+ struct intel_vgpu *idle_vgpu;
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
enum {
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+ INTEL_GVT_REQUEST_SCHED = 1,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
__u64 resolution;
__s32 primary;
__u64 vgpu_id;
+
+ __u32 weight;
};
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
struct intel_vgpu_type *);
void (*vgpu_destroy)(struct intel_vgpu *);
void (*vgpu_reset)(struct intel_vgpu *);
+ void (*vgpu_activate)(struct intel_vgpu *);
+ void (*vgpu_deactivate)(struct intel_vgpu *);
};
return D_BDW;
else if (IS_SKYLAKE(gvt->dev_priv))
return D_SKL;
+ else if (IS_KABYLAKE(gvt->dev_priv))
+ return D_KBL;
return 0;
}
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
- offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+ if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
return 0;
}
+static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
}
break;
case SKL_PCODE_CDCLK_CONTROL:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
+ vgpu->last_ctx_submit_time = ktime_get();
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
gvt_vgpu_err("fail submit workload on ring %d\n",
MMIO_D(0x7180, D_ALL);
MMIO_D(0x7408, D_ALL);
MMIO_D(0x7c00, D_ALL);
- MMIO_D(GEN6_MBCTL, D_ALL);
+ MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
MMIO_D(0x911c, D_ALL);
MMIO_D(0x9120, D_ALL);
MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
- MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
- MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+ MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
+ skl_power_well_ctl_write);
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
- MMIO_D(0x45504, D_SKL);
- MMIO_D(0x45520, D_SKL);
- MMIO_D(0x46000, D_SKL);
- MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
- MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
- MMIO_D(0x6C040, D_SKL);
- MMIO_D(0x6C048, D_SKL);
- MMIO_D(0x6C050, D_SKL);
- MMIO_D(0x6C044, D_SKL);
- MMIO_D(0x6C04C, D_SKL);
- MMIO_D(0x6C054, D_SKL);
- MMIO_D(0x6c058, D_SKL);
- MMIO_D(0x6c05c, D_SKL);
- MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
-
- MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
- MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
- MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
- MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
- MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
-
- MMIO_D(0x70380, D_SKL);
- MMIO_D(0x71380, D_SKL);
- MMIO_D(0x72380, D_SKL);
- MMIO_D(0x7039c, D_SKL);
-
- MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_D(0x8f074, D_SKL);
- MMIO_D(0x8f004, D_SKL);
- MMIO_D(0x8f034, D_SKL);
-
- MMIO_D(0xb11c, D_SKL);
-
- MMIO_D(0x51000, D_SKL);
- MMIO_D(0x6c00c, D_SKL);
-
- MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_D(0xd08, D_SKL);
- MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_D(0x45504, D_SKL_PLUS);
+ MMIO_D(0x45520, D_SKL_PLUS);
+ MMIO_D(0x46000, D_SKL_PLUS);
+ MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_D(0x6C040, D_SKL | D_KBL);
+ MMIO_D(0x6C048, D_SKL | D_KBL);
+ MMIO_D(0x6C050, D_SKL | D_KBL);
+ MMIO_D(0x6C044, D_SKL | D_KBL);
+ MMIO_D(0x6C04C, D_SKL | D_KBL);
+ MMIO_D(0x6C054, D_SKL | D_KBL);
+ MMIO_D(0x6c058, D_SKL | D_KBL);
+ MMIO_D(0x6c05c, D_SKL | D_KBL);
+ MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);
+
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_D(0x70380, D_SKL_PLUS);
+ MMIO_D(0x71380, D_SKL_PLUS);
+ MMIO_D(0x72380, D_SKL_PLUS);
+ MMIO_D(0x7039c, D_SKL_PLUS);
+
+ MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(0x8f074, D_SKL | D_KBL);
+ MMIO_D(0x8f004, D_SKL | D_KBL);
+ MMIO_D(0x8f034, D_SKL | D_KBL);
+
+ MMIO_D(0xb11c, D_SKL | D_KBL);
+
+ MMIO_D(0x51000, D_SKL | D_KBL);
+ MMIO_D(0x6c00c, D_SKL_PLUS);
+
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+
+ MMIO_D(0xd08, D_SKL_PLUS);
+ MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+ MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
- MMIO_D(0x45008, D_SKL);
+ MMIO_D(0x45008, D_SKL | D_KBL);
- MMIO_D(0x46430, D_SKL);
+ MMIO_D(0x46430, D_SKL | D_KBL);
- MMIO_D(0x46520, D_SKL);
+ MMIO_D(0x46520, D_SKL | D_KBL);
- MMIO_D(0xc403c, D_SKL);
- MMIO_D(0xb004, D_SKL);
+ MMIO_D(0xc403c, D_SKL | D_KBL);
+ MMIO_D(0xb004, D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(0x65900, D_SKL);
- MMIO_D(0x1082c0, D_SKL);
- MMIO_D(0x4068, D_SKL);
- MMIO_D(0x67054, D_SKL);
- MMIO_D(0x6e560, D_SKL);
- MMIO_D(0x6e554, D_SKL);
- MMIO_D(0x2b20, D_SKL);
- MMIO_D(0x65f00, D_SKL);
- MMIO_D(0x65f08, D_SKL);
- MMIO_D(0x320f0, D_SKL);
-
- MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x70034, D_SKL);
- MMIO_D(0x71034, D_SKL);
- MMIO_D(0x72034, D_SKL);
-
- MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
-
- MMIO_D(0x44500, D_SKL);
+ MMIO_D(0x65900, D_SKL_PLUS);
+ MMIO_D(0x1082c0, D_SKL | D_KBL);
+ MMIO_D(0x4068, D_SKL | D_KBL);
+ MMIO_D(0x67054, D_SKL | D_KBL);
+ MMIO_D(0x6e560, D_SKL | D_KBL);
+ MMIO_D(0x6e554, D_SKL | D_KBL);
+ MMIO_D(0x2b20, D_SKL | D_KBL);
+ MMIO_D(0x65f00, D_SKL | D_KBL);
+ MMIO_D(0x65f08, D_SKL | D_KBL);
+ MMIO_D(0x320f0, D_SKL | D_KBL);
+
+ MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0x70034, D_SKL_PLUS);
+ MMIO_D(0x71034, D_SKL_PLUS);
+ MMIO_D(0x72034, D_SKL_PLUS);
+
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);
+
+ MMIO_D(0x44500, D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
+
+ MMIO_D(0x4ab8, D_KBL);
+ MMIO_D(0x940c, D_SKL_PLUS);
+ MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+ MMIO_D(0x4ab0, D_SKL | D_KBL);
+ MMIO_D(0x20d4, D_SKL | D_KBL);
+
return 0;
}
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_SKYLAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)) {
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv)) {
+ } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
irq->ops = &gen8_irq_ops;
irq->irq_map = gen8_irq_map;
} else {
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n",
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
BYTES_TO_MB(type->low_gm_size),
BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution));
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
}
static MDEV_TYPE_ATTR_RO(available_instances);
if (ret)
goto undo_group;
+ intel_gvt_ops->vgpu_activate(vgpu);
+
atomic_set(&vgpu->vdev.released, 0);
return ret;
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
+ intel_gvt_ops->vgpu_deactivate(vgpu);
+
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
return 0;
}
+static ssize_t
+vgpu_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+
+ if (mdev) {
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)
+ mdev_get_drvdata(mdev);
+ return sprintf(buf, "%d\n", vgpu->id);
+ }
+ return sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(vgpu_id);
+
+static struct attribute *intel_vgpu_attrs[] = {
+ &dev_attr_vgpu_id.attr,
+ NULL
+};
+
+static const struct attribute_group intel_vgpu_group = {
+ .name = "intel_vgpu",
+ .attrs = intel_vgpu_attrs,
+};
+
+static const struct attribute_group *intel_vgpu_groups[] = {
+ &intel_vgpu_group,
+ NULL,
+};
+
static const struct mdev_parent_ops intel_vgpu_ops = {
.supported_type_groups = intel_vgpu_type_groups,
+ .mdev_attr_groups = intel_vgpu_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
vgpu->handle = (unsigned long)info;
info->vgpu = vgpu;
info->kvm = kvm;
+ kvm_get_kvm(info->kvm);
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
- struct intel_vgpu *vgpu = info->vgpu;
-
- if (!info) {
- gvt_vgpu_err("kvmgt_guest_info invalid\n");
- return false;
- }
-
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+ kvm_put_kvm(info->kvm);
kvmgt_protect_table_destroy(info);
gvt_cache_destroy(info->vgpu);
vfree(info);
#define D_HSW (1 << 2)
#define D_BDW (1 << 3)
#define D_SKL (1 << 4)
+#define D_KBL (1 << 5)
-#define D_GEN9PLUS (D_SKL)
-#define D_GEN8PLUS (D_BDW | D_SKL)
-#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
-#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_GEN9PLUS (D_SKL | D_KBL)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_SKL_PLUS (D_SKL)
-#define D_BDW_PLUS (D_BDW | D_SKL)
-#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
-#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_SKL_PLUS (D_SKL | D_KBL)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
-#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
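Folding D_KBL into the *_PLUS masks means every existing MMIO_D(..., D_SKL_PLUS) entry picks up Kabylake for free, while registers that genuinely differ keep an explicit D_SKL | D_KBL. The gating this implies, as a sketch against intel_gvt_get_device_type() (the device field name is assumed from the struct below):

	/* Register the handler only when the running device is in the mask. */
	if (info->device & intel_gvt_get_device_type(gvt))
		/* ... hook up read/write handlers for info->offset ... */;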
struct intel_gvt_mmio_info {
u32 offset;
u32 value;
};
-static struct render_mmio gen8_render_mmio_list[] = {
+static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
{BCS, _MMIO(0x22028), 0x0, false},
};
-static struct render_mmio gen9_render_mmio_list[] = {
+static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
{VCS2, _MMIO(0x1c028), 0xffff, false},
{VECS, _MMIO(0x1a028), 0xffff, false},
+
+ {RCS, _MMIO(0x7304), 0xffff, true},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x940c), 0x0, false},
+ {RCS, _MMIO(0x4ab8), 0x0, false},
+
+ {RCS, _MMIO(0x4ab0), 0x0, false},
+ {RCS, _MMIO(0x20d4), 0x0, false},
+
+ {RCS, _MMIO(0xb004), 0x0, false},
+ {RCS, _MMIO(0x20a0), 0x0, false},
+ {RCS, _MMIO(0x20e4), 0xffff, false},
};
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (!IS_SKYLAKE(dev_priv))
- return;
-
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ(offset);
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
gen9_render_mocs_L3[i] = I915_READ(l3_offset);
- I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset));
+ I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
POSTING_READ(l3_offset);
l3_offset.reg += 4;
}
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (!IS_SKYLAKE(dev_priv))
- return;
-
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
vgpu_vreg(vgpu, offset) = I915_READ(offset);
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
load_mocs(vgpu, ring_id);
u32 v;
int i, array_size;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
restore_mocs(vgpu, ring_id);
return false;
}
+struct vgpu_sched_data {
+ struct list_head lru_list;
+ struct intel_vgpu *vgpu;
+
+ ktime_t sched_in_time;
+ ktime_t sched_out_time;
+ ktime_t sched_time;
+ ktime_t left_ts;
+ ktime_t allocated_ts;
+
+ struct vgpu_sched_ctl sched_ctl;
+};
+
+struct gvt_sched_data {
+ struct intel_gvt *gvt;
+ struct hrtimer timer;
+ unsigned long period;
+ struct list_head lru_runq_head;
+};
+
+static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+{
+ ktime_t delta_ts;
+ struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+
+ delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+
+ vgpu_data->sched_time += delta_ts;
+ vgpu_data->left_ts -= delta_ts;
+}
+
+#define GVT_TS_BALANCE_PERIOD_MS 100
+#define GVT_TS_BALANCE_STAGE_NUM 10
+
+static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
+{
+ struct vgpu_sched_data *vgpu_data;
+ struct list_head *pos;
+ static uint64_t stage_check;
+ int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
+
+	/* The timeslice accumulation is reset at stage 0, where each
+	 * timeslice is allocated afresh without carrying over any
+	 * previous debt.
+	 */
+ if (stage == 0) {
+ int total_weight = 0;
+ ktime_t fair_timeslice;
+
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+ total_weight += vgpu_data->sched_ctl.weight;
+ }
+
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+ fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
+ vgpu_data->sched_ctl.weight /
+ total_weight;
+
+ vgpu_data->allocated_ts = fair_timeslice;
+ vgpu_data->left_ts = vgpu_data->allocated_ts;
+ }
+ } else {
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+
+			/* The timeslice for the next 100ms should include
+			 * any leftover or debt slice from previous stages.
+			 */
+ vgpu_data->left_ts += vgpu_data->allocated_ts;
+ }
+ }
+}
+
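As an editorial aside, here is a minimal user-space sketch (not part of the patch) of the stage-0 math above: each vGPU receives period * weight / total_weight of the 100ms balance period.

    #include <stdio.h>

    /* Sketch only: three vGPUs with weights 8, 4 and 4 sharing a 100ms
     * balance period, mirroring gvt_balance_timeslice() at stage 0.
     */
    int main(void)
    {
            const long period_ms = 100;     /* GVT_TS_BALANCE_PERIOD_MS */
            const int weight[] = { 8, 4, 4 };
            int total = 0, i;

            for (i = 0; i < 3; i++)
                    total += weight[i];     /* total_weight == 16 */

            for (i = 0; i < 3; i++)         /* prints 50, 25, 25 */
                    printf("vGPU%d: %ld ms\n", i,
                           period_ms * weight[i] / total);
            return 0;
    }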
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id i;
struct intel_engine_cs *engine;
+ struct vgpu_sched_data *vgpu_data;
+ ktime_t cur_time;
/* no target to schedule */
if (!scheduler->next_vgpu)
return;
- gvt_dbg_sched("try to schedule next vgpu %d\n",
- scheduler->next_vgpu->id);
-
/*
* after the flag is set, workload dispatch thread will
* stop dispatching workload for current vgpu
/* still have uncompleted workload? */
for_each_engine(engine, gvt->dev_priv, i) {
- if (scheduler->current_workload[i]) {
- gvt_dbg_sched("still have running workload\n");
+ if (scheduler->current_workload[i])
return;
- }
}
- gvt_dbg_sched("switch to next vgpu %d\n",
- scheduler->next_vgpu->id);
+ cur_time = ktime_get();
+ if (scheduler->current_vgpu) {
+ vgpu_data = scheduler->current_vgpu->sched_data;
+ vgpu_data->sched_out_time = cur_time;
+ vgpu_update_timeslice(scheduler->current_vgpu);
+ }
+ vgpu_data = scheduler->next_vgpu->sched_data;
+ vgpu_data->sched_in_time = cur_time;
/* switch current vgpu */
scheduler->current_vgpu = scheduler->next_vgpu;
wake_up(&scheduler->waitq[i]);
}
-struct tbs_vgpu_data {
- struct list_head list;
- struct intel_vgpu *vgpu;
- /* put some per-vgpu sched stats here */
-};
-
-struct tbs_sched_data {
- struct intel_gvt *gvt;
- struct delayed_work work;
- unsigned long period;
- struct list_head runq_head;
-};
-
-#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
-
-static void tbs_sched_func(struct work_struct *work)
+static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
- struct tbs_sched_data *sched_data = container_of(work,
- struct tbs_sched_data, work.work);
- struct tbs_vgpu_data *vgpu_data;
-
- struct intel_gvt *gvt = sched_data->gvt;
- struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-
+ struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
- struct list_head *pos, *head;
-
- mutex_lock(&gvt->lock);
-
- /* no vgpu or has already had a target */
- if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
- goto out;
-
- if (scheduler->current_vgpu) {
- vgpu_data = scheduler->current_vgpu->sched_data;
- head = &vgpu_data->list;
- } else {
- head = &sched_data->runq_head;
- }
+ struct list_head *head = &sched_data->lru_runq_head;
+ struct list_head *pos;
/* search a vgpu with pending workload */
list_for_each(pos, head) {
- if (pos == &sched_data->runq_head)
- continue;
- vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
continue;
- vgpu = vgpu_data->vgpu;
- break;
+ /* Return the vGPU only if it has time slice left */
+ if (vgpu_data->left_ts > 0) {
+ vgpu = vgpu_data->vgpu;
+ break;
+ }
}
+ return vgpu;
+}
+
+/* in nanoseconds */
+#define GVT_DEFAULT_TIME_SLICE 1000000
+
+static void tbs_sched_func(struct gvt_sched_data *sched_data)
+{
+ struct intel_gvt *gvt = sched_data->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct vgpu_sched_data *vgpu_data;
+ struct intel_vgpu *vgpu = NULL;
+ static uint64_t timer_check;
+
+ if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+ gvt_balance_timeslice(sched_data);
+
+	/* no active vgpu, or a target has already been chosen */
+ if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
+ goto out;
+
+ vgpu = find_busy_vgpu(sched_data);
if (vgpu) {
scheduler->next_vgpu = vgpu;
- gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+
+ /* Move the last used vGPU to the tail of lru_list */
+ vgpu_data = vgpu->sched_data;
+ list_del_init(&vgpu_data->lru_list);
+ list_add_tail(&vgpu_data->lru_list,
+ &sched_data->lru_runq_head);
+ } else {
+ scheduler->next_vgpu = gvt->idle_vgpu;
}
out:
- if (scheduler->next_vgpu) {
- gvt_dbg_sched("try to schedule next vgpu %d\n",
- scheduler->next_vgpu->id);
+ if (scheduler->next_vgpu)
try_to_schedule_next_vgpu(gvt);
- }
+}
- /*
- * still have vgpu on runq
- * or last schedule haven't finished due to running workload
- */
- if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
- schedule_delayed_work(&sched_data->work, sched_data->period);
+void intel_gvt_schedule(struct intel_gvt *gvt)
+{
+ struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+ mutex_lock(&gvt->lock);
+ tbs_sched_func(sched_data);
mutex_unlock(&gvt->lock);
}
+static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
+{
+ struct gvt_sched_data *data;
+
+ data = container_of(timer_data, struct gvt_sched_data, timer);
+
+ intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
+
+ hrtimer_add_expires_ns(&data->timer, data->period);
+
+ return HRTIMER_RESTART;
+}
+
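The delayed work item is replaced by a self-rearming hrtimer; stripped to its essentials, the pattern looks like the sketch below (my_data and my_tick are illustrative names, not from the patch).

    /* Sketch of the periodic hrtimer pattern used by the scheduler. */
    static enum hrtimer_restart my_tick(struct hrtimer *t)
    {
            struct my_data *d = container_of(t, struct my_data, timer);

            /* do cheap work or kick a worker; never sleep in hrtimer context */
            hrtimer_add_expires_ns(t, d->period);  /* re-arm from last expiry */
            return HRTIMER_RESTART;
    }

    hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    d->timer.function = my_tick;
    hrtimer_start(&d->timer, ktime_add_ns(ktime_get(), d->period),
                  HRTIMER_MODE_ABS);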
static int tbs_sched_init(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
- struct tbs_sched_data *data;
+ struct gvt_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- INIT_LIST_HEAD(&data->runq_head);
- INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ INIT_LIST_HEAD(&data->lru_runq_head);
+ hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ data->timer.function = tbs_timer_fn;
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
scheduler->sched_data = data;
+
return 0;
}
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
- struct tbs_sched_data *data = scheduler->sched_data;
+ struct gvt_sched_data *data = scheduler->sched_data;
+
+ hrtimer_cancel(&data->timer);
- cancel_delayed_work(&data->work);
kfree(data);
scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
- struct tbs_vgpu_data *data;
+ struct vgpu_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->sched_ctl.weight = vgpu->sched_ctl.weight;
data->vgpu = vgpu;
- INIT_LIST_HEAD(&data->list);
+ INIT_LIST_HEAD(&data->lru_list);
vgpu->sched_data = data;
+
return 0;
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
- struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
- struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+ struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- if (!list_empty(&vgpu_data->list))
+ if (!list_empty(&vgpu_data->lru_list))
return;
- list_add_tail(&vgpu_data->list, &sched_data->runq_head);
- schedule_delayed_work(&sched_data->work, 0);
+ list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+
+ if (!hrtimer_active(&sched_data->timer))
+ hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
+ sched_data->period), HRTIMER_MODE_ABS);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
- struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- list_del_init(&vgpu_data->list);
+ list_del_init(&vgpu_data->lru_list);
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
void (*stop_schedule)(struct intel_vgpu *vgpu);
};
+void intel_gvt_schedule(struct intel_gvt *gvt);
+
int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
return 0;
}
+static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+{
+ return i915_gem_context_force_single_submission(req->ctx);
+}
+
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
- if (unlikely(!workload))
+ if (!is_gvt_request(req) || unlikely(!workload))
return NOTIFY_OK;
switch (action) {
goto out;
}
- if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
- gvt_dbg_sched("ring id %d stop - no available workload\n",
- ring_id);
+ if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
goto out;
- }
/*
	 * still have current workload, maybe the workload dispatcher
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) ||
+			       IS_KABYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
};
struct intel_shadow_wa_ctx {
- struct intel_vgpu_workload *workload;
struct shadow_indirect_ctx indirect_ctx;
struct shadow_per_ctx per_ctx;
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+#define VGPU_MAX_WEIGHT 16
+#define VGPU_WEIGHT(vgpu_num) \
+ (VGPU_MAX_WEIGHT / (vgpu_num))
+
static struct {
unsigned int low_mm;
unsigned int high_mm;
unsigned int fence;
+
+	/* A vGPU with a weight of 8 will get twice as much GPU time as a
+	 * vGPU with a weight of 4 on a contended host; each vGPU type is
+	 * assigned a different weight. Legal weights range from 1 to 16.
+ */
+ unsigned int weight;
enum intel_vgpu_edid edid;
char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
- { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
- { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
- { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
- { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+ { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
+ { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
+ { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
+ { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
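Worked out against VGPU_MAX_WEIGHT above (editorial note, not in the patch), the table assigns:

    VGPU_WEIGHT(8) == 16 / 8 == 2    /* GVTg_V5_8 */
    VGPU_WEIGHT(4) == 16 / 4 == 4    /* GVTg_V5_4 */
    VGPU_WEIGHT(2) == 16 / 2 == 8    /* GVTg_V5_2 */
    VGPU_WEIGHT(1) == 16 / 1 == 16   /* GVTg_V5_1 */

so on a contended host each step down the table doubles a vGPU's share of GPU time.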
/**
gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
gvt->types[i].fence = vgpu_types[i].fence;
+
+ if (vgpu_types[i].weight < 1 ||
+ vgpu_types[i].weight > VGPU_MAX_WEIGHT)
+ return -EINVAL;
+
+ gvt->types[i].weight = vgpu_types[i].weight;
gvt->types[i].resolution = vgpu_types[i].edid;
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
- gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+ gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
gvt->types[i].avail_instance,
gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence,
+ gvt->types[i].weight,
vgpu_edid_str(gvt->types[i].resolution));
}
}
/**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_activate_vgpu - activate a virtual GPU
* @vgpu: virtual GPU
*
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when user wants to activate a virtual GPU.
*
*/
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+ mutex_lock(&vgpu->gvt->lock);
+ vgpu->active = true;
+ mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&gvt->lock);
vgpu->active = false;
- idr_remove(&gvt->vgpu_idr, vgpu->id);
if (atomic_read(&vgpu->running_workload_num)) {
mutex_unlock(&gvt->lock);
}
intel_vgpu_stop_schedule(vgpu);
+
+ mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ mutex_lock(&gvt->lock);
+
+ WARN(vgpu->active, "vGPU is still active!\n");
+
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
mutex_unlock(&gvt->lock);
}
+#define IDLE_VGPU_IDR 0
+
+/**
+ * intel_gvt_create_idle_vgpu - create an idle virtual GPU
+ * @gvt: GVT device
+ *
+ * This function is called when user wants to create an idle virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ enum intel_engine_id i;
+ int ret;
+
+ vgpu = vzalloc(sizeof(*vgpu));
+ if (!vgpu)
+ return ERR_PTR(-ENOMEM);
+
+ vgpu->id = IDLE_VGPU_IDR;
+ vgpu->gvt = gvt;
+
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+ ret = intel_vgpu_init_sched_policy(vgpu);
+ if (ret)
+ goto out_free_vgpu;
+
+ vgpu->active = false;
+
+ return vgpu;
+
+out_free_vgpu:
+ vfree(vgpu);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy an idle virtual GPU.
+ *
+ */
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_vgpu_clean_sched_policy(vgpu);
+ vfree(vgpu);
+}
+
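A plausible call site (illustrative sketch only; the actual hook-up lands elsewhere in the series) pairs the two entry points around GVT init and teardown, matching the scheduler's fallback to gvt->idle_vgpu above:

    /* Sketch: create the idle vGPU at init time, destroy it at teardown.
     * The error label is hypothetical.
     */
    gvt->idle_vgpu = intel_gvt_create_idle_vgpu(gvt);
    if (IS_ERR(gvt->idle_vgpu)) {
            ret = PTR_ERR(gvt->idle_vgpu);
            goto out_clean_types;
    }
    ...
    intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);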
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *param)
{
mutex_lock(&gvt->lock);
- ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+ ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
+ GFP_KERNEL);
if (ret < 0)
goto out_free_vgpu;
vgpu->id = ret;
vgpu->handle = param->handle;
vgpu->gvt = gvt;
+ vgpu->sched_ctl.weight = param->weight;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
intel_vgpu_init_cfg_space(vgpu, param->primary);
if (ret)
goto out_clean_shadow_ctx;
- vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
param.fence_sz = type->fence;
+ param.weight = type->weight;
param.resolution = type->resolution;
/* XXX current param based on MB */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
+ struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_state *gpu;
- gpu = i915_capture_gpu_state(inode->i_private);
+ intel_runtime_pm_get(i915);
+ gpu = i915_capture_gpu_state(i915);
+ intel_runtime_pm_put(i915);
if (!gpu)
return -ENOMEM;
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_uncore_forcewake_domain *fw_domain;
+ unsigned int tmp;
- spin_lock_irq(&dev_priv->uncore.lock);
- for_each_fw_domain(fw_domain, dev_priv) {
+ for_each_fw_domain(fw_domain, i915, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
- fw_domain->wake_count);
- }
- spin_unlock_irq(&dev_priv->uncore.lock);
+ READ_ONCE(fw_domain->wake_count));
return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
- ring->space, ring->head, ring->tail,
- ring->last_retired_head);
+ seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
+ ring->space, ring->head, ring->tail);
}
static int i915_context_status(struct seq_file *m, void *unused)
enum intel_engine_id id;
uint64_t tot = 0;
- seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
- client->priority, client->ctx_index, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+ seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
+ client->priority, client->stage_id, client->proc_desc_offset);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
}
seq_printf(m, "Doorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+ seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
static int
i915_wedged_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
+ struct drm_i915_private *i915 = data;
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
/*
* There is no safeguard against this debugfs entry colliding
* while it is writing to 'i915_wedged'
*/
- if (i915_reset_backoff(&dev_priv->gpu_error))
+ if (i915_reset_backoff(&i915->gpu_error))
return -EAGAIN;
- i915_handle_error(dev_priv, val,
- "Manually setting wedged to %llu", val);
+ for_each_engine_masked(engine, i915, val, tmp) {
+ engine->hangcheck.seqno = intel_engine_get_seqno(engine);
+ engine->hangcheck.stalled = true;
+ }
+
+ i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
- wait_on_bit(&dev_priv->gpu_error.flags,
+ wait_on_bit(&i915->gpu_error.flags,
I915_RESET_HANDOFF,
TASK_UNINTERRUPTIBLE);
if (err)
goto err_unlock;
- /* Retire to kick idle work */
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
-
*irq = val;
mutex_unlock(&i915->drm.struct_mutex);
goto unlock;
}
- if (val & (DROP_RETIRE | DROP_ACTIVE))
+ if (val & DROP_RETIRE)
i915_gem_retire_requests(dev_priv);
lockdep_set_current_reclaim_state(GFP_KERNEL);
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_context_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_irq;
+ goto cleanup_uc;
intel_modeset_gem_init(dev);
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
+cleanup_uc:
+ intel_uc_fini_fw(dev_priv);
cleanup_irq:
- intel_guc_fini(dev_priv);
- intel_huc_fini(dev_priv);
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
cleanup_csr:
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
- intel_guc_fini(dev_priv);
- intel_huc_fini(dev_priv);
i915_gem_fini(dev_priv);
+ intel_uc_fini_fw(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
goto out;
}
- intel_guc_suspend(dev_priv);
-
intel_display_suspend(dev);
intel_dp_mst_suspend(dev);
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
+static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
+ u32 mask, u32 val)
+{
+ /* The HW does not like us polling for PW_STATUS frequently, so
+ * use the sleeping loop rather than risk the busy spin within
+ * intel_wait_for_register().
+ *
+ * Transitioning between RC6 states should be at most 2ms (see
+ * valleyview_enable_rps) so use a 3ms timeout.
+ */
+ return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
+ 3);
+}
+
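wait_for() is i915's sleeping poll: it re-evaluates the condition between short sleeps until the millisecond budget runs out, which is gentler on the hardware than the busy spin inside intel_wait_for_register(). A hedged usage sketch (the register, mask and expected value are placeholders):

    /* Sketch: sleeping poll with a 3ms budget, as in vlv_wait_for_pw_status(). */
    if (wait_for((I915_READ_NOTRACE(SOME_STATUS_REG) & SOME_MASK) == expected, 3))
            DRM_ERROR("status poll timed out\n");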
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
u32 val;
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
+ u32 mask;
u32 val;
- int err = 0;
+ int err;
val = I915_READ(VLV_GTLC_WAKE_CTRL);
val &= ~VLV_GTLC_ALLOWWAKEREQ;
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
POSTING_READ(VLV_GTLC_WAKE_CTRL);
- err = intel_wait_for_register(dev_priv,
- VLV_GTLC_PW_STATUS,
- VLV_GTLC_ALLOWWAKEACK,
- allow,
- 1);
+ mask = VLV_GTLC_ALLOWWAKEACK;
+ val = allow ? mask : 0;
+
+ err = vlv_wait_for_pw_status(dev_priv, mask, val);
if (err)
DRM_ERROR("timeout disabling GT waking\n");
return err;
}
-static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
- bool wait_for_on)
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
{
u32 mask;
u32 val;
- int err;
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
val = wait_for_on ? mask : 0;
- if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
- return 0;
-
- DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
- onoff(wait_for_on),
- I915_READ(VLV_GTLC_PW_STATUS));
/*
* RC6 transitioning can be delayed up to 2 msec (see
* valleyview_enable_rps), use 3 msec for safety.
*/
- err = intel_wait_for_register(dev_priv,
- VLV_GTLC_PW_STATUS, mask, val,
- 3);
- if (err)
+ if (vlv_wait_for_pw_status(dev_priv, mask, val))
DRM_ERROR("timeout waiting for GT wells to go %s\n",
onoff(wait_for_on));
-
- return err;
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
* Bspec defines the following GT well on flags as debug only, so
* don't treat them as hard failures.
*/
- (void)vlv_wait_for_gt_wells(dev_priv, false);
+ vlv_wait_for_gt_wells(dev_priv, false);
mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170320"
-#define DRIVER_TIMESTAMP 1489994464
-
-#undef WARN_ON
-/* Many gcc seem to no see through this and fall over :( */
-#if 0
-#define WARN_ON(x) ({ \
- bool __i915_warn_cond = (x); \
- if (__builtin_constant_p(__i915_warn_cond)) \
- BUILD_BUG_ON(__i915_warn_cond); \
- WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
-#else
-#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-#endif
-
-#undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
-
-#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
- (long) (x), __func__);
+#define DRIVER_DATE "20170403"
+#define DRIVER_TIMESTAMP 1491198738
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
};
enum forcewake_domains {
- FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
- FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
- FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
FORCEWAKE_ALL = (FORCEWAKE_RENDER |
FORCEWAKE_BLITTER |
FORCEWAKE_MEDIA)
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
+ enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-
- uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
-
- void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint8_t val, bool trace);
- void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint16_t val, bool trace);
- void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint32_t val, bool trace);
+ enum forcewake_domains domains);
+
+ uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+
+ void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint8_t val, bool trace);
+ void (*mmio_writew)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint16_t val, bool trace);
+ void (*mmio_writel)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint32_t val, bool trace);
};
struct intel_forcewake_range {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ u32 fw_set;
+ u32 fw_clear;
+ u32 fw_reset;
+
struct intel_uncore_forcewake_domain {
- struct drm_i915_private *i915;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned wake_count;
struct hrtimer timer;
i915_reg_t reg_set;
- u32 val_set;
- u32 val_clear;
i915_reg_t reg_ack;
- i915_reg_t reg_post;
- u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
int unclaimed_mmio_check;
};
+#define __mask_next_bit(mask) ({ \
+ int __idx = ffs(mask) - 1; \
+ mask &= ~BIT(__idx); \
+ __idx; \
+})
+
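__mask_next_bit() destructively pops the lowest set bit from its argument, which is what lets the new iterator below walk a forcewake mask without a per-step conditional. A small sketch:

    unsigned int mask = 0xa;            /* bits 1 and 3 set */
    int idx;

    idx = __mask_next_bit(mask);        /* idx == 1, mask is now 0x8 */
    idx = __mask_next_bit(mask);        /* idx == 3, mask is now 0x0 */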
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
- for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
- (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
- (domain__)++) \
- for_each_if ((mask__) & (domain__)->mask)
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+ for (tmp__ = (mask__); \
+ tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
-#define for_each_fw_domain(domain__, dev_priv__) \
- for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
+#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
+ for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
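Callers now pass a scratch variable that the iterator consumes bit by bit, e.g. (sketch; the per-domain operation is hypothetical):

    struct intel_uncore_forcewake_domain *domain;
    unsigned int tmp;

    for_each_fw_domain_masked(domain, FORCEWAKE_RENDER | FORCEWAKE_MEDIA,
                              dev_priv, tmp)
            do_something_with(domain);  /* hypothetical per-domain op */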
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
func(has_resource_streamer); \
func(has_runtime_pm); \
func(has_snoop); \
+ func(unfenced_needs_alignment); \
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
(id__)++) \
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
-#define __mask_next_bit(mask) ({ \
- int __idx = ffs(mask) - 1; \
- mask &= ~BIT(__idx); \
- __idx; \
-})
-
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
+static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
+static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
st->nents = 0;
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (IS_ERR(page)) {
+ if (unlikely(IS_ERR(page))) {
i915_gem_shrink(dev_priv,
page_count,
I915_SHRINK_BOUND |
I915_SHRINK_PURGEABLE);
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
}
- if (IS_ERR(page)) {
+ if (unlikely(IS_ERR(page))) {
+ gfp_t reclaim;
+
/* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
+ *
+ * However, since graphics tend to be disposable,
+ * defer the oom here by reporting the ENOMEM back
+ * to userspace.
*/
- page = shmem_read_mapping_page(mapping, i);
+ reclaim = mapping_gfp_mask(mapping);
+ reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
+
+ page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_sg;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
+ /* Retire completed requests first so the list of inflight/incomplete
+ * requests is accurate and we don't try and mark successful requests
+ * as in error during __i915_gem_set_wedged_BKL().
+ */
+ i915_gem_retire_requests(dev_priv);
+
stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
i915_gem_context_lost(dev_priv);
- i915_gem_retire_requests(dev_priv);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
* Wait for last execlists context complete, but bail out in case a
* new request is submitted.
*/
- wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
- intel_engines_are_idle(dev_priv),
- 10);
+ wait_for(intel_engines_are_idle(dev_priv), 10);
if (READ_ONCE(dev_priv->gt.active_requests))
return;
return 0;
}
+static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
+{
+ return wait_for(intel_engine_is_idle(engine), timeout_ms);
+}
+
+static int wait_for_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
+ i915_gem_set_wedged(i915);
+ return -EIO;
+ }
+
+ GEM_BUG_ON(intel_engine_get_seqno(engine) !=
+ intel_engine_last_submit(engine));
+ }
+
+ return 0;
+}
+
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
int ret;
if (ret)
return ret;
}
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
+
+ ret = wait_for_engines(i915);
} else {
ret = wait_for_timeline(&i915->gt.global_timeline, flags);
- if (ret)
- return ret;
}
- return 0;
+ return ret;
}
/** Flushes the GTT write domain for the object if it's dirty. */
* system agents we cannot reproduce this behaviour).
*/
wmb();
- if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+ if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ spin_lock_irq(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
+ }
+ }
intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
if (ret)
goto err_unlock;
- i915_gem_retire_requests(dev_priv);
- GEM_BUG_ON(dev_priv->gt.active_requests);
-
assert_kernel_context_is_current(dev_priv);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
+ intel_guc_suspend(dev_priv);
+
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
i915_sw_fence_await_reservation(&clflush->wait,
obj->resv, NULL,
- false, I915_FENCE_TIMEOUT,
+ true, I915_FENCE_TIMEOUT,
GFP_KERNEL);
reservation_object_lock(obj->resv, NULL);
}
static inline int
-mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 flags)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
- u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
- /* Use an extended w/a on ivb+ if signalling from other rings */
- i915.semaphores ?
+ /* Use an extended w/a on gen7 if signalling from other rings */
+ (i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len;
+ u32 *cs;
- /* These flags are for resource streamer on HSW+ */
+ flags |= MI_MM_SPACE_GTT;
if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
- flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_GEN(dev_priv) < 8)
- flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
-
+ /* These flags are for resource streamer on HSW+ */
+ flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
+ else
+ flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
if (INTEL_GEN(dev_priv) >= 7)
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .kmap = i915_gem_dmabuf_kmap,
- .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
- .kunmap = i915_gem_dmabuf_kunmap,
- .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .map = i915_gem_dmabuf_kmap,
+ .map_atomic = i915_gem_dmabuf_kmap_atomic,
+ .unmap = i915_gem_dmabuf_kunmap,
+ .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
if (ret)
return ret;
- i915_gem_retire_requests(dev_priv);
goto search_again;
found:
if (ret)
return ret;
- i915_gem_retire_requests(dev_priv);
WARN_ON(!list_empty(&vm->active_list));
}
struct list_head ordered_vmas;
struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+ bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
int retry;
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
if (!has_fenced_gpu_access)
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+ needs_unfenced_map) &&
i915_gem_object_is_tiled(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+ if (i915_gem_wait_for_idle(dev_priv, 0)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ /* The timeline struct (as part of the ppgtt underneath a context)
+ * may be freed when the request is no longer in use by the GPU.
+ * We could extend the life of a context to beyond that of all
+ * fences, possibly keeping the hw resource around indefinitely,
+ * or we just give them a false name. Since
+ * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+ * lie seems justifiable.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return "signaled";
+
return to_request(fence)->timeline->common->name;
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
- struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
if (ret)
return ret;
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests > 1);
-
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
-
- if (wait_for(intel_engine_is_idle(engine), 50))
- return -EBUSY;
+ struct i915_gem_timeline *timeline;
+ struct intel_timeline *tl = engine->timeline;
if (!i915_seqno_passed(seqno, tl->seqno)) {
/* spin until threads are complete */
/* Finally reset hw state */
tl->seqno = seqno;
intel_engine_init_global_seqno(engine, seqno);
- }
-
- list_for_each_entry(timeline, &i915->gt.timelines, link) {
- for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
- memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
- }
+ list_for_each_entry(timeline, &i915->gt.timelines, link)
+ memset(timeline->engine[id].sync_seqno, 0,
+ sizeof(timeline->engine[id].sync_seqno));
}
return 0;
* completion order.
*/
list_del(&request->ring_link);
- request->ring->last_retired_head = request->postfix;
+ request->ring->head = request->postfix;
if (!--request->i915->gt.active_requests) {
GEM_BUG_ON(!request->i915->gt.awake);
mod_delayed_work(request->i915->wq,
GEM_BUG_ON(to == from);
+ if (i915_gem_request_completed(from))
+ return 0;
+
if (to->engine->schedule) {
ret = i915_priotree_add_dependency(to->i915,
&to->priotree,
BUG();
}
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+ if (!unlock)
+ return;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ /* expedite the RCU grace period to free some request slabs */
+ synchronize_rcu_expedited();
+}
+
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
- if (unlock)
- mutex_unlock(&dev_priv->drm.struct_mutex);
- /* expedite the RCU grace period to free some request slabs */
- synchronize_rcu_expedited();
+ i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
return count;
}
count += obj->base.size >> PAGE_SHIFT;
}
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_shrinker_unlock(dev, unlock);
return count;
}
sc->nr_to_scan - freed,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+
+ i915_gem_shrinker_unlock(dev, unlock);
return freed;
}
struct shrinker_lock_uninterruptible *slu)
{
dev_priv->mm.interruptible = slu->was_interruptible;
- if (slu->unlock)
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
}
static int
/**
* DOC: GuC-based command submission
*
- * i915_guc_client:
- * We use the term client to avoid confusion with contexts. A i915_guc_client is
- * equivalent to GuC object guc_context_desc. This context descriptor is
- * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
- * and workqueue for it. Also the process descriptor (guc_process_desc), which
- * is mapped to client space. So the client can write Work Item then ring the
- * doorbell.
+ * GuC client:
+ * An i915_guc_client refers to a submission path through GuC. Currently, there
+ * is only one of these (the execbuf_client) and this one is charged with all
+ * submissions to the GuC. This struct is the owner of a doorbell, a process
+ * descriptor and a workqueue (all of them inside a single gem object that
+ * contains all required pages for these elements).
*
- * To simplify the implementation, we allocate one gem object that contains all
- * pages for doorbell, process descriptor and workqueue.
+ * GuC stage descriptor:
+ * During initialization, the driver allocates a static pool of 1024 such
+ * descriptors, and shares them with the GuC.
+ * Currently, there exists a 1:1 mapping between an i915_guc_client and a
+ * guc_stage_desc (via the client's stage_id), so effectively only one
+ * gets used. This stage descriptor lets the GuC know about the doorbell,
+ * workqueue and process descriptor. Theoretically, it also lets the GuC
+ * know about our HW contexts (context ID, etc...), but we actually
+ * employ a kind of submission where the GuC uses the LRCA sent via the work
+ * item instead (the single guc_stage_desc associated to execbuf client
+ * contains information about the default kernel context only, but this is
+ * essentially unused). This is called a "proxy" submission.
*
* The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
* ELSP context descriptor dword into Work Item.
* See guc_wq_item_append()
*
+ * ADS:
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
+ * scheduling policies (guc_policies), a structure describing a collection of
+ * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
+ * its internal state for sleep.
+ *
*/
+static inline bool is_high_priority(struct i915_guc_client *client)
+{
+ return client->priority <= GUC_CLIENT_PRIORITY_HIGH;
+}
+
+static int __reserve_doorbell(struct i915_guc_client *client)
+{
+ unsigned long offset;
+ unsigned long end;
+ u16 id;
+
+ GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
+
+ /*
+ * The bitmap tracks which doorbell registers are currently in use.
+ * It is split into two halves; the first half is used for normal
+ * priority contexts, the second half for high-priority ones.
+ */
+ offset = 0;
+	end = GUC_NUM_DOORBELLS / 2;
+ if (is_high_priority(client)) {
+ offset = end;
+ end += offset;
+ }
+
+	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
+ if (id == end)
+ return -ENOSPC;
+
+ __set_bit(id, client->guc->doorbell_bitmap);
+ client->doorbell_id = id;
+ DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
+ client->stage_id, yesno(is_high_priority(client)),
+ id);
+ return 0;
+}
+
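Note the argument order: find_next_zero_bit(addr, size, offset) scans [offset, size), which is why the call above must pass end before offset. With the halves as above (assuming GUC_NUM_DOORBELLS is 256; its value is not shown in this hunk), a normal-priority client searches IDs [0, 128) and a high-priority one [128, 256). A sketch:

    DECLARE_BITMAP(db, 256);
    u16 id;

    bitmap_zero(db, 256);
    __set_bit(0, db);                     /* doorbell 0 already taken */
    id = find_next_zero_bit(db, 128, 0);  /* scans [0, 128) -> id == 1 */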
+static void __unreserve_doorbell(struct i915_guc_client *client)
+{
+ GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
+
+ __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+ client->doorbell_id = GUC_DOORBELL_INVALID;
+}
+
/*
* Tell the GuC to allocate or deallocate a specific doorbell
*/
-static int guc_allocate_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
u32 action[] = {
INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
- client->ctx_index
+ stage_id
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int guc_release_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
u32 action[] = {
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
- client->ctx_index
+ stage_id
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
+static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
+{
+ struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
+
+ return &base[client->stage_id];
+}
+
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
* client object which contains the page being used for the doorbell
*/
-static int guc_update_doorbell_id(struct intel_guc *guc,
- struct i915_guc_client *client,
- u16 new_id)
+static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
{
- struct sg_table *sg = guc->ctx_pool_vma->pages;
- void *doorbell_bitmap = guc->doorbell_bitmap;
- struct guc_doorbell_info *doorbell;
- struct guc_context_desc desc;
- size_t len;
+ struct guc_stage_desc *desc;
- doorbell = client->vaddr + client->doorbell_offset;
+ /* Update the GuC's idea of the doorbell ID */
+ desc = __get_stage_desc(client);
+ desc->db_id = new_id;
+}
- if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
- test_bit(client->doorbell_id, doorbell_bitmap)) {
- /* Deactivate the old doorbell */
- doorbell->db_status = GUC_DOORBELL_DISABLED;
- (void)guc_release_doorbell(guc, client);
- __clear_bit(client->doorbell_id, doorbell_bitmap);
- }
+static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client)
+{
+ return client->vaddr + client->doorbell_offset;
+}
- /* Update the GuC's idea of the doorbell ID */
- len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
- if (len != sizeof(desc))
- return -EFAULT;
- desc.db_id = new_id;
- len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
- if (len != sizeof(desc))
- return -EFAULT;
-
- client->doorbell_id = new_id;
- if (new_id == GUC_INVALID_DOORBELL_ID)
- return 0;
+static bool has_doorbell(struct i915_guc_client *client)
+{
+ if (client->doorbell_id == GUC_DOORBELL_INVALID)
+ return false;
- /* Activate the new doorbell */
- __set_bit(new_id, doorbell_bitmap);
+ return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+}
+
+static int __create_doorbell(struct i915_guc_client *client)
+{
+ struct guc_doorbell_info *doorbell;
+ int err;
+
+ doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = client->doorbell_cookie;
- return guc_allocate_doorbell(guc, client);
+
+ err = __guc_allocate_doorbell(client->guc, client->stage_id);
+ if (err) {
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ doorbell->cookie = 0;
+ }
+ return err;
}
-static void guc_disable_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __destroy_doorbell(struct i915_guc_client *client)
{
- (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
+ struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
+ struct guc_doorbell_info *doorbell;
+ u16 db_id = client->doorbell_id;
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
+ GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+
+ doorbell = __get_doorbell(client);
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ doorbell->cookie = 0;
+
+ /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
+ * to go to zero after updating db_status before we call the GuC to
+ * release the doorbell */
+ if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+ WARN_ONCE(true, "Doorbell never became invalid after disable\n");
+
+ return __guc_deallocate_doorbell(client->guc, client->stage_id);
}
-static uint16_t
-select_doorbell_register(struct intel_guc *guc, uint32_t priority)
+static int create_doorbell(struct i915_guc_client *client)
{
- /*
- * The bitmap tracks which doorbell registers are currently in use.
- * It is split into two halves; the first half is used for normal
- * priority contexts, the second half for high-priority ones.
- * Note that logically higher priorities are numerically less than
- * normal ones, so the test below means "is it high-priority?"
- */
- const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
- const uint16_t half = GUC_MAX_DOORBELLS / 2;
- const uint16_t start = hi_pri ? half : 0;
- const uint16_t end = start + half;
- uint16_t id;
+ int ret;
- id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
- if (id == end)
- id = GUC_INVALID_DOORBELL_ID;
+ ret = __reserve_doorbell(client);
+ if (ret)
+ return ret;
- DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
- hi_pri ? "high" : "normal", id);
+ __update_doorbell_desc(client, client->doorbell_id);
+
+ ret = __create_doorbell(client);
+ if (ret)
+ goto err;
+
+ return 0;
- return id;
+err:
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+ __unreserve_doorbell(client);
+ return ret;
}
-/*
- * Select, assign and relase doorbell cachelines
- *
- * These functions track which doorbell cachelines are in use.
- * The data they manipulate is protected by the intel_guc_send lock.
- */
+static int destroy_doorbell(struct i915_guc_client *client)
+{
+ int err;
+
+ GEM_BUG_ON(!has_doorbell(client));
+
+ /* XXX: wait for any interrupts */
+ /* XXX: wait for workqueue to drain */
+
+ err = __destroy_doorbell(client);
+ if (err)
+ return err;
+
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+
+ __unreserve_doorbell(client);
-static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
+ return 0;
+}
+
+static unsigned long __select_cacheline(struct intel_guc *guc)
{
- const uint32_t cacheline_size = cache_line_size();
- uint32_t offset;
+ unsigned long offset;
/* Doorbell uses a single cache line within a page */
offset = offset_in_page(guc->db_cacheline);
/* Moving to next cache line to reduce contention */
- guc->db_cacheline += cacheline_size;
-
- DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cacheline_size);
+ guc->db_cacheline += cache_line_size();
+ DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
+ offset, guc->db_cacheline, cache_line_size());
return offset;
}
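Assuming a 64-byte cache line and 4KiB pages (both architecture-dependent), successive calls hand out offsets 0, 64, 128, ..., 4032 and then wrap, because offset_in_page() masks the cursor with PAGE_SIZE - 1. A sketch of the stepping:

    /* Sketch: how the doorbell cacheline cursor walks a page. */
    unsigned int cursor = 0, i;

    for (i = 0; i < 66; i++) {
            unsigned long offset = cursor & (PAGE_SIZE - 1); /* offset_in_page() */

            cursor += 64;               /* cache_line_size() on most x86 */
            /* offset sequence: 0, 64, ..., 4032, 0, 64, ... */
    }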
+static inline struct guc_process_desc *
+__get_process_desc(struct i915_guc_client *client)
+{
+ return client->vaddr + client->proc_desc_offset;
+}
+
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
{
struct guc_process_desc *desc;
- desc = client->vaddr + client->proc_desc_offset;
-
- memset(desc, 0, sizeof(*desc));
+ desc = memset(__get_process_desc(client), 0, sizeof(*desc));
/*
* XXX: pDoorbell and WQVBaseAddress are pointers in process address
desc->wq_base_addr = 0;
desc->db_base_addr = 0;
- desc->context_id = client->ctx_index;
+ desc->stage_id = client->stage_id;
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
}
/*
- * Initialise/clear the context descriptor shared with the GuC firmware.
+ * Initialise/clear the stage descriptor shared with the GuC firmware.
*
* This descriptor tells the GuC where (in GGTT space) to find the important
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
-
-static void guc_ctx_desc_init(struct intel_guc *guc,
- struct i915_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
- struct guc_context_desc desc;
- struct sg_table *sg;
+ struct guc_stage_desc *desc;
unsigned int tmp;
u32 gfx_addr;
- memset(&desc, 0, sizeof(desc));
+ desc = __get_stage_desc(client);
+ memset(desc, 0, sizeof(*desc));
- desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
- desc.context_id = client->ctx_index;
- desc.priority = client->priority;
- desc.db_id = client->doorbell_id;
+ desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
+ desc->stage_id = client->stage_id;
+ desc->priority = client->priority;
+ desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = &ctx->engine[engine->id];
uint32_t guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
+ struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
if (!ce->state)
break; /* XXX: continue? */
+ /*
+ * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
+ * submission or, in other words, not using a direct submission
+ * model) the KMD's LRCA is not used for any work submission.
+ * Instead, the GuC uses the LRCA of the user mode context (see
+ * guc_wq_item_append below).
+ */
lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- lrc->ring_lcra =
+ lrc->ring_lrca =
guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
- lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
+
+ /* XXX: In direct submission, the GuC wants the HW context id
+ * here. In proxy submission, it wants the stage id */
+ lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << guc_engine_id);
+ desc->engines_used |= (1 << guc_engine_id);
}
DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc.engines_used);
- WARN_ON(desc.engines_used == 0);
+ client->engines, desc->engines_used);
+ WARN_ON(desc->engines_used == 0);
/*
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
gfx_addr = guc_ggtt_offset(client->vma);
- desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
+ desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc.db_trigger_cpu =
- (uintptr_t)client->vaddr + client->doorbell_offset;
- desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
- desc.process_desc = gfx_addr + client->proc_desc_offset;
- desc.wq_addr = gfx_addr + client->wq_offset;
- desc.wq_size = client->wq_size;
-
- /*
- * XXX: Take LRCs from an existing context if this is not an
- * IsKMDCreatedContext client
- */
- desc.desc_private = (uintptr_t)client;
+ desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
+ desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
+ desc->process_desc = gfx_addr + client->proc_desc_offset;
+ desc->wq_addr = gfx_addr + client->wq_offset;
+ desc->wq_size = client->wq_size;
- /* Pool context is pinned already */
- sg = guc->ctx_pool_vma->pages;
- sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
+ desc->desc_private = (uintptr_t)client;
}
-static void guc_ctx_desc_fini(struct intel_guc *guc,
- struct i915_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
- struct guc_context_desc desc;
- struct sg_table *sg;
-
- memset(&desc, 0, sizeof(desc));
+ struct guc_stage_desc *desc;
- sg = guc->ctx_pool_vma->pages;
- sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
+ desc = __get_stage_desc(client);
+ memset(desc, 0, sizeof(*desc));
}
/**
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *client = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = client->vaddr +
- client->proc_desc_offset;
+ struct guc_process_desc *desc = __get_process_desc(client);
u32 freespace;
int ret;
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size/sizeof(u32) - 1;
struct intel_engine_cs *engine = rq->engine;
- struct guc_process_desc *desc;
+ struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
u32 freespace, tail, wq_off;
- desc = client->vaddr + client->proc_desc_offset;
-
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->tail;
- GEM_BUG_ON(tail & 7);
+ assert_ring_tail_valid(rq->ring, rq->tail);
tail >>= 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* The GuC wants only the low-order word of the context descriptor */
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
- wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
+ wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = rq->global_seqno;
}
+static void guc_reset_wq(struct i915_guc_client *client)
+{
+ struct guc_process_desc *desc = __get_process_desc(client);
+
+ desc->head = 0;
+ desc->tail = 0;
+
+ client->wq_tail = 0;
+}
+
static int guc_ring_doorbell(struct i915_guc_client *client)
{
- struct guc_process_desc *desc;
+ struct guc_process_desc *desc = __get_process_desc(client);
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = client->vaddr + client->proc_desc_offset;
-
/* Update the tail so it is visible to GuC */
desc->tail = client->wq_tail;
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = client->vaddr + client->doorbell_offset;
+ db = (union guc_doorbell_qw *)__get_doorbell(client);
while (attempt--) {
/* lets ring the doorbell */
{
struct execlist_port *port = engine->execlist_port;
struct drm_i915_gem_request *last = port[0].request;
- unsigned long flags;
struct rb_node *rb;
bool submit = false;
- /* After execlist_first is updated, the tasklet will be rescheduled.
- *
- * If we are currently running (inside the tasklet) and a third
- * party queues a request and so updates engine->execlist_first under
- * the spinlock (which we have elided), it will atomically set the
- * TASKLET_SCHED flag causing the us to be re-executed and pick up
- * the change in state (the update to TASKLET_SCHED incurs a memory
- * barrier making this cross-cpu checking safe).
- */
- if (!READ_ONCE(engine->execlist_first))
- return false;
-
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
while (rb) {
struct drm_i915_gem_request *rq =
RB_CLEAR_NODE(&rq->priotree.node);
rq->priotree.priority = INT_MAX;
- trace_i915_gem_request_in(rq, port - engine->execlist_port);
i915_guc_submit(rq);
+ trace_i915_gem_request_in(rq, port - engine->execlist_port);
last = rq;
submit = true;
}
nested_enable_signaling(last);
engine->execlist_first = rb;
}
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irq(&engine->timeline->lock);
return submit;
}
return vma;
}
-static void
-guc_client_free(struct drm_i915_private *dev_priv,
- struct i915_guc_client *client)
+/* Check that a doorbell register is in the expected state */
+static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
- struct intel_guc *guc = &dev_priv->guc;
-
- if (!client)
- return;
-
- /*
- * XXX: wait for any outstanding submissions before freeing memory.
- * Be sure to drop any locks
- */
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 drbregl;
+ bool valid;
- if (client->vaddr) {
- /*
- * If we got as far as setting up a doorbell, make sure we
- * shut it down before unmapping & deallocating the memory.
- */
- guc_disable_doorbell(guc, client);
+ GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
- i915_gem_object_unpin_map(client->vma->obj);
- }
+ drbregl = I915_READ(GEN8_DRBREGL(db_id));
+ valid = drbregl & GEN8_DRB_VALID;
- i915_vma_unpin_and_release(&client->vma);
+ if (test_bit(db_id, guc->doorbell_bitmap) == valid)
+ return true;
- if (client->ctx_index != GUC_INVALID_CTX_ID) {
- guc_ctx_desc_fini(guc, client);
- ida_simple_remove(&guc->ctx_ids, client->ctx_index);
- }
+ DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
+ db_id, drbregl, yesno(valid));
- kfree(client);
+ return false;
}
-/* Check that a doorbell register is in the expected state */
-static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
+/*
+ * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
+ * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
+ * doorbell to the rightful owner.
+ */
+static int __reset_doorbell(struct i915_guc_client *client, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- i915_reg_t drbreg = GEN8_DRBREGL(db_id);
- uint32_t value = I915_READ(drbreg);
- bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
- bool expected = test_bit(db_id, guc->doorbell_bitmap);
-
- if (enabled == expected)
- return true;
+ int err;
- DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
- db_id, drbreg.reg, value,
- expected ? "active" : "inactive");
+ __update_doorbell_desc(client, db_id);
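+	/* A create immediately followed by a destroy makes the GuC take note
+	 * of the updated descriptor and then drop the doorbell again. */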
+ err = __create_doorbell(client);
+ if (!err)
+ err = __destroy_doorbell(client);
- return false;
+ return err;
}
/*
- * Borrow the first client to set up & tear down each unused doorbell
- * in turn, to ensure that all doorbell h/w is (re)initialised.
+ * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
+ * HW is (re)initialised. To that end, we might have to borrow the first
+ * client. Also, tell GuC about all the doorbells in use by all clients.
+ * We do this because the KMD, the GuC and the doorbell HW can easily go out of
+ * sync (e.g. we can reset the GuC, but not the doorbell HW).
*/
-static void guc_init_doorbell_hw(struct intel_guc *guc)
+static int guc_init_doorbell_hw(struct intel_guc *guc)
{
struct i915_guc_client *client = guc->execbuf_client;
- uint16_t db_id;
- int i, err;
-
- guc_disable_doorbell(guc, client);
+ bool recreate_first_client = false;
+ u16 db_id;
+ int ret;
- for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
- /* Skip if doorbell is OK */
- if (guc_doorbell_check(guc, i))
+ /* For unused doorbells, make sure they are disabled */
+ for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
+ if (doorbell_ok(guc, db_id))
continue;
- err = guc_update_doorbell_id(guc, client, i);
- if (err)
- DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
- i, err);
+ if (has_doorbell(client)) {
+ /* Borrow execbuf_client (we will recreate it later) */
+ destroy_doorbell(client);
+ recreate_first_client = true;
+ }
+
+ ret = __reset_doorbell(client, db_id);
+ WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
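+		/* Keep iterating even on failure: the remaining doorbells
+		 * can still be sanitised. */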
}
- db_id = select_doorbell_register(guc, client->priority);
- WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);
+ if (recreate_first_client) {
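+		/* We destroyed execbuf_client's doorbell above, so reserve a
+		 * new one and point its descriptor at it again. */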
+ ret = __reserve_doorbell(client);
+ if (unlikely(ret)) {
+ DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret);
+ return ret;
+ }
+
+ __update_doorbell_desc(client, client->doorbell_id);
+ }
- err = guc_update_doorbell_id(guc, client, db_id);
- if (err)
- DRM_WARN("Failed to restore doorbell to %d, err %d\n",
- db_id, err);
+	/*
+	 * Now for every client (and not only execbuf_client) make sure their
+	 * doorbells are known by the GuC.
+	 */
+	/* for (client = client_list; client != NULL; client = client->next) */
+ {
+ ret = __create_doorbell(client);
+ if (ret) {
+ DRM_ERROR("Couldn't recreate client %u doorbell: %d\n",
+ client->stage_id, ret);
+ return ret;
+ }
+ }
- /* Read back & verify all doorbell registers */
- for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
- (void)guc_doorbell_check(guc, i);
+ /* Read back & verify all (used & unused) doorbell registers */
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
+ WARN_ON(!doorbell_ok(guc, db_id));
+
+ return 0;
}
/**
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
- uint16_t db_id;
+ int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- client->owner = ctx;
client->guc = guc;
+ client->owner = ctx;
client->engines = engines;
client->priority = priority;
- client->doorbell_id = GUC_INVALID_DOORBELL_ID;
+ client->doorbell_id = GUC_DOORBELL_INVALID;
+ client->wq_offset = GUC_DB_SIZE;
+ client->wq_size = GUC_WQ_SIZE;
+ spin_lock_init(&client->wq_lock);
- client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
- GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
- if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
- client->ctx_index = GUC_INVALID_CTX_ID;
- goto err;
- }
+ ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto err_client;
+
+ client->stage_id = ret;
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
- if (IS_ERR(vma))
- goto err;
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_id;
+ }
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr))
- goto err;
-
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_vma;
+ }
client->vaddr = vaddr;
- spin_lock_init(&client->wq_lock);
- client->wq_offset = GUC_DB_SIZE;
- client->wq_size = GUC_WQ_SIZE;
-
- db_id = select_doorbell_register(guc, client->priority);
- if (db_id == GUC_INVALID_DOORBELL_ID)
- /* XXX: evict a doorbell instead? */
- goto err;
-
- client->doorbell_offset = select_doorbell_cacheline(guc);
+ client->doorbell_offset = __select_cacheline(guc);
/*
* Since the doorbell only requires a single cacheline, we can save
client->proc_desc_offset = (GUC_DB_SIZE / 2);
guc_proc_desc_init(guc, client);
- guc_ctx_desc_init(guc, client);
+ guc_stage_desc_init(guc, client);
- /* For runtime client allocation we need to enable the doorbell. Not
- * required yet for the static execbuf_client as this special kernel
- * client is enabled from i915_guc_submission_enable().
- *
- * guc_update_doorbell_id(guc, client, db_id);
- */
+ ret = create_doorbell(client);
+ if (ret)
+ goto err_vaddr;
- DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
- priority, client, client->engines, client->ctx_index);
- DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
- client->doorbell_id, client->doorbell_offset);
+ DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
+ priority, client, client->engines, client->stage_id);
+ DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
+ client->doorbell_id, client->doorbell_offset);
return client;
-err:
- guc_client_free(dev_priv, client);
- return NULL;
+err_vaddr:
+ i915_gem_object_unpin_map(client->vma->obj);
+err_vma:
+ i915_vma_unpin_and_release(&client->vma);
+err_id:
+ ida_simple_remove(&guc->stage_ids, client->stage_id);
+err_client:
+ kfree(client);
+ return ERR_PTR(ret);
}
+static void guc_client_free(struct i915_guc_client *client)
+{
+	/*
+	 * XXX: wait for any outstanding submissions before freeing memory.
+	 * Be sure to drop any locks.
+	 */
+	/* FIXME: in many cases, by the time we get here the GuC has been
+	 * reset, so we cannot destroy the doorbell properly. Ignore the
+	 * error message for now. */
+ destroy_doorbell(client);
+ guc_stage_desc_fini(client->guc, client);
+ i915_gem_object_unpin_map(client->vma->obj);
+ i915_vma_unpin_and_release(&client->vma);
+ ida_simple_remove(&client->guc->stage_ids, client->stage_id);
+ kfree(client);
+}
static void guc_policies_init(struct guc_policies *policies)
{
policies->dpc_promote_time = 500000;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
- for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+ for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
policies->is_valid = 1;
}
-static void guc_addon_create(struct intel_guc *guc)
+static int guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct i915_vma *vma;
enum intel_engine_id id;
u32 base;
- vma = guc->ads_vma;
- if (!vma) {
- vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
- if (IS_ERR(vma))
- return;
+ GEM_BUG_ON(guc->ads_vma);
- guc->ads_vma = vma;
- }
+ vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ guc->ads_vma = vma;
page = i915_vma_first_page(vma);
blob = kmap(page);
/* MMIO reg state */
for_each_engine(engine, dev_priv, id) {
- blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
+ blob->reg_state.white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
+ blob->reg_state.white_list[engine->guc_id].count = 0;
}
/*
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
kunmap(page);
+
+ return 0;
+}
+
+static void guc_ads_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->ads_vma);
}
/*
- * Set up the memory resources to be shared with the GuC. At this point,
- * we require just one object that can be mapped through the GGTT.
+ * Set up the memory resources to be shared with the GuC (via the GGTT)
+ * at firmware loading time.
*/
int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
- const size_t ctxsize = sizeof(struct guc_context_desc);
- const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
- const size_t gemsize = round_up(poolsize, PAGE_SIZE);
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ void *vaddr;
+ int ret;
- if (!HAS_GUC_SCHED(dev_priv))
+ if (guc->stage_desc_pool)
return 0;
- /* Wipe bitmap & delete client in case of reinitialisation */
- bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
- i915_guc_submission_disable(dev_priv);
-
- if (!i915.enable_guc_submission)
- return 0; /* not enabled */
-
- if (guc->ctx_pool_vma)
- return 0; /* already allocated */
-
- vma = intel_guc_allocate_vma(guc, gemsize);
+ vma = intel_guc_allocate_vma(guc,
+ PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS));
if (IS_ERR(vma))
return PTR_ERR(vma);
- guc->ctx_pool_vma = vma;
- ida_init(&guc->ctx_ids);
- intel_guc_log_create(guc);
- guc_addon_create(guc);
-
- guc->execbuf_client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CTX_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (!guc->execbuf_client) {
- DRM_ERROR("Failed to create GuC client for execbuf!\n");
- goto err;
+ guc->stage_desc_pool = vma;
+
+ vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_vma;
}
+ guc->stage_desc_pool_vaddr = vaddr;
+
+ ret = intel_guc_log_create(guc);
+ if (ret < 0)
+ goto err_vaddr;
+
+ ret = guc_ads_create(guc);
+ if (ret < 0)
+ goto err_log;
+
+ ida_init(&guc->stage_ids);
+
return 0;
-err:
- i915_guc_submission_fini(dev_priv);
- return -ENOMEM;
+err_log:
+ intel_guc_log_destroy(guc);
+err_vaddr:
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+err_vma:
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
+ return ret;
}
-static void guc_reset_wq(struct i915_guc_client *client)
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
- struct guc_process_desc *desc = client->vaddr +
- client->proc_desc_offset;
-
- desc->head = 0;
- desc->tail = 0;
+ struct intel_guc *guc = &dev_priv->guc;
- client->wq_tail = 0;
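+
+	/* Tear everything down in the reverse order of
+	 * i915_guc_submission_init(). */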
+ ida_destroy(&guc->stage_ids);
+ guc_ads_destroy(guc);
+ intel_guc_log_destroy(guc);
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int irqs;
+
+ /*
+ * tell all command streamers NOT to forward interrupts or vblank
+ * to GuC.
+ */
+ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
+ irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
+
+ /* route all GT interrupts to the host */
+ I915_WRITE(GUC_BCS_RCS_IER, 0);
+ I915_WRITE(GUC_VCS2_VCS1_IER, 0);
+ I915_WRITE(GUC_WD_VECS_IER, 0);
+
+ dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+}
+
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int err;
+
+ if (!client) {
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for execbuf!\n");
+ return PTR_ERR(client);
+ }
- if (!client)
- return -ENODEV;
+ guc->execbuf_client = client;
+ }
- intel_guc_sample_forcewake(guc);
+ err = intel_guc_sample_forcewake(guc);
+ if (err)
+ goto err_execbuf_client;
guc_reset_wq(client);
- guc_init_doorbell_hw(guc);
+
+ err = guc_init_doorbell_hw(guc);
+ if (err)
+ goto err_execbuf_client;
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(dev_priv);
}
return 0;
-}
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
-
- /*
- * tell all command streamers NOT to forward interrupts or vblank
- * to GuC.
- */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
- irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
-
- /* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
-
- dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+err_execbuf_client:
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
+ return err;
}
void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
guc_interrupts_release(dev_priv);
- if (!guc->execbuf_client)
- return;
-
/* Revert back to manual ELSP submission */
intel_engines_reset_default_submission(dev_priv);
-}
-
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_guc_client *client;
- client = fetch_and_zero(&guc->execbuf_client);
- if (!client)
- return;
-
- guc_client_free(dev_priv, client);
-
- i915_vma_unpin_and_release(&guc->ads_vma);
- i915_vma_unpin_and_release(&guc->log.vma);
-
- if (guc->ctx_pool_vma)
- ida_destroy(&guc->ctx_ids);
- i915_vma_unpin_and_release(&guc->ctx_pool_vma);
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
}
/**
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
-
/**
* intel_guc_resume() - notify GuC resuming from suspend state
* @dev_priv: i915 device private
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
-
-
I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
/* Handle flush interrupt in bottom half */
- queue_work(dev_priv->guc.log.flush_wq,
- &dev_priv->guc.log.flush_work);
+ queue_work(dev_priv->guc.log.runtime.flush_wq,
+ &dev_priv->guc.log.runtime.flush_work);
dev_priv->guc.log.flush_interrupt_count++;
} else {
dev_priv->rps.pm_intrmsk_mbz = 0;
/*
- * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
+	 * SNB,IVB,HSW can, while VLV,CHV may, hard hang on a looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*
* TODO: verify if this can be reproduced on VLV,CHV.
*/
- if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ if (INTEL_INFO(dev_priv)->gen <= 7)
dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_INFO(dev_priv)->gen >= 8)
.has_overlay = 1, .overlay_needs_physical = 1, \
.has_gmch_display = 1, \
.hws_needs_physical = 1, \
+ .unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
.platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i915gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945g_info = {
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_g33_info = {
*/
if (WARN_ON(stream->sample_flags != props->sample_flags)) {
ret = -ENODEV;
- goto err_alloc;
+ goto err_flags;
}
list_add(&stream->link, &dev_priv->perf.streams);
err_open:
list_del(&stream->link);
+err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
err_alloc:
if (ret)
return ret;
+ if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+ DRM_DEBUG("Unknown i915 perf property ID\n");
+ return -EINVAL;
+ }
+
switch ((enum drm_i915_perf_property_id)id) {
case DRM_I915_PERF_PROP_CTX_HANDLE:
props->single_context = 1;
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
- default:
+ case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
- DRM_DEBUG("Unknown i915 perf property ID\n");
return -EINVAL;
}
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
+#define TRANS_DDI_HDMI_SCRAMBLING (1<<0)
+#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
+ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
+ | TRANS_DDI_HDMI_SCRAMBLING)
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
+#undef WARN_ON
+/* Many gcc versions seem to not see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+ bool __i915_warn_cond = (x); \
+ if (__builtin_constant_p(__i915_warn_cond)) \
+ BUILD_BUG_ON(__i915_warn_cond); \
+ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
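+/* Routing the stringified condition through "%s" keeps any '%' inside the
+ * condition text from being interpreted as a format specifier. */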
+#endif
+
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+ (long)(x), __func__)
+
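+/* Typical use is the default arm of a switch over hardware enums, e.g.:
+ *
+ *	default:
+ *		MISSING_CASE(fb->format->format);
+ *		return 0;
+ */
+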
#if GCC_VERSION >= 70000
#define add_overflows(A, B) \
__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
unsigned int result;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
result = __intel_breadcrumbs_wakeup(b);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
return result;
}
signaler_set_rtpriority();
do {
+ bool do_schedule = true;
+
set_current_state(TASK_INTERRUPTIBLE);
/* We are either woken up by the interrupt bottom-half,
spin_unlock_irq(&b->rb_lock);
i915_gem_request_put(request);
- } else {
+
+ /* If the engine is saturated we may be continually
+ * processing completed requests. This angers the
+ * NMI watchdog if we never let anything else
+ * have access to the CPU. Let's pretend to be nice
+ * and relinquish the CPU if we burn through the
+ * entire RT timeslice!
+ */
+ do_schedule = need_resched();
+ }
+
+ if (unlikely(do_schedule)) {
DEFINE_WAIT(exec);
+ if (kthread_should_park())
+ kthread_parkme();
+
if (kthread_should_stop()) {
GEM_BUG_ON(request);
break;
if (request)
remove_wait_queue(&request->execute, &exec);
-
- if (kthread_should_park())
- kthread_parkme();
}
i915_gem_request_put(request);
} while (1);
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
- /* BSpec says "Do not use DisplayPort with CDCLK less than
- * 432 MHz, audio enabled, port width x4, and link rate
- * HBR2 (5.4 GHz), or else there may be audio corruption or
- * screen corruption."
+ /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+ * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+ * there may be audio corruption or screen corruption." This cdclk
+ * restriction for GLK is 316.8 MHz and since GLK can output two
+ * pixels per clock, the pixel rate becomes 2 * 316.8 MHz.
*/
if (intel_crtc_has_dp_encoder(crtc_state) &&
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
- crtc_state->lane_count == 4)
- pixel_rate = max(432000, pixel_rate);
+ crtc_state->lane_count == 4) {
+ if (IS_GEMINILAKE(dev_priv))
+ pixel_rate = max(2 * 316800, pixel_rate);
+ else
+ pixel_rate = max(432000, pixel_rate);
+ }
+
+ /* According to BSpec, "The CD clock frequency must be at least twice
+ * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+ * The check for GLK has to be adjusted as the platform can output
+ * two pixels per clock.
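+	 * That makes the floor 2 * 96000 kHz elsewhere and 2 * 2 * 96000 kHz
+	 * (384 MHz of pixel rate) on GLK, matching the max() below.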
+ */
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) {
+ if (IS_GEMINILAKE(dev_priv))
+ pixel_rate = max(2 * 2 * 96000, pixel_rate);
+ else
+ pixel_rate = max(2 * 96000, pixel_rate);
+ }
return pixel_rate;
}
{ }
};
-static enum drm_connector_status
-intel_crt_detect(struct drm_connector *connector, bool force)
+static int
+intel_crt_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
- enum drm_connector_status status;
+ int status, ret;
struct intel_load_detect_pipe tmp;
- struct drm_modeset_acquire_ctx ctx;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
goto out;
}
- drm_modeset_acquire_init(&ctx, 0);
-
/* for pre-945g platforms use load detect */
- if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
+ ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ if (ret > 0) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else if (INTEL_GEN(dev_priv) < 4)
status = connector_status_disconnected;
else
status = connector_status_unknown;
- intel_release_load_detect_pipe(connector, &tmp, &ctx);
- } else
+ intel_release_load_detect_pipe(connector, &tmp, ctx);
+ } else if (ret == 0)
status = connector_status_unknown;
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ else if (ret < 0)
+ status = ret;
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain);
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
};
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ .detect_ctx = intel_crt_detect,
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
};
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
DP_TP_CTL_ENABLE);
}
-void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
return ret;
}
-static struct intel_encoder *
+/* Finds the only possible encoder associated with the given CRTC. */
+struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
bxt_ddi_clock_get(encoder, pipe_config);
}
-static bool
-hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- struct intel_shared_dpll *pll;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state,
- encoder);
- if (!pll)
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
-
- return pll;
-}
-
-static bool
-skl_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- struct intel_shared_dpll *pll;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
- return false;
- }
-
- return true;
-}
-
-static bool
-bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
-}
-
-/*
- * Tries to find a *shared* PLL for the CRTC and store it in
- * intel_crtc->ddi_pll_sel.
- *
- * For private DPLLs, compute_config() should do the selection for us. This
- * function should be folded into compute_config() eventually.
- */
-bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_encoder *encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
-
- if (IS_GEN9_BC(dev_priv))
- return skl_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
- else if (IS_GEN9_LP(dev_priv))
- return bxt_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
- else
- return hsw_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
-}
-
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ if (crtc_state->hdmi_scrambling)
+ temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
+ if (crtc_state->hdmi_high_tmds_clock_ratio)
+ temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (crtc_state->fdi_lanes - 1) << 1;
return DDI_BUF_TRANS_SELECT(level);
}
-void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll)
+static void intel_ddi_clk_select(struct intel_encoder *encoder,
+ struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
+ bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio;
+ bool scrambling = pipe_config->hdmi_scrambling;
+
+ intel_hdmi_handle_sink_scrambling(intel_encoder,
+ conn_state->connector,
+ clock_ratio, scrambling);
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
if (old_crtc_state->has_audio)
intel_audio_codec_disable(intel_encoder);
+ if (type == INTEL_OUTPUT_HDMI) {
+ intel_hdmi_handle_sink_scrambling(intel_encoder,
+ old_conn_state->connector,
+ false, false);
+ }
+
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
+
+ if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
+ TRANS_DDI_HDMI_SCRAMBLING_MASK)
+ pipe_config->hdmi_scrambling = true;
+ if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
/* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
pipe_config->lane_count = 4;
unsigned int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
return cpp;
case I915_FORMAT_MOD_X_TILED:
if (IS_GEN2(dev_priv))
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_NONE)
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 1;
else
return intel_tile_size(to_i915(fb->dev)) /
return 4096;
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(new_offset > old_offset);
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
if (alignment)
alignment--;
- if (fb_modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
DRM_ROTATE_0, tile_size);
offset /= tile_size;
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
switch (cpp) {
case 8:
return 0;
}
-static void i9xx_update_primary_plane(struct drm_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(primary->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int plane = intel_crtc->plane;
- u32 linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- unsigned long irqflags;
+ u32 dspcntr;
- dspcntr = DISPPLANE_GAMMA_ENABLE;
+ dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
- dspcntr |= DISPLAY_PLANE_ENABLE;
+ if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
+ IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 4) {
- if (intel_crtc->pipe == PIPE_B)
+ if (crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
if (INTEL_GEN(dev_priv) >= 4 &&
if (rotation & DRM_REFLECT_X)
dspcntr |= DISPPLANE_MIRROR;
- if (IS_G4X(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ return dspcntr;
+}
- intel_add_fb_offsets(&x, &y, plane_state, 0);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ int src_x = plane_state->base.src.x1 >> 16;
+ int src_y = plane_state->base.src.y1 >> 16;
+ u32 offset;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, plane_state, 0);
+ offset = intel_compute_tile_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
- if (rotation & DRM_ROTATE_180) {
- x += crtc_state->pipe_src_w - 1;
- y += crtc_state->pipe_src_h - 1;
- } else if (rotation & DRM_REFLECT_X) {
- x += crtc_state->pipe_src_w - 1;
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->base.rotation;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+ if (rotation & DRM_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ src_x += src_w - 1;
+ }
}
+ plane_state->main.offset = offset;
+ plane_state->main.x = src_x;
+ plane_state->main.y = src_y;
+
+ return 0;
+}
+
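+/* The offset/x/y computed above are consumed by the plane update hooks:
+ * i9xx_update_primary_plane() below reads them back from plane_state->main. */
+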
+static void i9xx_update_primary_plane(struct drm_plane *primary,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int plane = intel_crtc->plane;
+ u32 linear_offset;
+ u32 dspcntr = plane_state->ctl;
+ i915_reg_t reg = DSPCNTR(plane);
+ int x = plane_state->main.x;
+ int y = plane_state->main.y;
+ unsigned long irqflags;
+
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_GEN(dev_priv) < 4)
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_crtc->dspaddr_offset = plane_state->main.offset;
+ else
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
I915_WRITE_FW(reg, dspcntr);
I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ I915_WRITE_FW(DSPSURF(plane),
+ intel_plane_ggtt_offset(plane_state) +
+ intel_crtc->dspaddr_offset);
+ I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void ironlake_update_primary_plane(struct drm_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int plane = intel_crtc->plane;
- u32 linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
- unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- unsigned long irqflags;
-
- dspcntr = DISPPLANE_GAMMA_ENABLE;
- dspcntr |= DISPLAY_PLANE_ENABLE;
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_XRGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
- break;
- default:
- BUG();
- }
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
-
- if (rotation & DRM_ROTATE_180)
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- intel_add_fb_offsets(&x, &y, plane_state, 0);
-
- intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- /* HSW+ does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
- rotation & DRM_ROTATE_180) {
- x += crtc_state->pipe_src_w - 1;
- y += crtc_state->pipe_src_h - 1;
- }
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- intel_crtc->adjusted_x = x;
- intel_crtc->adjusted_y = y;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE_FW(DSPSURF(plane),
- intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
- } else {
- I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
- }
- POSTING_READ_FW(reg);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_NONE)
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 64;
else
return intel_tile_width_bytes(fb, plane);
return stride;
}
-u32 skl_plane_ctl_format(uint32_t pixel_format)
+static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_C8:
return 0;
}
-u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
break;
case I915_FORMAT_MOD_X_TILED:
return PLANE_CTL_TILED_X;
return 0;
}
-u32 skl_plane_ctl_rotation(unsigned int rotation)
+static u32 skl_plane_ctl_rotation(unsigned int rotation)
{
switch (rotation) {
case DRM_ROTATE_0:
return 0;
}
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 plane_ctl;
+
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (!IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |=
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE |
+ PLANE_CTL_PLANE_GAMMA_DISABLE;
+ }
+
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
+ plane_ctl |= skl_plane_ctl_rotation(rotation);
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ return plane_ctl;
+}
+
static void skylake_update_primary_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = to_intel_plane(plane)->id;
enum pipe pipe = to_intel_plane(plane)->pipe;
- u32 plane_ctl;
+ u32 plane_ctl = plane_state->ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
u32 surf_addr = plane_state->main.offset;
int dst_h = drm_rect_height(&plane_state->base.dst);
unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (!IS_GEMINILAKE(dev_priv)) {
- plane_ctl |=
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE |
- PLANE_CTL_PLANE_GAMMA_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
-
/* Sizes are 0 based */
src_w--;
src_h--;
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- /* Support for kgdboc is disabled, this needs a major rework. */
- DRM_ERROR("legacy panic handler not supported any more.\n");
-
- return -ENODEV;
-}
-
static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
static void compute_m_n(unsigned int m, unsigned int n,
uint32_t *ret_m, uint32_t *ret_n)
{
+ /*
+ * Reduce M/N as much as possible without loss in precision. Several DP
+ * dongles in particular seem to be fussy about too large *link* M/N
+ * values. The passed in values are more likely to have the least
+ * significant bits zero than M after rounding below, so do this first.
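+	 * For example, m = 196608 and n = 262144 (both with 16 trailing zero
+	 * bits) reduce to m = 3, n = 4 here before the rounding below.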
+ */
+ while ((m & 1) == 0 && (n & 1) == 0) {
+ m >>= 1;
+ n >>= 1;
+ }
+
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
*ret_m = div_u64((uint64_t) m * *ret_n, n);
intel_reduce_m_n_ratio(ret_m, ret_n);
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
case PLANE_CTL_TILED_LINEAR:
- fb->modifier = DRM_FORMAT_MOD_NONE;
+ fb->modifier = DRM_FORMAT_MOD_LINEAR;
break;
case PLANE_CTL_TILED_X:
plane_config->tiling = I915_TILING_X;
struct intel_crtc_state *crtc_state)
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
- if (!intel_ddi_pll_select(crtc, crtc_state))
+ struct intel_encoder *encoder =
+ intel_ddi_get_crtc_new_encoder(crtc_state);
+
+ if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
+ }
}
crtc->lowfreq_avail = false;
return active;
}
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int width = plane_state->base.crtc_w;
+ unsigned int stride = roundup_pow_of_two(width) * 4;
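+	/* 4 bytes per ARGB pixel; the switch below only accepts the
+	 * power-of-two strides the i845 cursor hardware supports. */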
+
+ switch (stride) {
+ default:
+ WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+ width, stride);
+ stride = 256;
+ /* fallthrough */
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ }
+
+ return CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(stride);
+}
+
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
if (plane_state && plane_state->base.visible) {
unsigned int width = plane_state->base.crtc_w;
unsigned int height = plane_state->base.crtc_h;
- unsigned int stride = roundup_pow_of_two(width) * 4;
-
- switch (stride) {
- default:
- WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
- width, stride);
- stride = 256;
- /* fallthrough */
- case 256:
- case 512:
- case 1024:
- case 2048:
- break;
- }
-
- cntl |= CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(stride);
+ cntl = plane_state->ctl;
size = (height << 12) | width;
}
}
}
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 cntl;
+
+ cntl = MCURSOR_GAMMA_ENABLE;
+
+ if (HAS_DDI(dev_priv))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+ cntl |= pipe << 28; /* Connect to correct pipe */
+
+ switch (plane_state->base.crtc_w) {
+ case 64:
+ cntl |= CURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= CURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= CURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(plane_state->base.crtc_w);
+ return 0;
+ }
+
+ if (plane_state->base.rotation & DRM_ROTATE_180)
+ cntl |= CURSOR_ROTATE_180;
+
+ return cntl;
+}
+
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (plane_state && plane_state->base.visible) {
- cntl = MCURSOR_GAMMA_ENABLE;
- switch (plane_state->base.crtc_w) {
- case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
- break;
- default:
- MISSING_CASE(plane_state->base.crtc_w);
- return;
- }
- cntl |= pipe << 28; /* Connect to correct pipe */
-
- if (HAS_DDI(dev_priv))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
-
- if (plane_state->base.rotation & DRM_ROTATE_180)
- cntl |= CURSOR_ROTATE_180;
- }
+ if (plane_state && plane_state->base.visible)
+ cntl = plane_state->ctl;
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE_FW(CURCNTR(pipe), cntl);
return 0;
}
-bool intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx)
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
old->restore_state = NULL;
-retry:
- ret = drm_modeset_lock(&config->connection_mutex, ctx);
- if (ret)
- goto fail;
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
/*
* Algorithm gets a little messy:
restore_state = NULL;
}
- if (ret == -EDEADLK) {
- drm_modeset_backoff(ctx);
- goto retry;
- }
+ if (ret == -EDEADLK)
+ return ret;
return false;
}
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
break;
case I915_FORMAT_MOD_X_TILED:
ctl |= PLANE_CTL_TILED_X;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
plane_state = drm_atomic_get_plane_state(state, primary);
}
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
.atomic_begin = intel_begin_crtc_commit,
.atomic_flush = intel_finish_crtc_commit,
.atomic_check = intel_crtc_atomic_check,
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
+
+ PIPE_CONF_CHECK_I(hdmi_scrambling);
+ PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(has_audio);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- /*
- * The intel_legacy_cursor_update() fast path takes care
- * of avoiding the vblank waits for simple cursor
- * movement and flips. For cursor on/off and size changes,
- * we want to perform the vblank waits so that watermark
- * updates happen during the correct frames. Gen9+ have
- * double buffered watermarks and so shouldn't need this.
- */
- if (INTEL_GEN(dev_priv) < 9)
- state->legacy_cursor_update = false;
-
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
return ret;
}
+ /*
+ * The intel_legacy_cursor_update() fast path takes care
+ * of avoiding the vblank waits for simple cursor
+ * movement and flips. For cursor on/off and size changes,
+ * we want to perform the vblank waits so that watermark
+ * updates happen during the correct frames. Gen9+ have
+ * double buffered watermarks and so shouldn't need this.
+ *
+ * Do this after drm_atomic_helper_setup_commit() and
+ * intel_atomic_prepare_commit() because we still want
+ * to skip the flip and fb cleanup waits. Although that
+ * does risk yanking the mapping from under the display
+ * engine.
+ *
+ * FIXME doing watermarks and fb cleanup from a vblank worker
+ * (assuming we had any) would solve these problems.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ state->legacy_cursor_update = false;
+
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
return;
}
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
drm_atomic_state_put(state);
}
-/*
- * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
- * drm_atomic_helper_legacy_gamma_set() directly.
- */
-static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_crtc_state *state;
- int ret;
-
- ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
- if (ret)
- return ret;
-
- /*
- * Make sure we update the legacy properties so this works when
- * atomic is not enabled.
- */
-
- state = crtc->state;
-
- drm_object_property_set_value(&crtc->base,
- config->degamma_lut_property,
- (state->degamma_lut) ?
- state->degamma_lut->base.id : 0);
-
- drm_object_property_set_value(&crtc->base,
- config->ctm_property,
- (state->ctm) ?
- state->ctm->base.id : 0);
-
- drm_object_property_set_value(&crtc->base,
- config->gamma_lut_property,
- (state->gamma_lut) ?
- state->gamma_lut->base.id : 0);
-
- return 0;
-}
-
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = intel_atomic_legacy_gamma_set,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
ret = skl_check_plane_surface(state);
if (ret)
return ret;
+
+ state->ctl = skl_plane_ctl(crtc_state, state);
+ } else {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = i9xx_plane_ctl(crtc_state, state);
}
return 0;
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- intel_primary_formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
-
- primary->update_plane = ironlake_update_primary_plane;
- primary->disable_plane = i9xx_disable_primary_plane;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = to_intel_plane(plane)->pipe;
return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+ if (!cursor_size_ok(dev_priv, state->base.crtc_w,
state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -ENOMEM;
}
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
return -EINVAL;
}
* display power well must be turned off and on again.
* Refuse the put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
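+
+	/* The cursor control value is precomputed during atomic check; the
+	 * update hooks just program plane_state->ctl. */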
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
+ state->ctl = i845_cursor_ctl(crtc_state, state);
+ else
+ state->ctl = i9xx_cursor_ctl(crtc_state, state);
+
return 0;
}
mode_cmd->modifier[0]);
goto err;
}
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
goto err;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
+ int ret;
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
if (!crt)
return;
- if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+ ret = intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx);
+ WARN(ret < 0, "All modeset mutexes are locked, but intel_get_load_detect_pipe failed\n");
+
+ if (ret > 0)
intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
intel_dp->has_audio = false;
}
-static enum drm_connector_status
+static int
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
enum drm_connector_status status;
u8 sink_irq_vector = 0;
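+
+	/* The detect_ctx probe path now calls us with connection_mutex held,
+	 * hence the assertion below. */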
+ WARN_ON(!drm_modeset_is_locked(&connector->dev->mode_config.connection_mutex));
+
intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
*/
status = connector_status_disconnected;
goto out;
- } else if (connector->status == connector_status_connected) {
+ } else {
/*
- * If display was connected already and is still connected
- * check links status, there has been known issues of
- * link loss triggerring long pulse!!!!
+		 * If the display is now connected, check the link status;
+		 * there have been known issues of link loss triggering
+		 * the long pulse.
+		 *
+		 * Some sinks (e.g. ASUS PB287Q) seem to perform some
+ * weird HPD ping pong during modesets. So we can apparently
+ * end up with HPD going low during a modeset, and then
+ * going back up soon after. And once that happens we must
+ * retrain the link to get a picture. That's in case no
+ * userspace component reacted to intermittent HPD dip.
*/
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- goto out;
}
/*
return status;
}
-static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+static int
+intel_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- enum drm_connector_status status = connector->status;
+ int status = connector->status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = intel_dp_detect,
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .detect_ctx = intel_dp_detect,
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
};
int x, y;
} aux;
+ /* plane control register */
+ u32 ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
+
+ /* HDMI scrambling status */
+ bool hdmi_scrambling;
+
+ /* HDMI High TMDS char rate ratio */
+ bool hdmi_high_tmds_clock_ratio;
};
struct intel_crtc {
void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
-void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll);
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state);
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-bool intel_ddi_pll_select(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
+struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
-bool intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
return i915_ggtt_offset(state->vma);
}
-u32 skl_plane_ctl_format(uint32_t pixel_format);
-u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
-u32 skl_plane_ctl_rotation(unsigned int rotation);
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
unsigned int rotation);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
[RCS] = {
- .name = "render ring",
- .exec_id = I915_EXEC_RENDER,
+ .name = "rcs",
.hw_id = RCS_HW,
+ .exec_id = I915_EXEC_RENDER,
.mmio_base = RENDER_RING_BASE,
.irq_shift = GEN8_RCS_IRQ_SHIFT,
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
},
[BCS] = {
- .name = "blitter ring",
- .exec_id = I915_EXEC_BLT,
+ .name = "bcs",
.hw_id = BCS_HW,
+ .exec_id = I915_EXEC_BLT,
.mmio_base = BLT_RING_BASE,
.irq_shift = GEN8_BCS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
},
[VCS] = {
- .name = "bsd ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs",
.hw_id = VCS_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.irq_shift = GEN8_VCS1_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
},
[VCS2] = {
- .name = "bsd2 ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs2",
.hw_id = VCS2_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd2_ring_buffer,
},
[VECS] = {
- .name = "video enhancement ring",
- .exec_id = I915_EXEC_VEBOX,
+ .name = "vecs",
.hw_id = VECS_HW,
+ .exec_id = I915_EXEC_VEBOX,
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
void *semaphores;
/* Semaphores are in noncoherent memory, flush to be safe */
- semaphores = kmap(page);
+ semaphores = kmap_atomic(page);
memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- kunmap(page);
+ kunmap_atomic(semaphores);
}
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return false;
+
+ /* If the driver is wedged, HW state may be very inconsistent and
+ * report that it is still busy, even though we have stopped using it.
+ */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return true;
+
for_each_engine(engine, dev_priv, id) {
if (!intel_engine_is_idle(engine))
return false;
#define GFXCORE_FAMILY_GEN9 12
#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
-#define GUC_CTX_PRIORITY_KMD_HIGH 0
-#define GUC_CTX_PRIORITY_HIGH 1
-#define GUC_CTX_PRIORITY_KMD_NORMAL 2
-#define GUC_CTX_PRIORITY_NORMAL 3
-#define GUC_CTX_PRIORITY_NUM 4
+#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
+#define GUC_CLIENT_PRIORITY_HIGH 1
+#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
+#define GUC_CLIENT_PRIORITY_NORMAL 3
+#define GUC_CLIENT_PRIORITY_NUM 4
-#define GUC_MAX_GPU_CONTEXTS 1024
-#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
+#define GUC_MAX_STAGE_DESCRIPTORS 1024
+#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS
#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
-#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0)
-#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1)
-#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2)
-#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3)
-#define GUC_CTX_DESC_ATTR_RESET (1 << 4)
-#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5)
-#define GUC_CTX_DESC_ATTR_PCH (1 << 6)
-#define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7)
+#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
+#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
+#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
+#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3)
+#define GUC_STAGE_DESC_ATTR_RESET BIT(4)
+#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5)
+#define GUC_STAGE_DESC_ATTR_PCH BIT(6)
+#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
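The switch from open-coded shifts to BIT() above is purely mechanical: in this kernel BIT() is defined (linux/bitops.h) as a 1UL shift, so every attribute keeps its value. For illustration:

	#include <linux/bitops.h>	/* BIT(nr) expands to (1UL << (nr)) */

	/* e.g. the first two flags above keep their old values: */
	/* GUC_STAGE_DESC_ATTR_ACTIVE     == BIT(0) == 0x1 */
	/* GUC_STAGE_DESC_ATTR_PENDING_DB == BIT(1) == 0x2 */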
/* The guc control data is 10 DWORDs */
#define GUC_CTL_CTXINFO 0
u64 value_qw;
} __packed;
-#define GUC_MAX_DOORBELLS 256
-#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS)
+#define GUC_NUM_DOORBELLS 256
+#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
struct guc_wq_item {
u32 header;
u32 context_desc;
- u32 ring_tail;
+ u32 submit_element_info;
u32 fence_id;
} __packed;
struct guc_process_desc {
- u32 context_id;
+ u32 stage_id;
u64 db_base_addr;
u32 head;
u32 tail;
u32 context_desc;
u32 context_id;
u32 ring_status;
- u32 ring_lcra;
+ u32 ring_lrca;
u32 ring_begin;
u32 ring_end;
u32 ring_next_free_location;
u16 engine_submit_queue_count;
} __packed;
-/*Context descriptor for communicating between uKernel and Driver*/
-struct guc_context_desc {
+/*
+ * This structure describes a stage set arranged for a particular communication
+ * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
+ * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
+ * to avoid confusion with all the other things already named "context" in the
+ * driver. A static pool of these descriptors is stored inside a GEM object
+ * (stage_desc_pool) which is held for the entire lifetime of our interaction
+ * with the GuC, being allocated before the GuC is loaded with its firmware.
+ */
+struct guc_stage_desc {
u32 sched_common_area;
- u32 context_id;
+ u32 stage_id;
u32 pas_id;
u8 engines_used;
u64 db_trigger_cpu;
} __packed;
struct guc_policies {
- struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
+ struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
u32 number_of_registers;
} __packed;
+/* MMIO registers that are set as non privileged */
+struct mmio_white_list {
+ u32 mmio_start;
+ u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
+ u32 count;
+} __packed;
+
struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
-
- /* MMIO registers that are set as non privileged */
- struct __packed {
- u32 mmio_start;
- u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
- u32 count;
- } mmio_white_list[GUC_MAX_ENGINES_NUM];
+ struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM];
} __packed;
/* GuC Additional Data Struct */
#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
-/* User-friendly representation of an enum */
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
- switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
- default:
- return "UNKNOWN!";
- }
-};
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
} else
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
- if (guc->ads_vma) {
+ /* If GuC submission is enabled, set up additional parameters here */
+ if (i915.enable_guc_submission) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+ u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+ u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
- }
-
- /* If GuC submission is enabled, set up additional parameters here */
- if (i915.enable_guc_submission) {
- u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
- u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
pgs >>= PAGE_SHIFT;
params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
return 0;
}
-
-/**
- * intel_guc_fini() - clean up all allocated resources
- * @dev_priv: i915 device private
- */
-void intel_guc_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct drm_i915_gem_object *obj;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_guc_submission_disable(dev_priv);
- i915_guc_submission_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- obj = fetch_and_zero(&guc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
.remove_buf_file = remove_buf_file_callback,
};
-static void guc_log_remove_relay_file(struct intel_guc *guc)
-{
- relay_close(guc->log.relay_chan);
-}
-
-static int guc_log_create_relay_channel(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct rchan *guc_log_relay_chan;
- size_t n_subbufs, subbuf_size;
-
- /* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = guc->log.vma->obj->base.size;
-
- /* Store up to 8 snapshots, which is large enough to buffer sufficient
- * boot time logs and provides enough leeway to User, in terms of
- * latency, for consuming the logs from relay. Also doesn't take
- * up too much memory.
- */
- n_subbufs = 8;
-
- guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
- n_subbufs, &relay_callbacks, dev_priv);
- if (!guc_log_relay_chan) {
- DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- return -ENOMEM;
- }
-
- GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
- guc->log.relay_chan = guc_log_relay_chan;
- return 0;
-}
-
-static int guc_log_create_relay_file(struct intel_guc *guc)
+static int guc_log_relay_file_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct dentry *log_dir;
int ret;
+ if (i915.guc_log_level < 0)
+ return 0;
+
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
log_dir = dev_priv->drm.primary->debugfs_root;
return -ENODEV;
}
- ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
- if (ret) {
+ ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
+ if (ret < 0 && ret != -EEXIST) {
DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
return ret;
}
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+ relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
/* Switch to the next sub buffer */
- relay_flush(guc->log.relay_chan);
+ relay_flush(guc->log.runtime.relay_chan);
}
static void *guc_get_write_buffer(struct intel_guc *guc)
{
- if (!guc->log.relay_chan)
+ if (!guc->log.runtime.relay_chan)
return NULL;
/* Just get the base address of a new sub buffer and copy data into it
* done without using relay_reserve() along with relay_write(). So its
* better to use relay_reserve() alone.
*/
- return relay_reserve(guc->log.relay_chan, 0);
+ return relay_reserve(guc->log.runtime.relay_chan, 0);
}
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
void *src_data, *dst_data;
bool new_overflow;
- if (WARN_ON(!guc->log.buf_addr))
+ if (WARN_ON(!guc->log.runtime.buf_addr))
return;
/* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = guc->log.buf_addr;
+ log_buf_state = src_data = guc->log.runtime.buf_addr;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
}
}
-static void guc_log_cleanup(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- /* First disable the flush interrupt */
- gen9_disable_guc_interrupts(dev_priv);
-
- if (guc->log.flush_wq)
- destroy_workqueue(guc->log.flush_wq);
-
- guc->log.flush_wq = NULL;
-
- if (guc->log.relay_chan)
- guc_log_remove_relay_file(guc);
-
- guc->log.relay_chan = NULL;
-
- if (guc->log.buf_addr)
- i915_gem_object_unpin_map(guc->log.vma->obj);
-
- guc->log.buf_addr = NULL;
-}
-
static void capture_logs_work(struct work_struct *work)
{
struct intel_guc *guc =
- container_of(work, struct intel_guc, log.flush_work);
+ container_of(work, struct intel_guc, log.runtime.flush_work);
guc_log_capture_logs(guc);
}
-static int guc_log_create_extras(struct intel_guc *guc)
+static bool guc_log_has_runtime(struct intel_guc *guc)
+{
+ return guc->log.runtime.buf_addr != NULL;
+}
+
+static int guc_log_runtime_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
- int ret;
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+ int ret = 0;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- /* Nothing to do */
- if (i915.guc_log_level < 0)
- return 0;
-
- if (!guc->log.buf_addr) {
- /* Create a WC (Uncached for read) vmalloc mapping of log
- * buffer pages, so that we can directly get the data
- * (up-to-date) from memory.
- */
- vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
- return ret;
- }
+ GEM_BUG_ON(guc_log_has_runtime(guc));
- guc->log.buf_addr = vaddr;
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return PTR_ERR(vaddr);
}
- if (!guc->log.relay_chan) {
- /* Create a relay channel, so that we have buffers for storing
- * the GuC firmware logs, the channel will be linked with a file
- * later on when debugfs is registered.
- */
- ret = guc_log_create_relay_channel(guc);
- if (ret)
- return ret;
- }
+ guc->log.runtime.buf_addr = vaddr;
- if (!guc->log.flush_wq) {
- INIT_WORK(&guc->log.flush_work, capture_logs_work);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (guc->log.flush_wq == NULL) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-void intel_guc_log_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- unsigned long offset;
- uint32_t size, flags;
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
- if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and provides enough leeway to the user, in terms of
+ * latency, for consuming the logs from relay. Also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
- /* The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap */
- size = (1 + GUC_LOG_DPC_PAGES + 1 +
- GUC_LOG_ISR_PAGES + 1 +
- GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs, the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- vma = guc->log.vma;
- if (!vma) {
- /* We require SSE 4.1 for fast reads from the GuC log buffer and
- * it should be present on the chipsets supporting GuC based
- * submisssions.
- */
- if (WARN_ON(!i915_has_memcpy_from_wc())) {
- /* logging will not be enabled */
- i915.guc_log_level = -1;
- return;
- }
+ ret = -ENOMEM;
+ goto err_vaddr;
+ }
- vma = intel_guc_allocate_vma(guc, size);
- if (IS_ERR(vma)) {
- /* logging will be off */
- i915.guc_log_level = -1;
- return;
- }
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.runtime.relay_chan = guc_log_relay_chan;
+
+ INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+
+ /*
+ * GuC log buffer flush work item has to do register access to
+ * send the ack to GuC and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother about
+ * flushing this work item from the suspend hooks; any pending
+ * work item will either be executed before the suspend or
+ * scheduled later on resume. This way the handling of the work
+ * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.runtime.flush_wq) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ ret = -ENOMEM;
+ goto err_relaychan;
+ }
- guc->log.vma = vma;
+ return 0;
- if (guc_log_create_extras(guc)) {
- guc_log_cleanup(guc);
- i915_vma_unpin_and_release(&guc->log.vma);
- i915.guc_log_level = -1;
- return;
- }
- }
+err_relaychan:
+ relay_close(guc->log.runtime.relay_chan);
+err_vaddr:
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+ guc->log.runtime.buf_addr = NULL;
+ return ret;
+}
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+static void guc_log_runtime_destroy(struct intel_guc *guc)
+{
+ /*
+ * It's possible that the runtime stuff was never allocated because
+ * guc_log_level was < 0 at the time.
+ */
+ if (!guc_log_has_runtime(guc))
+ return;
- offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ destroy_workqueue(guc->log.runtime.flush_wq);
+ relay_close(guc->log.runtime.relay_chan);
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+ guc->log.runtime.buf_addr = NULL;
}
static int guc_log_late_setup(struct intel_guc *guc)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (i915.guc_log_level < 0)
- return -EINVAL;
-
- /* If log_level was set as -1 at boot time, then setup needed to
- * handle log buffer flush interrupts would not have been done yet,
- * so do that now.
- */
- ret = guc_log_create_extras(guc);
- if (ret)
- goto err;
+ if (!guc_log_has_runtime(guc)) {
+ /* If log_level was set as -1 at boot time, then setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_runtime_create(guc);
+ if (ret)
+ goto err;
+ }
- ret = guc_log_create_relay_file(guc);
+ ret = guc_log_relay_file_create(guc);
if (ret)
- goto err;
+ goto err_runtime;
return 0;
+
+err_runtime:
+ guc_log_runtime_destroy(guc);
err:
- guc_log_cleanup(guc);
/* logging will remain off */
i915.guc_log_level = -1;
return ret;
/* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
- flush_work(&guc->log.flush_work);
+ flush_work(&guc->log.runtime.flush_work);
/* Ask GuC to update the log buffer state */
guc_log_flush(guc);
guc_log_capture_logs(guc);
}
+int intel_guc_log_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ unsigned long offset;
+ uint32_t size, flags;
+ int ret;
+
+ GEM_BUG_ON(guc->log.vma);
+
+ if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+ i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+ /* The first page is to save log buffer state. Allocate one
+ * extra page for others in case for overlap */
+ size = (1 + GUC_LOG_DPC_PAGES + 1 +
+ GUC_LOG_ISR_PAGES + 1 +
+ GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (WARN_ON(!i915_has_memcpy_from_wc())) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ guc->log.vma = vma;
+
+ if (i915.guc_log_level >= 0) {
+ ret = guc_log_runtime_create(guc);
+ if (ret < 0)
+ goto err_vma;
+ }
+
+ /* each allocated unit is a page */
+ flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+ (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+ (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+ (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+ offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+
+ return 0;
+
+err_vma:
+ i915_vma_unpin_and_release(&guc->log.vma);
+err:
+ /* logging will be off */
+ i915.guc_log_level = -1;
+ return ret;
+}
+
+void intel_guc_log_destroy(struct intel_guc *guc)
+{
+ guc_log_runtime_destroy(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+}
+
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
struct intel_guc *guc = &dev_priv->guc;
return ret;
}
- i915.guc_log_level = log_param.verbosity;
+ if (log_param.logging_enabled) {
+ i915.guc_log_level = log_param.verbosity;
- /* If log_level was set as -1 at boot time, then the relay channel file
- * wouldn't have been created by now and interrupts also would not have
- * been enabled.
- */
- if (!dev_priv->guc.log.relay_chan) {
+ /* If log_level was set as -1 at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would not have
+ * been enabled. Try again now, just in case.
+ */
ret = guc_log_late_setup(guc);
- if (!ret)
- gen9_enable_guc_interrupts(dev_priv);
- } else if (!log_param.logging_enabled) {
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
+ return ret;
+ }
+
+ /* GuC logging is currently the only user of Guc2Host interrupts */
+ gen9_enable_guc_interrupts(dev_priv);
+ } else {
/* Once logging is disabled, GuC won't generate logs & send an
* interrupt. But there could be some data in the log buffer
* which is yet to be captured. So request GuC to update the log
/* As logging is disabled, update log level to reflect that */
i915.guc_log_level = -1;
- } else {
- /* In case interrupts were disabled, enable them now */
- gen9_enable_guc_interrupts(dev_priv);
}
return ret;
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_guc_submission)
+ if (!i915.enable_guc_submission || i915.guc_log_level < 0)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
return;
mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_cleanup(&dev_priv->guc);
+ /* GuC logging is currently the only user of Guc2Host interrupts */
+ gen9_disable_guc_interrupts(dev_priv);
+ guc_log_runtime_destroy(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
return true;
if (IS_SKYLAKE(dev_priv))
return true;
+ if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ return true;
return false;
}
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_scdc_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
{
if (IS_G4X(dev_priv))
return 165000;
+ else if (IS_GEMINILAKE(dev_priv))
+ return 594000;
else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
else
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
pipe_config->lane_count = 4;
+ if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
+ if (scdc->scrambling.low_rates)
+ pipe_config->hdmi_scrambling = true;
+
+ if (pipe_config->port_clock > 340000) {
+ pipe_config->hdmi_scrambling = true;
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
+ }
+ }
+
return true;
}
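To make the branch above concrete (mode numbers assumed from the CEA-861/HDMI 2.0 timings, not from this patch):

	/* 3840x2160@60 RGB 8bpc needs a 594 MHz TMDS clock:   */
	/*   port_clock = 594000 kHz > 340000 kHz              */
	/*   -> hdmi_scrambling = true,                        */
	/*      hdmi_high_tmds_clock_ratio = true (1/40)       */
	/* A sub-340 MHz mode on a sink that sets              */
	/* scdc->scrambling.low_rates:                         */
	/*   -> hdmi_scrambling = true, the clock ratio stays  */
	/*      at the default 1/10                            */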
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
+/*
+ * intel_hdmi_handle_sink_scrambling: handle sink scrambling/clock ratio setup
+ * @encoder: intel_encoder
+ * @connector: drm_connector
+ * @high_tmds_clock_ratio: bool to indicate if the function needs to set
+ * or reset the high TMDS clock ratio for scrambling
+ * @scrambling: bool to indicate if the function needs to set or reset
+ * sink scrambling
+ *
+ * This function handles scrambling on HDMI 2.0 capable sinks.
+ * If the required clock rate is > 340 MHz and scrambling is supported by
+ * the sink, it enables scrambling. This should be called before enabling
+ * the HDMI 2.0 port, as the sink can choose to disable scrambling if it
+ * doesn't detect a scrambled clock within 100 ms.
+ */
+void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_scrambling *sink_scrambling =
+ &connector->display_info.hdmi.scdc.scrambling;
+ struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus);
+ bool ret;
+
+ if (!sink_scrambling->supported)
+ return;
+
+ DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n",
+ encoder->base.name, connector->name);
+
+ /* Set TMDS bit clock ratio to 1/40 or 1/10 */
+ ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio);
+ if (!ret) {
+ DRM_ERROR("Set TMDS ratio failed\n");
+ return;
+ }
+
+ /* Enable/disable sink scrambling */
+ ret = drm_scdc_set_scrambling(adptr, scrambling);
+ if (!ret) {
+ DRM_ERROR("Set sink scrambling failed\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("sink scrambling handled\n");
+}
+
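A minimal sketch of how an encoder enable path could use this helper before turning the port on; the wrapper name is hypothetical, only intel_hdmi_handle_sink_scrambling() and the crtc_state fields come from this patch:

	static void example_hdmi_pre_enable(struct intel_encoder *encoder,
					    struct intel_crtc_state *pipe_config,
					    struct drm_connector_state *conn_state)
	{
		/* Must run before the port is enabled: the sink may drop
		 * scrambling if no scrambled clock shows up within 100 ms.
		 */
		intel_hdmi_handle_sink_scrambling(encoder,
						  conn_state->connector,
						  pipe_config->hdmi_high_tmds_clock_ratio,
						  pipe_config->hdmi_scrambling);
	}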
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->status;
- connector->status = connector->funcs->detect(connector, false);
+ connector->status = drm_helper_probe_detect(connector, NULL, false);
+
if (old_status == connector->status)
return false;
return err;
}
-/**
- * intel_huc_fini() - clean up resources allocated for HuC
- * @dev_priv: the drm_i915_private device
- *
- * Cleans up by releasing the huc firmware GEM obj.
- */
-void intel_huc_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&huc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
-
/**
* intel_guc_auth_huc() - authenticate ucode
* @dev_priv: the drm_i915_device
static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
{
+ /* XXX Note that platform_device_register_full() allocates a dma_mask
+ * and never frees it. We can't free it here as we cannot guarantee
+ * this is the last reference (i.e. that the dma_mask will not be
+ * used after our unregister). So we choose to leak the sizeof(u64)
+ * allocation here - it should be fixed in the platform_device code
+ * rather than by us fiddling with its internals.
+ */
+
platform_device_unregister(dev_priv->lpe_audio.platdev);
- kfree(dev_priv->lpe_audio.platdev->dev.dma_mask);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
* audio driver and i915
* @dev_priv: the i915 drm device private data
* @eld : ELD data
+ * @pipe: pipe id
* @port: port id
* @tmds_clk_speed: tmds clock frequency in Hz
*
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- GEM_BUG_ON(!IS_ALIGNED(rq->tail, 8));
+ assert_ring_tail_valid(rq->ring, rq->tail);
reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
{
struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
- unsigned long flags;
struct rb_node *rb;
bool submit = false;
- /* After execlist_first is updated, the tasklet will be rescheduled.
- *
- * If we are currently running (inside the tasklet) and a third
- * party queues a request and so updates engine->execlist_first under
- * the spinlock (which we have elided), it will atomically set the
- * TASKLET_SCHED flag causing the us to be re-executed and pick up
- * the change in state (the update to TASKLET_SCHED incurs a memory
- * barrier making this cross-cpu checking safe).
- */
- if (!READ_ONCE(engine->execlist_first))
- return;
-
last = port->request;
if (last)
/* WaIdleLiteRestore:bdw,skl
* and context switches) submission.
*/
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
while (rb) {
struct drm_i915_gem_request *cursor =
i915_gem_request_assign(&port->request, last);
engine->execlist_first = rb;
}
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irq(&engine->timeline->lock);
if (submit)
execlists_submit_ports(engine);
intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
- while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+ /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+ * imposing the cost of a locked atomic transaction when submitting a
+ * new request (outside of the context-switch interrupt).
+ */
+ while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
u32 __iomem *csb_mmio =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
u32 __iomem *buf =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
- unsigned int csb, head, tail;
-
- csb = readl(csb_mmio);
- head = GEN8_CSB_READ_PTR(csb);
- tail = GEN8_CSB_WRITE_PTR(csb);
- if (head == tail)
- break;
+ unsigned int head, tail;
+
+ /* The write will be ordered by the uncached read (itself
+ * a memory barrier), so we do not need another in the form
+ * of a locked instruction. The race between the interrupt
+ * handler and the split test/clear is harmless as we order
+ * our clear before the CSB read. If the interrupt arrived
+ * first between the test and the clear, we read the updated
+ * CSB and clear the bit. If the interrupt arrives as we read
+ * the CSB or later (i.e. after we had cleared the bit) the bit
+ * is set and we do a new loop.
+ */
+ __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ head = readl(csb_mmio);
+ tail = GEN8_CSB_WRITE_PTR(head);
+ head = GEN8_CSB_READ_PTR(head);
+ while (head != tail) {
+ unsigned int status;
- if (tail < head)
- tail += GEN8_CSB_ENTRIES;
- do {
- unsigned int idx = ++head % GEN8_CSB_ENTRIES;
- unsigned int status = readl(buf + 2 * idx);
+ if (++head == GEN8_CSB_ENTRIES)
+ head = 0;
/* We are flying near dragons again.
*
* status notifier.
*/
+ status = readl(buf + 2 * head);
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
/* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) !=
+ GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) !=
port[0].context_id);
GEM_BUG_ON(port[0].count == 0);
GEM_BUG_ON(port[0].count == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- } while (head < tail);
+ }
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
- GEN8_CSB_WRITE_PTR(csb) << 8),
+ writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
csb_mmio);
}
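The two-stage pattern in isolation; a minimal sketch with an illustrative local bitmask (the real code operates on engine->irq_posted):

	unsigned long irq_posted = 0;

	/* Instead of the locked RMW of test_and_clear_bit(): */
	while (test_bit(ENGINE_IRQ_EXECLIST, &irq_posted)) {
		/* Non-atomic clear; the following uncached MMIO read acts
		 * as the barrier that orders the clear before the CSB read.
		 * An interrupt setting the bit concurrently is never lost:
		 * either this pass already sees the new CSB entries, or
		 * the bit stays set and we loop again.
		 */
		__clear_bit(ENGINE_IRQ_EXECLIST, &irq_posted);
		/* head = readl(csb_mmio); ... process entries ... */
	}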
static struct intel_engine_cs *
pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
{
- struct intel_engine_cs *engine;
+ struct intel_engine_cs *engine =
+ container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+ GEM_BUG_ON(!locked);
- engine = container_of(pt,
- struct drm_i915_gem_request,
- priotree)->engine;
if (engine != locked) {
- if (locked)
- spin_unlock_irq(&locked->timeline->lock);
- spin_lock_irq(&engine->timeline->lock);
+ spin_unlock(&locked->timeline->lock);
+ spin_lock(&engine->timeline->lock);
}
return engine;
static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
{
- struct intel_engine_cs *engine = NULL;
+ struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
LIST_HEAD(dfs);
list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
struct i915_priotree *pt = dep->signaler;
- list_for_each_entry(p, &pt->signalers_list, signal_link)
+ /* Within an engine, there can be no cycle, but we may
+ * refer to the same dependency chain multiple times
+ * (redundant dependencies are not eliminated) and across
+ * engines.
+ */
+ list_for_each_entry(p, &pt->signalers_list, signal_link) {
+ GEM_BUG_ON(p->signaler->priority < pt->priority);
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
+ }
list_safe_reset_next(dep, p, dfs_link);
- if (!RB_EMPTY_NODE(&pt->node))
- continue;
-
- engine = pt_lock_engine(pt, engine);
-
- /* If it is not already in the rbtree, we can update the
- * priority inplace and skip over it (and its dependencies)
- * if it is referenced *again* as we descend the dfs.
- */
- if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
- pt->priority = prio;
- list_del_init(&dep->dfs_link);
- }
}
+ engine = request->engine;
+ spin_lock_irq(&engine->timeline->lock);
+
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
struct i915_priotree *pt = dep->signaler;
if (prio <= pt->priority)
continue;
- GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
pt->priority = prio;
- rb_erase(&pt->node, &engine->execlist_queue);
- if (insert_request(pt, &engine->execlist_queue))
- engine->execlist_first = &pt->node;
+ if (!RB_EMPTY_NODE(&pt->node)) {
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
}
- if (engine)
- spin_unlock_irq(&engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline->lock);
/* XXX Do we need to preempt to make room for us and our deps? */
}
ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
request->ring->head = request->postfix;
- request->ring->last_retired_head = -1;
intel_ring_update_space(request->ring);
/* Catch up with any missed context-switch interrupts */
GEM_BUG_ON(request->ctx != port[0].request->ctx);
/* Reset WaIdleLiteRestore:bdw,skl as well */
- request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ request->tail =
+ intel_ring_wrap(request->ring,
+ request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
+ assert_ring_tail_valid(request->ring, request->tail);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
gen8_emit_wa_tail(request, cs);
}
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
gen8_emit_wa_tail(request, cs);
}
{
engine->submit_request = execlists_submit_request;
engine->schedule = execlists_schedule;
+ engine->irq_tasklet.func = intel_lrc_irq_handler;
}
static void
i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0;
- ce->ring->last_retired_head = -1;
intel_ring_update_space(ce->ring);
}
}
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
void *base;
+ const void *vbt;
+ u32 vbt_size;
BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
if (mboxes & MBOX_ASLE_EXT)
DRM_DEBUG_DRIVER("ASLE extension supported\n");
- if (!dmi_check_system(intel_no_opregion_vbt)) {
- const void *vbt = NULL;
- u32 vbt_size = 0;
-
- if (opregion->header->opregion_ver >= 2 && opregion->asle &&
- opregion->asle->rvda && opregion->asle->rvds) {
- opregion->rvda = memremap(opregion->asle->rvda,
- opregion->asle->rvds,
- MEMREMAP_WB);
- vbt = opregion->rvda;
- vbt_size = opregion->asle->rvds;
- }
+ if (dmi_check_system(intel_no_opregion_vbt))
+ goto out;
+ if (opregion->header->opregion_ver >= 2 && opregion->asle &&
+ opregion->asle->rvda && opregion->asle->rvds) {
+ opregion->rvda = memremap(opregion->asle->rvda,
+ opregion->asle->rvds,
+ MEMREMAP_WB);
+ vbt = opregion->rvda;
+ vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
+ goto out;
} else {
- vbt = base + OPREGION_VBT_OFFSET;
- /*
- * The VBT specification says that if the ASLE ext
- * mailbox is not used its area is reserved, but
- * on some CHT boards the VBT extends into the
- * ASLE ext area. Allow this even though it is
- * against the spec, so we do not end up rejecting
- * the VBT on those boards (and end up not finding the
- * LCD panel because of this).
- */
- vbt_size = (mboxes & MBOX_ASLE_EXT) ?
- OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
- vbt_size -= OPREGION_VBT_OFFSET;
- if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
- opregion->vbt = vbt;
- opregion->vbt_size = vbt_size;
- }
+ DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
}
}
+ vbt = base + OPREGION_VBT_OFFSET;
+ /*
+ * The VBT specification says that if the ASLE ext mailbox is not used
+ * its area is reserved, but on some CHT boards the VBT extends into the
+ * ASLE ext area. Allow this even though it is against the spec, so we
+ * do not end up rejecting the VBT on those boards (and end up not
+ * finding the LCD panel because of this).
+ */
+ vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+ OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+ vbt_size -= OPREGION_VBT_OFFSET;
+ if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+ DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+ opregion->vbt = vbt;
+ opregion->vbt_size = vbt_size;
+ } else {
+ DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+ }
+
+out:
return 0;
err_out:
goto unlock;
}
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+ state->acquire_ctx = crtc->base.dev->mode_config.acquire_ctx;
pipe_config = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
return wm_size;
}
+static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+
+ /* FIXME check the 'enable' instead */
+ if (!crtc_state->base.active)
+ return false;
+
+ /*
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
+ */
+ if (plane->id == PLANE_CURSOR)
+ return plane_state->base.fb != NULL;
+ else
+ return plane_state->base.visible;
+}
+
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc, *enabled = NULL;
uint32_t method1, method2;
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
uint32_t method1, method2;
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
{
int cpp;
- /*
- * Treat cursor with fb as always visible since cursor updates
- * can happen faster than the vrefresh rate, and the current
- * watermark code doesn't handle that correctly. Cursor updates
- * which set/clear the fb or change the cursor size are going
- * to get throttled by intel_legacy_cursor_update() to work
- * around this problem with the watermark code.
- */
- if (!cstate->base.active || !pstate->base.fb)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
{
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
* Caller should take care of dividing & rounding off the value.
*/
static uint32_t
-skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate)
{
+ struct intel_plane *plane = to_intel_plane(pstate->base.plane);
uint32_t downscale_h, downscale_w;
uint32_t src_w, src_h, dst_w, dst_h;
- if (WARN_ON(!pstate->base.visible))
+ if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
return DRM_PLANE_HELPER_NO_SCALING;
/* n.b., src is 16.16 fixed point, dst is whole integer */
- src_w = drm_rect_width(&pstate->base.src);
- src_h = drm_rect_height(&pstate->base.src);
- dst_w = drm_rect_width(&pstate->base.dst);
- dst_h = drm_rect_height(&pstate->base.dst);
+ if (plane->id == PLANE_CURSOR) {
+ src_w = pstate->base.src_w;
+ src_h = pstate->base.src_h;
+ dst_w = pstate->base.crtc_w;
+ dst_h = pstate->base.crtc_h;
+ } else {
+ src_w = drm_rect_width(&pstate->base.src);
+ src_h = drm_rect_height(&pstate->base.src);
+ dst_w = drm_rect_width(&pstate->base.dst);
+ dst_h = drm_rect_height(&pstate->base.dst);
+ }
+
if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
const struct drm_plane_state *pstate,
int y)
{
+ struct intel_plane *plane = to_intel_plane(pstate->plane);
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
fb = pstate->fb;
format = fb->format->format;
- if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (plane->id == PLANE_CURSOR)
return 0;
if (y && format != DRM_FORMAT_NV12)
return 0;
data_rate = width * height * fb->format->cpp[0];
}
- down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+ down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
return (uint64_t)data_rate * down_scale_amount >> 16;
}
uint64_t pixel_rate;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!pstate->base.visible))
+ if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
return 0;
/*
* with additional adjustments for plane-specific scaling.
*/
adjusted_pixel_rate = cstate->pixel_rate;
- downscale_amount = skl_plane_downscale_amount(pstate);
+ downscale_amount = skl_plane_downscale_amount(cstate, pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
uint8_t *out_lines, /* out */
bool *enabled /* out */)
{
+ struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
struct drm_plane_state *pstate = &intel_pstate->base;
struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
bool y_tiled, x_tiled;
- if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
+ if (latency == 0 ||
+ !intel_wm_plane_visible(cstate, intel_pstate)) {
*enabled = false;
return 0;
}
if (apply_memory_bw_wa && x_tiled)
latency += 15;
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ if (plane->id == PLANE_CURSOR) {
+ width = intel_pstate->base.crtc_w;
+ height = intel_pstate->base.crtc_h;
+ } else {
+ width = drm_rect_width(&intel_pstate->base.src) >> 16;
+ height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ }
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
case GEN6_PCODE_TIMEOUT:
return -ETIMEDOUT;
default:
- MISSING_CASE(flags)
+ MISSING_CASE(flags);
return 0;
}
}
const i915_reg_t reg)
{
u32 lower, upper, tmp;
+ int loop = 2;
/* The register accessed do not need forcewake. We borrow
* uncore lock to prevent concurrent access to range reg.
I915_WRITE_FW(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = I915_READ_FW(reg);
- } while (upper != tmp);
+ } while (upper != tmp && --loop);
/* Everywhere else we always use VLV_COUNTER_CONTROL with the
* VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
void intel_ring_update_space(struct intel_ring *ring)
{
- if (ring->last_retired_head != -1) {
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
- }
-
- ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
- ring->tail, ring->size);
+ ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
}
static int
}
/* If the rq hung, jump to its breadcrumb and skip the batch */
- if (request->fence.error == -EIO) {
- struct intel_ring *ring = request->ring;
-
- ring->head = request->postfix;
- ring->last_retired_head = -1;
- }
+ if (request->fence.error == -EIO)
+ request->ring->head = request->postfix;
} else {
engine->legacy_active_context = NULL;
}
i915_gem_request_submit(request);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
I915_WRITE_TAIL(request->engine, request->tail);
}
*cs++ = MI_USER_INTERRUPT;
req->tail = intel_ring_offset(req, cs);
- GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int i9xx_emit_breadcrumb_sz = 4;
*cs++ = MI_NOOP;
req->tail = intel_ring_offset(req, cs);
- GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int gen8_render_emit_breadcrumb_sz = 8;
if (IS_I830(engine->i915) || IS_I845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
- ring->last_retired_head = -1;
intel_ring_update_space(ring);
vma = intel_ring_create_vma(engine->i915, size);
ret = context_pin(ctx);
if (ret)
goto error;
+
+ ce->state->obj->mm.dirty = true;
}
/* The kernel context is only used as a placeholder for flushing the
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id)
engine->buffer->head = engine->buffer->tail;
- engine->buffer->last_retired_head = -1;
- }
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
num_rings =
hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
- engine->emit_breadcrumb_sz += num_rings * 6;
+ engine->emit_breadcrumb_sz += num_rings * 8;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
int space;
int size;
int effective_size;
-
- /** We track the position of the requests in the ring buffer, and
- * when each is retired we increment last_retired_head as the GPU
- * must have finished processing the request and so we know we
- * can advance the ringbuffer up to that position.
- *
- * last_retired_head is set to -1 after the value is consumed so
- * we can detect new retirements.
- */
- u32 last_retired_head;
};
struct i915_gem_context;
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline unsigned
+static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
- return 1 << engine->id;
-}
-
-static inline void
-intel_flush_status_page(struct intel_engine_cs *engine, int reg)
-{
- mb();
- clflush(&engine->status_page.page_addr[reg]);
- mb();
+ return BIT(engine->id);
}
static inline u32
}
static inline void
-intel_write_status_page(struct intel_engine_cs *engine,
- int reg, u32 value)
+intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
- mb();
- clflush(&engine->status_page.page_addr[reg]);
- engine->status_page.page_addr[reg] = value;
- clflush(&engine->status_page.page_addr[reg]);
- mb();
+ /* Writing into the status page should be done sparingly. Since we
+ * only write when we are uncertain of the device state, we take a bit
+ * of extra paranoia to try and ensure that the HWS takes the value
+ * we give and that it doesn't end up trapped inside the CPU!
+ */
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ mb();
+ clflush(&engine->status_page.page_addr[reg]);
+ engine->status_page.page_addr[reg] = value;
+ clflush(&engine->status_page.page_addr[reg]);
+ mb();
+ } else {
+ WRITE_ONCE(engine->status_page.page_addr[reg], value);
+ }
}
/*
}
static inline u32
-intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
+static inline u32
+intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - req->ring->vaddr;
GEM_BUG_ON(offset > req->ring->size);
- return offset & (req->ring->size - 1);
+ return intel_ring_wrap(req->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ /* We could combine these into a single tail operation, but keeping
+ * them as separate tests will help identify the cause should one
+ * ever fire.
+ */
+ GEM_BUG_ON(!IS_ALIGNED(tail, 8));
+ GEM_BUG_ON(tail >= ring->size);
}
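intel_ring_wrap() depends on ring->size being a power of two, which makes the mask equivalent to a modulo; a quick worked example (size assumed):

	/* With a hypothetical 4096-byte ring (power of two):       */
	/*   intel_ring_wrap(ring, 4100) == 4100 & 4095 == 4        */
	/*   intel_ring_wrap(ring, 4096) == 4096 & 4095 == 0        */
	/* matching 4100 % 4096 and 4096 % 4096 without a divide.   */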
void intel_ring_update_space(struct intel_ring *ring);
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct device *kdev = &pdev->dev;
+ int ret;
- pm_runtime_get_sync(kdev);
+ ret = pm_runtime_get_sync(kdev);
+ WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
atomic_inc(&dev_priv->pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- WARN_ON_ONCE(ret < 0);
+ WARN_ONCE(ret < 0,
+ "pm_runtime_get_if_in_use() failed: %d\n", ret);
if (ret <= 0)
return false;
}
* platforms without RPM support.
*/
if (!HAS_RUNTIME_PM(dev_priv)) {
+ int ret;
+
pm_runtime_dont_use_autosuspend(kdev);
- pm_runtime_get_sync(kdev);
+ ret = pm_runtime_get_sync(kdev);
+ WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
} else {
pm_runtime_use_autosuspend(kdev);
}
struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = intel_plane->id;
enum pipe pipe = intel_plane->pipe;
- u32 plane_ctl;
+ u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
unsigned int rotation = plane_state->base.rotation;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (!IS_GEMINILAKE(dev_priv)) {
- plane_ctl |=
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE |
- PLANE_CTL_PLANE_GAMMA_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
-
/* Sizes are 0 based */
src_w--;
src_h--;
I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
-static void
-vlv_update_plane(struct drm_plane *dplane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
- enum plane_id plane_id = intel_plane->id;
- u32 sprctl;
- u32 sprsurf_offset, linear_offset;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 sprctl;
- sprctl = SP_ENABLE;
+ sprctl = SP_ENABLE | SP_GAMMA_ENABLE;
switch (fb->format->format) {
case DRM_FORMAT_YUYV:
sprctl |= SP_FORMAT_RGBA8888;
break;
default:
- /*
- * If we get here one of the upper layers failed to filter
- * out the unsupported plane formats
- */
- BUG();
- break;
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- sprctl |= SP_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
+ return sprctl;
+}
+
+static void
+vlv_update_plane(struct drm_plane *dplane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane_id = intel_plane->id;
+ u32 sprctl = plane_state->ctl;
+ u32 sprsurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
- src_w--;
- src_h--;
crtc_w--;
crtc_h--;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- if (rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- } else if (rotation & DRM_REFLECT_X) {
- x += src_w;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
-ivb_update_plane(struct drm_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
- u32 sprctl, sprscale = 0;
- u32 sprsurf_offset, linear_offset;
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 sprctl;
+
+ sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE;
+
+ if (IS_IVYBRIDGE(dev_priv))
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl = SPRITE_ENABLE;
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- sprctl |= SPRITE_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
if (rotation & DRM_ROTATE_180)
sprctl |= SPRITE_ROTATE_180;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
- else
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- sprctl |= SPRITE_PIPE_CSC_ENABLE;
-
if (key->flags & I915_SET_COLORKEY_DESTINATION)
sprctl |= SPRITE_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SPRITE_SOURCE_KEY;
+ return sprctl;
+}
+
+static void
+ivb_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = intel_plane->pipe;
+ u32 sprctl = plane_state->ctl, sprscale = 0;
+ u32 sprsurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
src_w--;
src_h--;
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- /* HSW+ does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
- rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
-ilk_update_plane(struct drm_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_plane->pipe;
- u32 dvscntr, dvsscale;
- u32 dvssurf_offset, linear_offset;
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 dvscntr;
- dvscntr = DVS_ENABLE;
+ dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;
+
+ if (IS_GEN6(dev_priv))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- dvscntr |= DVS_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
if (rotation & DRM_ROTATE_180)
dvscntr |= DVS_ROTATE_180;
- if (IS_GEN6(dev_priv))
- dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
-
if (key->flags & I915_SET_COLORKEY_DESTINATION)
dvscntr |= DVS_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
dvscntr |= DVS_SOURCE_KEY;
+ return dvscntr;
+}
+
+static void
+ilk_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int pipe = intel_plane->pipe;
+ u32 dvscntr = plane_state->ctl, dvsscale = 0;
+ u32 dvssurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- dvsscale = 0;
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- if (rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
ret = skl_check_plane_surface(state);
if (ret)
return ret;
+
+ state->ctl = skl_plane_ctl(crtc_state, state);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = vlv_sprite_ctl(crtc_state, state);
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = ivb_sprite_ctl(crtc_state, state);
+ } else {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = ilk_sprite_ctl(crtc_state, state);
}
return 0;
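
The pattern these hunks implement, reduced to a standalone sketch with hypothetical names: the per-platform control-register value is computed once during the atomic check phase (where failing and sleeping are allowed) and stashed in the plane state, so the commit-phase update functions are left with nothing but register writes under the uncore lock.

#include <errno.h>
#include <stdint.h>

/* Hypothetical stand-ins; the real driver validates formats and writes MMIO. */
static uint32_t compute_ctl(unsigned int format)
{
	return format ? (0x80000000u | format) : 0;
}
static void write_reg(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

struct plane_state {
	unsigned int format;
	uint32_t ctl;		/* precomputed control-register value */
};

static int plane_check(struct plane_state *state)
{
	state->ctl = compute_ctl(state->format);	/* may fail, may sleep */
	return state->ctl ? 0 : -EINVAL;
}

static void plane_update(const struct plane_state *state)
{
	write_reg(0x70180, state->ctl);		/* commit: register writes only */
}
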
* Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure
* we have a pipe programmed in order to probe the TV.
*/
-static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector, bool force)
+static int
+intel_tv_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
{
struct drm_display_mode mode;
struct intel_tv *intel_tv = intel_attached_tv(connector);
if (force) {
struct intel_load_detect_pipe tmp;
- struct drm_modeset_acquire_ctx ctx;
+ int ret;
- drm_modeset_acquire_init(&ctx, 0);
+ ret = intel_get_load_detect_pipe(connector, &mode, &tmp, ctx);
+ if (ret < 0)
+ return ret;
- if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
+ if (ret > 0) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp, &ctx);
+ intel_release_load_detect_pipe(connector, &tmp, ctx);
status = type < 0 ?
connector_status_disconnected :
connector_status_connected;
} else
status = connector_status_unknown;
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
} else
return connector->status;
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = intel_tv_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_tv_destroy,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ .detect_ctx = intel_tv_detect,
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
};
#include "intel_uc.h"
#include <linux/firmware.h>
+/* Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = fetch_and_zero(&uc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
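
fetch_and_zero() is the usual i915 take-ownership idiom: read the field, clear it, and hand back the old value, so the object is put at most once even if the fini path is entered again. Approximately (not the verbatim macro):

/* Approximate shape of fetch_and_zero(); see the i915 headers for the
 * real definition.
 */
#define FETCH_AND_ZERO_SKETCH(ptr) ({		\
	__typeof__(*(ptr)) __tmp = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__tmp;					\
})
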
+
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->guc.send_mutex);
+ struct intel_guc *guc = &dev_priv->guc;
+
+ mutex_init(&guc->send_mutex);
+ guc->send = intel_guc_send_mmio;
+}
+
+static void fetch_uc_fw(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ if (!uc_fw->path)
+ return;
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+ if (err)
+ goto fail;
+ if (!fw)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+ uc_fw->path, fw);
+
+ /* Check the size of the blob before examining buffer contents */
+ if (fw->size < sizeof(struct uc_css_header)) {
+ DRM_NOTE("Firmware header is missing\n");
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Firmware bits always start from header */
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
+
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+ DRM_NOTE("CSS header definition mismatch\n");
+ goto fail;
+ }
+
+ /* then, uCode */
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ DRM_NOTE("RSA key size is bad\n");
+ goto fail;
+ }
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At least, it should have header, uCode and RSA. Size of all three. */
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (fw->size < size) {
+ DRM_NOTE("Missing firmware components\n");
+ goto fail;
+ }
+
+ /*
+ * The GuC firmware image has the version number embedded at a
+ * well-known offset within the firmware blob; note that major / minor
+ * version are TWO bytes each (i.e. u16), although all pointers and
+ * offsets are defined in terms of bytes (u8).
+ */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ /* Header and uCode will be loaded to WOPCM. Size of the two. */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+
+ /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+ goto fail;
+ }
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+ DRM_NOTE("Skipping %s firmware version check\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+
+ DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+ uc_fw->obj);
+
+ release_firmware(fw);
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+ return;
+
+fail:
+ DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+ uc_fw->path, err);
+ DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+ err, fw, uc_fw->obj);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}
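
As a worked example of the dword-to-byte arithmetic above (the field values are plausible placeholders, not read from a real blob): every *_dw field counts 32-bit dwords, so the byte sizes are the dword counts times sizeof(u32), and the three regions are laid out back to back.

#include <stdio.h>

/* Worked example of the CSS layout arithmetic; all *_dw values below are
 * hypothetical placeholders counting 32-bit dwords.
 */
int main(void)
{
	unsigned int header_size_dw = 161, modulus_size_dw = 64;
	unsigned int key_size_dw = 64, exponent_size_dw = 1;
	unsigned int size_dw = 5000;	/* total image size in dwords */

	unsigned int header_size = (header_size_dw - modulus_size_dw -
				    key_size_dw - exponent_size_dw) * 4;
	unsigned int ucode_size = (size_dw - header_size_dw) * 4;
	unsigned int rsa_size = key_size_dw * 4;

	/* header at offset 0, uCode right after it, RSA after the uCode */
	printf("header %u B, ucode %u B @ %u, rsa %u B @ %u\n",
	       header_size, ucode_size, header_size,
	       rsa_size, header_size + ucode_size);
	return 0;
}
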
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- if (dev_priv->huc.fw.path)
- intel_uc_prepare_fw(dev_priv, &dev_priv->huc.fw);
+ fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
+ fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
+}
- if (dev_priv->guc.fw.path)
- intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw);
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
+{
+ __intel_uc_fw_fini(&dev_priv->guc.fw);
+ __intel_uc_fw_fini(&dev_priv->huc.fw);
}
int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
int ret, attempts;
- /* GuC not enabled, nothing to do */
if (!i915.enable_guc_loading)
return 0;
i915_ggtt_enable_guc(dev_priv);
if (i915.enable_guc_submission) {
+ /*
+ * This is stuff we need to have available at fw load time
+ * if we are planning to enable submission later
+ */
ret = i915_guc_submission_init(dev_priv);
if (ret)
- goto err;
+ goto err_guc;
}
/* WaEnableuKernelHeaderValidFix:skl */
ret = i915_guc_submission_enable(dev_priv);
if (ret)
- goto err_submission;
+ goto err_interrupts;
}
return 0;
* nonfatal error (i.e. it doesn't prevent driver load, but
* marks the GPU as wedged until reset).
*/
+err_interrupts:
+ gen9_disable_guc_interrupts(dev_priv);
err_submission:
if (i915.enable_guc_submission)
i915_guc_submission_fini(dev_priv);
-
-err:
+err_guc:
i915_ggtt_disable_guc(dev_priv);
DRM_ERROR("GuC init failed\n");
return ret;
}
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_loading)
+ return;
+
+ if (i915.enable_guc_submission) {
+ i915_guc_submission_disable(dev_priv);
+ gen9_disable_guc_interrupts(dev_priv);
+ i915_guc_submission_fini(dev_priv);
+ }
+ i915_ggtt_disable_guc(dev_priv);
+}
+
/*
* Read GuC command/status register (SOFT_SCRATCH_0)
* Return true if it contains a response rather than a command
*/
-static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
+static bool guc_recv(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
return INTEL_GUC_RECV_IS_RESPONSE(val);
}
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+/*
+ * This function implements the MMIO based host to GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 status;
return -EINVAL;
mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
dev_priv->guc.action_count += 1;
dev_priv->guc.action_cmd = action[0];
* up to that length of time, then switch to a slower sleep-wait loop.
 * No intel_guc_send command should ever take longer than 10ms.
*/
- ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+ ret = wait_for_us(guc_recv(guc, &status), 10);
if (ret)
- ret = wait_for(intel_guc_recv(guc, &status), 10);
+ ret = wait_for(guc_recv(guc, &status), 10);
if (status != INTEL_GUC_STATUS_SUCCESS) {
/*
* Either the GuC explicitly returned an error (which
}
dev_priv->guc.action_status = status;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
mutex_unlock(&guc->send_mutex);
return ret;
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-
-void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err)
- goto fail;
- if (!fw)
- goto fail;
-
- DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
- uc_fw->path, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_NOTE("Firmware header is missing\n");
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_NOTE("CSS header definition mismatch\n");
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
- DRM_NOTE("RSA key size is bad\n");
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* At least, it should have header, uCode and RSA. Size of all three. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_NOTE("Missing firmware components\n");
- goto fail;
- }
-
- /*
- * The GuC firmware image has the version number embedded at a
- * well-known offset within the firmware blob; note that major / minor
- * version are TWO bytes each (i.e. u16), although all pointers and
- * offsets are defined in terms of bytes (u8).
- */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = uc_fw->header_size + uc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > intel_guc_wopcm_size(dev_priv)) {
- DRM_ERROR("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
- break;
-
- default:
- DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
- err = -ENOEXEC;
- goto fail;
- }
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("Skipping uC firmware version check\n");
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
-
- DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
- uc_fw->obj);
-
- release_firmware(fw);
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- return;
-
-fail:
- DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
- uc_fw->path, err);
- DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, uc_fw->obj);
-
- release_firmware(fw); /* OK even if fw is NULL */
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
-}
/*
* This structure primarily describes the GEM object shared with the GuC.
- * The GEM object is held for the entire lifetime of our interaction with
+ * The specs sometimes refer to this object as a "GuC context", but we use
+ * the term "client" to avoid confusion with hardware contexts. This
+ * GEM object is held for the entire lifetime of our interaction with
* the GuC, being allocated before the GuC is loaded with its firmware.
* Because there's no way to update the address used by the GuC after
* initialisation, the shared object must stay pinned into the GGTT as
*
* The single GEM object described here is actually made up of several
* separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process decriptor" which holds sequence data for
+ * kmap'd) includes the "process descriptor" which holds sequence data for
* the doorbell, and one cacheline which actually *is* the doorbell; a
* write to this will "ring the doorbell" (i.e. send an interrupt to the
* GuC). The subsequent pages of the client object constitute the work
uint32_t engines; /* bitmap of (host) engine ids */
uint32_t priority;
- uint32_t ctx_index;
+ u32 stage_id;
uint32_t proc_desc_offset;
- uint32_t doorbell_offset;
- uint32_t doorbell_cookie;
- uint16_t doorbell_id;
- uint16_t padding[3]; /* Maintain alignment */
+ u16 doorbell_id;
+ unsigned long doorbell_offset;
+ u32 doorbell_cookie;
spinlock_t wq_lock;
uint32_t wq_offset;
INTEL_UC_FIRMWARE_SUCCESS
};
+/* User-friendly representation of an enum */
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
+ case INTEL_UC_FIRMWARE_NONE:
+ return "NONE";
+ case INTEL_UC_FIRMWARE_PENDING:
+ return "PENDING";
+ case INTEL_UC_FIRMWARE_SUCCESS:
+ return "SUCCESS";
+ }
+ return "<invalid>";
+}
+
enum intel_uc_fw_type {
INTEL_UC_FW_TYPE_GUC,
INTEL_UC_FW_TYPE_HUC
};
+/* User-friendly representation of an enum */
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
+
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the GuC.
struct intel_guc_log {
uint32_t flags;
struct i915_vma *vma;
- void *buf_addr;
- struct workqueue_struct *flush_wq;
- struct work_struct flush_work;
- struct rchan *relay_chan;
-
+ /* The runtime stuff gets created only when GuC logging gets enabled */
+ struct {
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+ } runtime;
/* logging related stats */
u32 capture_miss_count;
u32 flush_interrupt_count;
bool interrupts_enabled;
struct i915_vma *ads_vma;
- struct i915_vma *ctx_pool_vma;
- struct ida ctx_ids;
+ struct i915_vma *stage_desc_pool;
+ void *stage_desc_pool_vaddr;
+ struct ida stage_ids;
struct i915_guc_client *execbuf_client;
- DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
+ DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
/* Action status & statistics */
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
+
+ /* GuC's FW specific send function */
+ int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
};
struct intel_huc {
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
-void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw);
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ return guc->send(guc, action, len);
+}
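
Routing every request through guc->send makes the transport swappable: intel_uc_init_early() installs the MMIO backend, and a different backend could be plugged in later without touching any caller of intel_guc_send(). The shape of the pattern, reduced to a standalone sketch with hypothetical names:

#include <stdint.h>

struct guc {
	int (*send)(struct guc *guc, const uint32_t *action, uint32_t len);
};

/* MMIO backend stub; the real one writes action[] to scratch registers
 * and polls for a response.
 */
static int send_mmio(struct guc *guc, const uint32_t *action, uint32_t len)
{
	(void)guc; (void)action; (void)len;
	return 0;
}

static inline int guc_send(struct guc *guc, const uint32_t *action, uint32_t len)
{
	return guc->send(guc, action, len);	/* backend chosen at init time */
}

int main(void)
{
	uint32_t action[1] = { 0x10 };	/* hypothetical action code */
	struct guc guc = { .send = send_mmio };

	return guc_send(&guc, action, 1);
}
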
/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
-void intel_guc_fini(struct drm_i915_private *dev_priv);
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
/* intel_guc_log.c */
-void intel_guc_log_create(struct intel_guc *guc);
+int intel_guc_log_create(struct intel_guc *guc);
+void intel_guc_log_destroy(struct intel_guc *guc);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_fini(struct drm_i915_private *dev_priv);
int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
}
static inline void
-fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- WARN_ON(!i915_mmio_reg_valid(d->reg_set));
- __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}
static inline void
}
static inline void
-fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
}
static inline void
-fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(d->i915, d->reg_set, d->val_set);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}
static inline void
-fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
}
static inline void
-fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
-}
-
-static inline void
-fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
-{
- /* something from same cacheline, but not from the set register */
- if (i915_mmio_reg_valid(d->reg_post))
- __raw_posting_read(d->i915, d->reg_post);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}
static void
-fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- for_each_fw_domain_masked(d, fw_domains, dev_priv) {
- fw_domain_wait_ack_clear(d);
- fw_domain_get(d);
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+ fw_domain_wait_ack_clear(i915, d);
+ fw_domain_get(i915, d);
}
- for_each_fw_domain_masked(d, fw_domains, dev_priv)
- fw_domain_wait_ack(d);
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_wait_ack(i915, d);
- dev_priv->uncore.fw_domains_active |= fw_domains;
+ i915->uncore.fw_domains_active |= fw_domains;
}
static void
-fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- for_each_fw_domain_masked(d, fw_domains, dev_priv) {
- fw_domain_put(d);
- fw_domain_posting_read(d);
- }
-
- dev_priv->uncore.fw_domains_active &= ~fw_domains;
-}
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
-static void
-fw_domains_posting_read(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore_forcewake_domain *d;
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_put(i915, d);
- /* No need to do for all, just do for first found */
- for_each_fw_domain(d, dev_priv) {
- fw_domain_posting_read(d);
- break;
- }
+ i915->uncore.fw_domains_active &= ~fw_domains;
}
static void
-fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_reset(struct drm_i915_private *i915,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- if (dev_priv->uncore.fw_domains == 0)
+ if (!fw_domains)
return;
- for_each_fw_domain_masked(d, fw_domains, dev_priv)
- fw_domain_reset(d);
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
- fw_domains_posting_read(dev_priv);
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_reset(i915, d);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct drm_i915_private *dev_priv = domain->i915;
+ struct drm_i915_private *dev_priv =
+ container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
unsigned long irqflags;
assert_rpm_device_not_suspended(dev_priv);
* timers are run before holding.
*/
while (1) {
+ unsigned int tmp;
+
active_domains = 0;
- for_each_fw_domain(domain, dev_priv) {
+ for_each_fw_domain(domain, dev_priv, tmp) {
if (hrtimer_cancel(&domain->timer) == 0)
continue;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- for_each_fw_domain(domain, dev_priv) {
+ for_each_fw_domain(domain, dev_priv, tmp) {
if (hrtimer_active(&domain->timer))
active_domains |= domain->mask;
}
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
- fw_domains_reset(dev_priv, FORCEWAKE_ALL);
+ fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
if (restore) { /* If reset with a user forcewake, try to restore */
if (fw)
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
if (domain->wake_count++)
fw_domains &= ~domain->mask;
- }
if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
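
The wake_count test above is the usual refcount gate: only the 0-to-1 transition in get (and, symmetrically, the final put) reaches the hardware callbacks; the real driver additionally defers the release through a timer, omitted here. A minimal standalone model, with hypothetical hw_get/hw_put stand-ins for the forcewake register writes:

/* Minimal model of the per-domain refcount; hw_get/hw_put are
 * hypothetical stand-ins for the reg_set writes.
 */
struct fw_domain { unsigned int wake_count; };

static void hw_get(struct fw_domain *d) { (void)d; /* assert forcewake */ }
static void hw_put(struct fw_domain *d) { (void)d; /* release forcewake */ }

static void fw_get(struct fw_domain *d)
{
	if (d->wake_count++ == 0)	/* only the first user wakes hw */
		hw_get(d);
}

static void fw_put(struct fw_domain *d)
{
	if (--d->wake_count == 0)	/* only the last user releases it */
		hw_put(d);
}
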
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
if (WARN_ON(domain->wake_count == 0))
continue;
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
- for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
fw_domain_arm_timer(domain);
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
WARN_ON(d->wake_count);
+ WARN_ON(!i915_mmio_reg_valid(reg_set));
+ WARN_ON(!i915_mmio_reg_valid(reg_ack));
+
d->wake_count = 0;
d->reg_set = reg_set;
d->reg_ack = reg_ack;
- if (IS_GEN6(dev_priv)) {
- d->val_reset = 0;
- d->val_set = FORCEWAKE_KERNEL;
- d->val_clear = 0;
- } else {
- /* WaRsClearFWBitsAtReset:bdw,skl */
- d->val_reset = _MASKED_BIT_DISABLE(0xffff);
- d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
- d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- d->reg_post = FORCEWAKE_ACK_VLV;
- else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
- d->reg_post = ECOBUS;
-
- d->i915 = dev_priv;
d->id = domain_id;
BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
- d->mask = 1 << domain_id;
+ d->mask = BIT(domain_id);
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
d->timer.function = intel_uncore_fw_release_timer;
- dev_priv->uncore.fw_domains |= (1 << domain_id);
+ dev_priv->uncore.fw_domains |= BIT(domain_id);
- fw_domain_reset(d);
+ fw_domain_reset(dev_priv, d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
return;
+ if (IS_GEN6(dev_priv)) {
+ dev_priv->uncore.fw_reset = 0;
+ dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
+ dev_priv->uncore.fw_clear = 0;
+ } else {
+ /* WaRsClearFWBitsAtReset:bdw,skl */
+ dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
+ dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+ dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+ }
+
if (IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
spin_lock_irq(&dev_priv->uncore.lock);
- fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
+ fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+ fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
spin_unlock_irq(&dev_priv->uncore.lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
return err;
}
- i915_gem_retire_requests(i915);
-
i915->gpu_error.missed_irq_rings = 0;
t->reset_count = i915_reset_count(&i915->gpu_error);
{
struct drm_i915_private *i915 = t->i915;
- if (wait_for(intel_engines_are_idle(i915), 1)) {
+ i915_gem_retire_requests(i915);
+
+ if (wait_for(intel_engines_are_idle(i915), 10)) {
pr_err("%s(%s): GPU not idle\n", t->func, t->name);
return -EIO;
}
i915_gem_object_put(h->hws);
i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
- i915_gem_retire_requests(h->i915);
}
static int igt_hang_sanitycheck(void *arg)
.map_dma_buf = mock_map_dma_buf,
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
- .kmap = mock_dmabuf_kmap,
- .kmap_atomic = mock_dmabuf_kmap_atomic,
- .kunmap = mock_dmabuf_kunmap,
- .kunmap_atomic = mock_dmabuf_kunmap_atomic,
+ .map = mock_dmabuf_kmap,
+ .map_atomic = mock_dmabuf_kmap_atomic,
+ .unmap = mock_dmabuf_kunmap,
+ .unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
#include "mock_drm.h"
-static inline struct inode fake_inode(struct drm_i915_private *i915)
-{
- return (struct inode){ .i_rdev = i915->drm.primary->index };
-}
-
struct drm_file *mock_file(struct drm_i915_private *i915)
{
- struct inode inode = fake_inode(i915);
- struct file filp = {};
+ struct file *filp;
+ struct inode *inode;
struct drm_file *file;
int err;
- err = drm_open(&inode, &filp);
- if (unlikely(err))
- return ERR_PTR(err);
+ inode = kzalloc(sizeof(*inode), GFP_KERNEL);
+ if (!inode) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ inode->i_rdev = i915->drm.primary->index;
- file = filp.private_data;
+ filp = kzalloc(sizeof(*filp), GFP_KERNEL);
+ if (!filp) {
+ err = -ENOMEM;
+ goto err_inode;
+ }
+
+ err = drm_open(inode, filp);
+ if (err)
+ goto err_filp;
+
+ file = filp->private_data;
+ memset(&file->filp, POISON_INUSE, sizeof(file->filp));
file->authenticated = true;
+
+ kfree(filp);
+ kfree(inode);
return file;
+
+err_filp:
+ kfree(filp);
+err_inode:
+ kfree(inode);
+err:
+ return ERR_PTR(err);
}
void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
{
- struct inode inode = fake_inode(i915);
struct file filp = { .private_data = file };
- drm_release(&inode, &filp);
+ drm_release(NULL, &filp);
}
ring->vaddr = (void *)(ring + 1);
INIT_LIST_HEAD(&ring->request_list);
- ring->last_retired_head = -1;
intel_ring_update_space(ring);
return ring;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = i915_gem_request_alloc(engine, context);
- if (!request)
+ if (IS_ERR(request))
return NULL;
mock = container_of(request, typeof(*mock), base);
return 1 + (prandom_u32_state(rnd) % 1024);
}
+static inline bool page_contiguous(struct page *first,
+ struct page *last,
+ unsigned long npages)
+{
+ return first + npages == last;
+}
+
static int alloc_table(struct pfn_table *pt,
unsigned long count, unsigned long max,
npages_fn_t npages_fn,
unsigned long npages = npages_fn(n, count, rnd);
/* Nobody expects the Sparse Memmap! */
- if (pfn_to_page(pfn + npages) != pfn_to_page(pfn) + npages) {
+ if (!page_contiguous(pfn_to_page(pfn),
+ pfn_to_page(pfn + npages),
+ npages)) {
sg_free_table(&pt->st);
return -ENOSPC;
}
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
-config DRM_IMX_IPUV3
- tristate
- depends on DRM_IMX
- depends on IMX_IPUV3_CORE
- default y if DRM_IMX=y
- default m if DRM_IMX=m
-
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
-imxdrm-objs := imx-drm-core.o
+imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX) += imxdrm.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
.of_match_table = imx_drm_dt_ids,
},
};
-module_platform_driver(imx_drm_pdrv);
+
+static struct platform_driver * const drivers[] = {
+ &imx_drm_pdrv,
+ &ipu_drm_driver,
+};
+
+static int __init imx_drm_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(imx_drm_init);
+
+static void __exit imx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(imx_drm_exit);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");
int preferred_bpp);
int imx_drm_exit_drm(void);
+extern struct platform_driver ipu_drm_driver;
+
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
for_each_child_of_node(np, child) {
struct imx_ldb_channel *channel;
- struct device_node *ep;
int bus_format;
ret = of_property_read_u32(child, "reg", &i);
* The output port is port@4 with an external 4-port mux or
* port@2 with the internal 2-port mux.
*/
- ep = of_graph_get_endpoint_by_regs(child,
- imx_ldb->lvds_mux ? 4 : 2,
- -1);
- if (ep) {
- struct device_node *remote;
-
- remote = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
- if (remote) {
- channel->panel = of_drm_find_panel(remote);
- channel->bridge = of_drm_find_bridge(remote);
- } else
- return -EPROBE_DEFER;
- of_node_put(remote);
-
- if (!channel->panel && !channel->bridge) {
- dev_err(dev, "panel/bridge not found: %s\n",
- remote->full_name);
- return -EPROBE_DEFER;
- }
- }
+ ret = drm_of_find_panel_or_bridge(child,
+ imx_ldb->lvds_mux ? 4 : 2, 0,
+ &channel->panel, &channel->bridge);
+ if (ret)
+ return ret;
/* panel ddc only if there is no bridge */
if (!channel->bridge) {
return 0;
}
-static struct platform_driver ipu_drm_driver = {
+struct platform_driver ipu_drm_driver = {
.driver = {
.name = "imx-ipuv3-crtc",
},
.probe = ipu_drm_probe,
.remove = ipu_drm_remove,
};
-module_platform_driver(ipu_drm_driver);
-
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-crtc");
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <linux/videodev2.h>
#include <video/of_display_timing.h>
-#include <linux/of_graph.h>
#include "imx-drm.h"
{
struct drm_device *drm = data;
struct device_node *np = dev->of_node;
- struct device_node *ep;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
imxpd->bus_format = bus_format;
/* port@1 is the output port */
- ep = of_graph_get_endpoint_by_regs(np, 1, -1);
- if (ep) {
- struct device_node *remote;
-
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- dev_warn(dev, "endpoint %s not connected\n",
- ep->full_name);
- of_node_put(ep);
- return -ENODEV;
- }
- of_node_put(ep);
-
- imxpd->panel = of_drm_find_panel(remote);
- if (imxpd->panel) {
- dev_dbg(dev, "found panel %s\n", remote->full_name);
- } else {
- imxpd->bridge = of_drm_find_bridge(remote);
- if (imxpd->bridge)
- dev_dbg(dev, "found bridge %s\n",
- remote->full_name);
- }
- if (!imxpd->panel && !imxpd->bridge) {
- dev_dbg(dev, "waiting for panel or bridge %s\n",
- remote->full_name);
- of_node_put(remote);
- return -EPROBE_DEFER;
- }
- of_node_put(remote);
- }
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
+ if (ret)
+ return ret;
imxpd->dev = dev;
#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
-#define DISP_REG_OVL_ADDR(n) (0x0f40 + 0x20 * (n))
+#define DISP_REG_OVL_ADDR_MT2701 0x0040
+#define DISP_REG_OVL_ADDR_MT8173 0x0f40
+#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
#define OVL_RDMA_MEM_GMC 0x40402020
#define OVL_CON_BYTE_SWAP BIT(24)
-#define OVL_CON_CLRFMT_RGB565 (0 << 12)
-#define OVL_CON_CLRFMT_RGB888 (1 << 12)
+#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
+#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
+ 0 : OVL_CON_CLRFMT_RGB)
+#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
+ OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
+struct mtk_disp_ovl_data {
+ unsigned int addr;
+ bool fmt_rgb565_is_0;
+};
+
/**
* struct mtk_disp_ovl - DISP_OVL driver structure
* @ddp_comp - structure containing type enum and hardware resources
struct mtk_disp_ovl {
struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ const struct mtk_disp_ovl_data *data;
};
+static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_ovl, ddp_comp);
+}
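
comp_to_ovl() is the standard container_of() downcast: given a pointer to the embedded ddp_comp member, it recovers the enclosing mtk_disp_ovl by subtracting the member's offset. A generic model of the arithmetic:

#include <stddef.h>

/* Generic model of the container_of() arithmetic used by comp_to_ovl(). */
#define CONTAINER_OF_SKETCH(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { int tag; struct inner in; };

/* For any struct outer o: CONTAINER_OF_SKETCH(&o.in, struct outer, in) == &o */
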
+
static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_ovl *priv = dev_id;
static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
struct drm_crtc *crtc)
{
- struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
- ddp_comp);
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- priv->crtc = crtc;
+ ovl->crtc = crtc;
writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
}
static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
{
- struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
- ddp_comp);
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- priv->crtc = NULL;
+ ovl->crtc = NULL;
writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
}
writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(unsigned int fmt)
+static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
- return OVL_CON_CLRFMT_RGB565;
+ return OVL_CON_CLRFMT_RGB565(ovl);
case DRM_FORMAT_BGR565:
- return OVL_CON_CLRFMT_RGB565 | OVL_CON_BYTE_SWAP;
+ return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGB888:
- return OVL_CON_CLRFMT_RGB888;
+ return OVL_CON_CLRFMT_RGB888(ovl);
case DRM_FORMAT_BGR888:
- return OVL_CON_CLRFMT_RGB888 | OVL_CON_BYTE_SWAP;
+ return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return OVL_CON_CLRFMT_ARGB8888;
static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
struct mtk_plane_state *state)
{
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
if (!pending->enable)
mtk_ovl_layer_off(comp, idx);
- con = ovl_fmt_convert(fmt);
+ con = ovl_fmt_convert(ovl, fmt);
if (idx != 0)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
- writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(idx));
+ writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
if (pending->enable)
mtk_ovl_layer_on(comp, idx);
return ret;
}
+ priv->data = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
return 0;
}
+static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT2701,
+ .fmt_rgb565_is_0 = false,
+};
+
+static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .fmt_rgb565_is_0 = true,
+};
+
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-ovl", },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = &mt2701_ovl_driver_data},
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = &mt8173_ovl_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
+#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
+
+struct mtk_disp_rdma_data {
+ unsigned int fifo_size;
+};
/**
* struct mtk_disp_rdma - DISP_RDMA driver structure
struct mtk_disp_rdma {
struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ const struct mtk_disp_rdma_data *data;
};
+static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_rdma, ddp_comp);
+}
+
static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_rdma *priv = dev_id;
static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
struct drm_crtc *crtc)
{
- struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
- ddp_comp);
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- priv->crtc = crtc;
+ rdma->crtc = crtc;
rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
RDMA_FRAME_END_INT);
}
static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
{
- struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
- ddp_comp);
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- priv->crtc = NULL;
+ rdma->crtc = NULL;
rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
}
{
unsigned int threshold;
unsigned int reg;
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
*/
threshold = width * height * vrefresh * 4 * 7 / 1000000;
reg = RDMA_FIFO_UNDERFLOW_EN |
- RDMA_FIFO_PSEUDO_SIZE(SZ_8K) |
+ RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
}
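
For a concrete feel of the threshold formula (an illustrative mode, not taken from this patch): at 1920x1080@60 with 4 bytes per pixel the computation yields 3483 bytes, which the RDMA_OUTPUT_VALID_FIFO_THRESHOLD() macro then scales into 16-byte FIFO units.

#include <stdio.h>

/* Worked example of the RDMA output-valid threshold for a hypothetical
 * 1920x1080@60 mode; the register field counts 16-byte FIFO units.
 */
int main(void)
{
	unsigned int width = 1920, height = 1080, vrefresh = 60;
	unsigned long long threshold =
		(unsigned long long)width * height * vrefresh * 4 * 7 / 1000000;

	printf("threshold = %llu bytes = %llu fifo units\n",
	       threshold, threshold / 16);	/* 3483 bytes, 217 units */
	return 0;
}
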
return ret;
}
+ priv->data = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_rdma_component_ops);
return 0;
}
+static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
+ .fifo_size = SZ_4K,
+};
+
+static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
+ .fifo_size = SZ_8K,
+};
+
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-rdma", },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = &mt2701_rdma_driver_data},
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = &mt8173_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
struct device *dev = &pdev->dev;
struct mtk_dpi *dpi;
struct resource *mem;
- struct device_node *ep, *bridge_node = NULL;
+ struct device_node *bridge_node;
int comp_id;
int ret;
return -EINVAL;
}
- ep = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (ep) {
- bridge_node = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
- }
- if (!bridge_node) {
- dev_err(dev, "Failed to find bridge node\n");
+ bridge_node = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!bridge_node)
return -ENODEV;
- }
dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name);
pm_runtime_put(drm->dev);
}
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
+ struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ unsigned int i;
+
+ /*
+ * TODO: instead of updating the registers here, we should prepare
+ * working registers in atomic_commit and let the hardware command
+ * queue update module registers on vblank.
+ */
+ if (state->pending_config) {
+ mtk_ddp_comp_config(ovl, state->pending_width,
+ state->pending_height,
+ state->pending_vrefresh, 0);
+
+ state->pending_config = false;
+ }
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < OVL_LAYER_NR; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (plane_state->pending.config) {
+ mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ plane_state->pending.config = false;
+ }
+ }
+ mtk_crtc->pending_planes = false;
+ }
+}
+
static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct drm_crtc_state *old_crtc_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
unsigned int pending_planes = 0;
int i;
if (crtc->state->color_mgmt_changed)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
+
+ if (priv->data->shadow_register) {
+ mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_crtc_ddp_config(crtc);
+ mtk_disp_mutex_release(mtk_crtc->mutex);
+ }
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
- unsigned int i;
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
- /*
- * TODO: instead of updating the registers here, we should prepare
- * working registers in atomic_commit and let the hardware command
- * queue update module registers on vblank.
- */
- if (state->pending_config) {
- mtk_ddp_comp_config(ovl, state->pending_width,
- state->pending_height,
- state->pending_vrefresh, 0);
-
- state->pending_config = false;
- }
-
- if (mtk_crtc->pending_planes) {
- for (i = 0; i < OVL_LAYER_NR; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
-
- if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(ovl, i, plane_state);
- plane_state->pending.config = false;
- }
- }
- mtk_crtc->pending_planes = false;
- }
+ if (!priv->data->shadow_register)
+ mtk_crtc_ddp_config(crtc);
mtk_drm_finish_page_flip(mtk_crtc);
}
*/
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+
#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define MUTEX_MOD_DISP_OVL0 BIT(11)
-#define MUTEX_MOD_DISP_OVL1 BIT(12)
-#define MUTEX_MOD_DISP_RDMA0 BIT(13)
-#define MUTEX_MOD_DISP_RDMA1 BIT(14)
-#define MUTEX_MOD_DISP_RDMA2 BIT(15)
-#define MUTEX_MOD_DISP_WDMA0 BIT(16)
-#define MUTEX_MOD_DISP_WDMA1 BIT(17)
-#define MUTEX_MOD_DISP_COLOR0 BIT(18)
-#define MUTEX_MOD_DISP_COLOR1 BIT(19)
-#define MUTEX_MOD_DISP_AAL BIT(20)
-#define MUTEX_MOD_DISP_GAMMA BIT(21)
-#define MUTEX_MOD_DISP_UFOE BIT(22)
-#define MUTEX_MOD_DISP_PWM0 BIT(23)
-#define MUTEX_MOD_DISP_PWM1 BIT(24)
-#define MUTEX_MOD_DISP_OD BIT(25)
+#define INT_MUTEX BIT(1)
+
+#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11)
+#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12)
+#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13)
+#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14)
+#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15)
+#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16)
+#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17)
+#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18)
+#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19)
+#define MT8173_MUTEX_MOD_DISP_AAL BIT(20)
+#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21)
+#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22)
+#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23)
+#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24)
+#define MT8173_MUTEX_MOD_DISP_OD BIT(25)
+
+#define MT2701_MUTEX_MOD_DISP_OVL BIT(3)
+#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6)
+#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7)
+#define MT2701_MUTEX_MOD_DISP_BLS BIT(9)
+#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10)
+#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12)
#define MUTEX_SOF_SINGLE_MODE 0
#define MUTEX_SOF_DSI0 1
#define DPI0_SEL_IN_RDMA1 0x1
#define COLOR1_SEL_IN_OVL1 0x1
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define DSI_SEL_IN_BLS 0x0
+
struct mtk_disp_mutex {
int id;
bool claimed;
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
+ const unsigned int *mutex_mod;
+};
+
+static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS,
+ [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR,
+ [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL,
+ [DDP_COMPONENT_RDMA0] = MT2701_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2701_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
-static const unsigned int mutex_mod[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = MUTEX_MOD_DISP_AAL,
- [DDP_COMPONENT_COLOR0] = MUTEX_MOD_DISP_COLOR0,
- [DDP_COMPONENT_COLOR1] = MUTEX_MOD_DISP_COLOR1,
- [DDP_COMPONENT_GAMMA] = MUTEX_MOD_DISP_GAMMA,
- [DDP_COMPONENT_OD] = MUTEX_MOD_DISP_OD,
- [DDP_COMPONENT_OVL0] = MUTEX_MOD_DISP_OVL0,
- [DDP_COMPONENT_OVL1] = MUTEX_MOD_DISP_OVL1,
- [DDP_COMPONENT_PWM0] = MUTEX_MOD_DISP_PWM0,
- [DDP_COMPONENT_PWM1] = MUTEX_MOD_DISP_PWM1,
- [DDP_COMPONENT_RDMA0] = MUTEX_MOD_DISP_RDMA0,
- [DDP_COMPONENT_RDMA1] = MUTEX_MOD_DISP_RDMA1,
- [DDP_COMPONENT_RDMA2] = MUTEX_MOD_DISP_RDMA2,
- [DDP_COMPONENT_UFOE] = MUTEX_MOD_DISP_UFOE,
- [DDP_COMPONENT_WDMA0] = MUTEX_MOD_DISP_WDMA0,
- [DDP_COMPONENT_WDMA1] = MUTEX_MOD_DISP_WDMA1,
+static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT8173_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_RDMA0] = MT8173_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8173_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT8173_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT8173_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT8173_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
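
The per-SoC mutex_mod tables above are hung off the compatible string and fetched once at probe time (see the of_device_get_match_data() call and the ddp_driver_dt_match table further down). A minimal sketch of that match-data pattern, with illustrative names (my_mod_table, my_probe are not part of this driver):

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static const unsigned int my_mod_table[] = { 0x1, 0x2, 0x4, 0x8 };

    static const struct of_device_id my_dt_match[] = {
        { .compatible = "vendor,soc-block", .data = my_mod_table },
        {},
    };

    static int my_probe(struct platform_device *pdev)
    {
        /* returns the .data of the matched compatible, or NULL */
        const unsigned int *mod = of_device_get_match_data(&pdev->dev);

        if (!mod)
            return -EINVAL;
        /* mod[id] now yields the SoC-specific MUTEX_MOD bit for id */
        return 0;
    }
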
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
*addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
value = OVL0_MOUT_EN_COLOR0;
+ } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
+ value = OVL_MOUT_EN_RDMA;
} else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
value = OD_MOUT_EN_RDMA0;
} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
*addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
value = COLOR1_SEL_IN_OVL1;
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSI_SEL;
+ value = DSI_SEL_IN_BLS;
} else {
value = 0;
}
return value;
}
+static void mtk_ddp_sout_sel(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0)
+ writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+}
+
void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
writel_relaxed(reg, config_regs + addr);
}
+ mtk_ddp_sout_sel(config_regs, cur, next);
+
value = mtk_ddp_sel_in(cur, next, &addr);
if (value) {
reg = readl_relaxed(config_regs + addr) | value;
break;
default:
reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg |= mutex_mod[id];
+ reg |= ddp->mutex_mod[id];
writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
return;
}
break;
default:
reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg &= ~mutex_mod[id];
+ reg &= ~(ddp->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
break;
}
writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
}
+void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+ u32 tmp;
+
+ writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+ writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id));
+ if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id),
+ tmp, tmp & INT_MUTEX, 1, 10000))
+ pr_err("could not acquire mutex %d\n", mutex->id);
+}
+
+void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+
+ writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id));
+}
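
mtk_disp_mutex_acquire() busy-waits (bounded) until the hardware reports INT_MUTEX, and release drops it again; the 10ms bound comes from the readl_poll_timeout_atomic(..., 1, 10000) call above (1us per poll, 10000us total). A hedged sketch of how a shadow-register update would be bracketed; my_shadow_update is illustrative, not driver code:

    static void my_shadow_update(struct mtk_disp_mutex *mutex,
                                 void (*cfg)(void *data), void *data)
    {
        mtk_disp_mutex_acquire(mutex);  /* spin up to 10ms for INT_MUTEX */
        cfg(data);                      /* registers written while held */
        mtk_disp_mutex_release(mutex);  /* hardware latches the new state */
    }
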
+
static int mtk_ddp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
return PTR_ERR(ddp->regs);
}
+ ddp->mutex_mod = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, ddp);
return 0;
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-mutex" },
+ { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
+ { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
enum mtk_ddp_comp_id id);
void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex);
#endif /* MTK_DRM_DDP_H */
#define DISP_REG_UFO_START 0x0000
#define DISP_COLOR_CFG_MAIN 0x0400
-#define DISP_COLOR_START 0x0c00
-#define DISP_COLOR_WIDTH 0x0c50
-#define DISP_COLOR_HEIGHT 0x0c54
+#define DISP_COLOR_START_MT2701 0x0f00
+#define DISP_COLOR_START_MT8173 0x0c00
+#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
+#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
+#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+struct mtk_disp_color_data {
+ unsigned int color_offset;
+};
+
+struct mtk_disp_color {
+ struct mtk_ddp_comp ddp_comp;
+ const struct mtk_disp_color_data *data;
+};
+
+static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_color, ddp_comp);
+}
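
comp_to_color() is the usual container_of() downcast: the generic mtk_ddp_comp is embedded in the specialised mtk_disp_color, so a pointer to the member can always be turned back into the wrapper. A self-contained sketch of the pattern (struct names are illustrative):

    #include <linux/kernel.h>

    struct base { int id; };

    struct wrapper {
        struct base b;  /* embedded, as ddp_comp is in mtk_disp_color */
        int extra;
    };

    static struct wrapper *to_wrapper(struct base *b)
    {
        /* subtracts offsetof(struct wrapper, b) from the member pointer */
        return container_of(b, struct wrapper, b);
    }
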
+
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
unsigned int CFG)
{
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
{
- writel(w, comp->regs + DISP_COLOR_WIDTH);
- writel(h, comp->regs + DISP_COLOR_HEIGHT);
+ struct mtk_disp_color *color = comp_to_color(comp);
+
+ writel(w, comp->regs + DISP_COLOR_WIDTH(color));
+ writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
}
static void mtk_color_start(struct mtk_ddp_comp *comp)
{
+ struct mtk_disp_color *color = comp_to_color(comp);
+
writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
comp->regs + DISP_COLOR_CFG_MAIN);
- writel(0x1, comp->regs + DISP_COLOR_START);
+ writel(0x1, comp->regs + DISP_COLOR_START(color));
}
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
[MTK_DISP_PWM] = "pwm",
[MTK_DISP_MUTEX] = "mutex",
[MTK_DISP_OD] = "od",
+ [MTK_DISP_BLS] = "bls",
};
struct mtk_ddp_comp_match {
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
+static const struct mtk_disp_color_data mt2701_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT2701,
+};
+
+static const struct mtk_disp_color_data mt8173_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT8173,
+};
+
+static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = &mt2701_color_driver_data},
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = &mt8173_color_driver_data},
+ {},
+};
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type)
{
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
+ const struct of_device_id *match;
+ struct mtk_disp_color *color;
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
+ type = mtk_ddp_matches[comp_id].type;
+ if (type == MTK_DISP_COLOR) {
+ devm_kfree(dev, comp);
+ color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL);
+ if (!color)
+ return -ENOMEM;
+
+ match = of_match_node(mtk_disp_color_driver_dt_match, node);
+ color->data = match->data;
+ comp = &color->ddp_comp;
+ }
+
comp->id = comp_id;
comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
- if (comp_id == DDP_COMPONENT_DPI0 ||
+ if (comp_id == DDP_COMPONENT_BLS ||
+ comp_id == DDP_COMPONENT_DPI0 ||
comp_id == DDP_COMPONENT_DSI0 ||
comp_id == DDP_COMPONENT_PWM0) {
comp->regs = NULL;
if (IS_ERR(comp->clk))
comp->clk = NULL;
- type = mtk_ddp_matches[comp_id].type;
-
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
if (type != MTK_DISP_OVL &&
MTK_DISP_PWM,
MTK_DISP_MUTEX,
MTK_DISP_OD,
+ MTK_DISP_BLS,
MTK_DDP_COMP_TYPE_MAX,
};
enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL,
+ DDP_COMPONENT_BLS,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DPI0,
.atomic_commit = mtk_atomic_commit,
};
-static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
+static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_BLS,
+ DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI0,
+};
+
+static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_AAL,
DDP_COMPONENT_PWM0,
};
-static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
+static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL1,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_DPI0,
};
+static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
+ .main_path = mt2701_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
+ .ext_path = mt2701_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext),
+ .shadow_register = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
+ .main_path = mt8173_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
+ .ext_path = mt8173_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
* and each statically assigned to a crtc:
* OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ...
*/
- ret = mtk_drm_crtc_create(drm, mtk_ddp_main, ARRAY_SIZE(mtk_ddp_main));
+ ret = mtk_drm_crtc_create(drm, private->data->main_path,
+ private->data->main_len);
if (ret < 0)
goto err_component_unbind;
/* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. */
- ret = mtk_drm_crtc_create(drm, mtk_ddp_ext, ARRAY_SIZE(mtk_ddp_ext));
+ ret = mtk_drm_crtc_create(drm, private->data->ext_path,
+ private->data->ext_len);
if (ret < 0)
goto err_component_unbind;
/* Use OVL device for all DMA memory allocations */
- np = private->comp_node[mtk_ddp_main[0]] ?:
- private->comp_node[mtk_ddp_ext[0]];
+ np = private->comp_node[private->data->main_path[0]] ?:
+ private->comp_node[private->data->ext_path[0]];
pdev = of_find_device_by_node(np);
if (!pdev) {
ret = -ENODEV;
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
+ { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
{ }
mutex_init(&private->commit.lock);
INIT_WORK(&private->commit.work, mtk_atomic_work);
+ private->data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
private->config_regs = devm_ioremap_resource(dev, mem);
mtk_drm_sys_resume);
static const struct of_device_id mtk_drm_of_ids[] = {
- { .compatible = "mediatek,mt8173-mmsys", },
+ { .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data},
{ }
};
struct drm_property;
struct regmap;
+struct mtk_mmsys_driver_data {
+ const enum mtk_ddp_comp_id *main_path;
+ unsigned int main_len;
+ const enum mtk_ddp_comp_id *ext_path;
+ unsigned int ext_len;
+ bool shadow_register;
+};
+
struct mtk_drm_private {
struct drm_device *drm;
struct device *dma_dev;
void __iomem *config_regs;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+ const struct mtk_mmsys_driver_data *data;
struct {
struct drm_atomic_state *state;
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <video/mipi_display.h>
#include <video/videomode.h>
#include "mtk_drm_ddp_comp.h"
-#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
-#define DSI_HOST_FIFO_DEPTH 64
-
#define DSI_START 0x00
+#define DSI_INTEN 0x08
+
+#define DSI_INTSTA 0x0c
+#define LPRX_RD_RDY_INT_FLAG BIT(0)
+#define CMD_DONE_INT_FLAG BIT(1)
+#define TE_RDY_INT_FLAG BIT(2)
+#define VM_DONE_INT_FLAG BIT(3)
+#define EXT_TE_RDY_INT_FLAG BIT(4)
+#define DSI_BUSY BIT(31)
+
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
#define MIX_MODE BIT(17)
#define DSI_TXRX_CTRL 0x18
-#define VC_NUM (2 << 0)
+#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
+#define DSI_CMDQ_SIZE 0x60
+#define CMDQ_SIZE 0x3f
+
#define DSI_HSTX_CKL_WC 0x64
+#define DSI_RX_DATA0 0x74
+#define DSI_RX_DATA1 0x78
+#define DSI_RX_DATA2 0x7c
+#define DSI_RX_DATA3 0x80
+
+#define DSI_RACK 0x84
+#define RACK BIT(0)
+
#define DSI_PHY_LCCON 0x104
#define LC_HS_TX_EN BIT(0)
#define LC_ULPM_EN BIT(1)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
+#define DSI_VM_CMD_CON 0x130
+#define VM_CMD_EN BIT(0)
+#define TS_VFP_EN BIT(5)
+
+#define DSI_CMDQ0 0x180
+#define CONFIG (0xff << 0)
+#define SHORT_PACKET 0
+#define LONG_PACKET 2
+#define BTA BIT(2)
+#define DATA_ID (0xff << 8)
+#define DATA_0 (0xff << 16)
+#define DATA_1 (0xff << 24)
+
#define T_LPX 5
#define T_HS_PREP 6
#define T_HS_TRAIL 8
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
+#define MTK_DSI_HOST_IS_READ(type) \
+ ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
+ (type == MIPI_DSI_DCS_READ))
+
struct phy;
struct mtk_dsi {
struct videomode vm;
int refcount;
bool enabled;
+ u32 irq_data;
+ wait_queue_head_t irq_wait_queue;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
-static void dsi_phy_timconfig(struct mtk_dsi *dsi)
+static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 ui, cycle_time;
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}
-static void mtk_dsi_reset(struct mtk_dsi *dsi)
+static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
-static int mtk_dsi_poweron(struct mtk_dsi *dsi)
-{
- struct device *dev = dsi->dev;
- int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
-
- if (++dsi->refcount != 1)
- return 0;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB565:
- bit_per_pixel = 16;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- bit_per_pixel = 18;
- break;
- case MIPI_DSI_FMT_RGB666:
- case MIPI_DSI_FMT_RGB888:
- default:
- bit_per_pixel = 24;
- break;
- }
-
- /**
- * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock * 1000;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
-
- ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
- if (ret < 0) {
- dev_err(dev, "Failed to set data rate: %d\n", ret);
- goto err_refcount;
- }
-
- phy_power_on(dsi->phy);
-
- ret = clk_prepare_enable(dsi->engine_clk);
- if (ret < 0) {
- dev_err(dev, "Failed to enable engine clock: %d\n", ret);
- goto err_phy_power_off;
- }
-
- ret = clk_prepare_enable(dsi->digital_clk);
- if (ret < 0) {
- dev_err(dev, "Failed to enable digital clock: %d\n", ret);
- goto err_disable_engine_clk;
- }
-
- mtk_dsi_enable(dsi);
- mtk_dsi_reset(dsi);
- dsi_phy_timconfig(dsi);
-
- return 0;
-
-err_disable_engine_clk:
- clk_disable_unprepare(dsi->engine_clk);
-err_phy_power_off:
- phy_power_off(dsi->phy);
-err_refcount:
- dsi->refcount--;
- return ret;
-}
-
-static void dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
+static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}
-static void dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
+static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}
-static void dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
+static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}
-static void dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
+static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}
-static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
+static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
u32 tmp_reg1;
return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
}
-static void dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
+static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
- if (enter && !dsi_clk_hs_state(dsi))
+ if (enter && !mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
- else if (!enter && dsi_clk_hs_state(dsi))
+ else if (!enter && mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}
-static void dsi_set_mode(struct mtk_dsi *dsi)
+static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
u32 vid_mode = CMD_MODE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- vid_mode = SYNC_PULSE_MODE;
-
- if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
- !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
vid_mode = BURST_MODE;
+ else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ vid_mode = SYNC_PULSE_MODE;
+ else
+ vid_mode = SYNC_EVENT_MODE;
}
writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}
-static void dsi_ps_control_vact(struct mtk_dsi *dsi)
+static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
+ mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
+}
+
+static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
struct videomode *vm = &dsi->vm;
u32 dsi_buf_bpp, ps_wc;
writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
-static void dsi_rxtx_control(struct mtk_dsi *dsi)
+static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
u32 tmp_reg;
break;
}
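+ /*
+ * Map DSI mode flags straight onto DSI_TXRX_CTRL bits: assuming the
+ * core's MIPI_DSI_CLOCK_NON_CONTINUOUS is BIT(10), shifting it left
+ * by 6 lands on the controller's clock-lane LP bit (BIT(16)), and
+ * MIPI_DSI_MODE_EOT_PACKET (BIT(9)) shifted right by 3 lands on
+ * DIS_EOT (BIT(6)).
+ */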
+ tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
+ tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;
+
writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}
-static void dsi_ps_control(struct mtk_dsi *dsi)
+static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
- unsigned int dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp;
u32 tmp_reg;
switch (dsi->format) {
writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
-static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
+static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
- unsigned int horizontal_sync_active_byte;
- unsigned int horizontal_backporch_byte;
- unsigned int horizontal_frontporch_byte;
- unsigned int dsi_tmp_buf_bpp;
+ u32 horizontal_sync_active_byte;
+ u32 horizontal_backporch_byte;
+ u32 horizontal_frontporch_byte;
+ u32 dsi_tmp_buf_bpp;
struct videomode *vm = &dsi->vm;
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
- dsi_ps_control(dsi);
+ mtk_dsi_ps_control(dsi);
}
static void mtk_dsi_start(struct mtk_dsi *dsi)
writel(1, dsi->regs + DSI_START);
}
+static void mtk_dsi_stop(struct mtk_dsi *dsi)
+{
+ writel(0, dsi->regs + DSI_START);
+}
+
+static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
+{
+ writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
+}
+
+static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
+{
+ u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
+
+ writel(inten, dsi->regs + DSI_INTEN);
+}
+
+static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
+{
+ dsi->irq_data |= irq_bit;
+}
+
+static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
+{
+ dsi->irq_data &= ~irq_bit;
+}
+
+static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
+ unsigned int timeout)
+{
+ s32 ret = 0;
+ unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
+
+ ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
+ dsi->irq_data & irq_flag,
+ timeout_jiffies);
+ if (ret == 0) {
+ DRM_WARN("Wait for DSI IRQ (0x%08x) timed out\n", irq_flag);
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ }
+
+ return ret;
+}
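
The wait helper pairs with the IRQ handler below: the waiter clears its flag before kicking the hardware, so a completion that fires between the kick and the sleep is still observed by the condition re-check inside wait_event_interruptible_timeout(). Sketched as a sequence (not literal driver code):

    /* consumer side, before sleeping: */
    mtk_dsi_irq_data_clear(dsi, flag);              /* arm before kicking HW */
    /* ... trigger the transfer or mode switch ... */
    mtk_dsi_wait_for_irq_done(dsi, flag, timeout);  /* sleep until flag set */

    /* producer side, in mtk_dsi_irq() below: */
    mtk_dsi_irq_data_set(dsi, status);              /* publish completed flags */
    wake_up_interruptible(&dsi->irq_wait_queue);    /* wake any waiter */
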
+
+static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
+{
+ struct mtk_dsi *dsi = dev_id;
+ u32 status, tmp;
+ u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
+
+ status = readl(dsi->regs + DSI_INTSTA) & flag;
+
+ if (status) {
+ do {
+ mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
+ tmp = readl(dsi->regs + DSI_INTSTA);
+ } while (tmp & DSI_BUSY);
+
+ mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
+ mtk_dsi_irq_data_set(dsi, status);
+ wake_up_interruptible(&dsi->irq_wait_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
+{
+ mtk_dsi_irq_data_clear(dsi, irq_flag);
+ mtk_dsi_set_cmd_mode(dsi);
+
+ if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
+ DRM_ERROR("failed to switch to command mode\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int mtk_dsi_poweron(struct mtk_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ int ret;
+ u64 pixel_clock, total_bits;
+ u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+
+ if (++dsi->refcount != 1)
+ return 0;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB565:
+ bit_per_pixel = 16;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ bit_per_pixel = 18;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ bit_per_pixel = 24;
+ break;
+ }
+
+ /**
+ * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
+ * htotal_time = htotal * byte_per_pixel / num_lanes
+ * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
+ * mipi_ratio = (htotal_time + overhead_time) / htotal_time
+ * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
+ */
+ pixel_clock = dsi->vm.pixelclock * 1000;
+ htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
+ dsi->vm.hsync_len;
+ htotal_bits = htotal * bit_per_pixel;
+
+ overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
+ T_HS_EXIT;
+ overhead_bits = overhead_cycles * dsi->lanes * 8;
+ total_bits = htotal_bits + overhead_bits;
+
+ dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
+ htotal * dsi->lanes);
+
+ ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set data rate: %d\n", ret);
+ goto err_refcount;
+ }
+
+ phy_power_on(dsi->phy);
+
+ ret = clk_prepare_enable(dsi->engine_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable engine clock: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ ret = clk_prepare_enable(dsi->digital_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable digital clock: %d\n", ret);
+ goto err_disable_engine_clk;
+ }
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_phy_timconfig(dsi);
+
+ mtk_dsi_rxtx_control(dsi);
+ mtk_dsi_ps_control_vact(dsi);
+ mtk_dsi_set_vm_cmd(dsi);
+ mtk_dsi_config_vdo_timing(dsi);
+ mtk_dsi_set_interrupt_enable(dsi);
+
+ mtk_dsi_clk_ulp_mode_leave(dsi);
+ mtk_dsi_lane0_ulp_mode_leave(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 0);
+
+ if (dsi->panel) {
+ if (drm_panel_prepare(dsi->panel)) {
+ DRM_ERROR("failed to prepare the panel\n");
+ goto err_disable_digital_clk;
+ }
+ }
+
+ return 0;
+err_disable_digital_clk:
+ clk_disable_unprepare(dsi->digital_clk);
+err_disable_engine_clk:
+ clk_disable_unprepare(dsi->engine_clk);
+err_phy_power_off:
+ phy_power_off(dsi->phy);
+err_refcount:
+ dsi->refcount--;
+ return ret;
+}
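+/*
+ * Worked instance of the rate formula above, with assumed numbers
+ * (CEA 1080p60 timings, RGB888, 4 lanes; T_HS_ZERO = 10 and
+ * T_HS_EXIT = 7 assumed for the two constants not visible here):
+ *
+ *   htotal        = 1920 + 88 + 44 + 148             = 2200
+ *   htotal_bits   = 2200 * 24                        = 52800
+ *   overhead_bits = (5 + 6 + 10 + 8 + 7) * 4 * 8     = 1152
+ *   data_rate     = 148500 kHz * 53952 / (2200 * 4) ~= 910 Mbit/s/lane
+ */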
+
static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
if (WARN_ON(dsi->refcount == 0))
if (--dsi->refcount != 0)
return;
- dsi_lane0_ulp_mode_enter(dsi);
- dsi_clk_ulp_mode_enter(dsi);
+ if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
+ if (dsi->panel) {
+ if (drm_panel_unprepare(dsi->panel)) {
+ DRM_ERROR("failed to unprepare the panel\n");
+ return;
+ }
+ }
+ }
+
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_lane0_ulp_mode_enter(dsi);
+ mtk_dsi_clk_ulp_mode_enter(dsi);
mtk_dsi_disable(dsi);
if (dsi->enabled)
return;
- if (dsi->panel) {
- if (drm_panel_prepare(dsi->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return;
- }
- }
-
ret = mtk_dsi_poweron(dsi);
if (ret < 0) {
DRM_ERROR("failed to power on dsi\n");
return;
}
- dsi_rxtx_control(dsi);
-
- dsi_clk_ulp_mode_leave(dsi);
- dsi_lane0_ulp_mode_leave(dsi);
- dsi_clk_hs_mode(dsi, 0);
- dsi_set_mode(dsi);
-
- dsi_ps_control_vact(dsi);
- dsi_config_vdo_timing(dsi);
-
- dsi_set_mode(dsi);
- dsi_clk_hs_mode(dsi, 1);
+ mtk_dsi_set_mode(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 1);
mtk_dsi_start(dsi);
+ if (dsi->panel) {
+ if (drm_panel_enable(dsi->panel)) {
+ DRM_ERROR("failed to enable the panel\n");
+ goto err_dsi_power_off;
+ }
+ }
+
dsi->enabled = true;
+
+ return;
+err_dsi_power_off:
+ mtk_dsi_stop(dsi);
+ mtk_dsi_poweroff(dsi);
}
static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
}
+ mtk_dsi_stop(dsi);
mtk_dsi_poweroff(dsi);
dsi->enabled = false;
return 0;
}
+static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
+{
+ u32 tries = 500000; /* 2-4us per try: total 1s ~ 2s timeout */
+
+ while (tries--) {
+ if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
+ return;
+
+ usleep_range(2, 4);
+ }
+
+ DRM_WARN("timed out waiting for DSI to become idle\n");
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+}
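
An equivalent, arguably tidier formulation exists in <linux/iopoll.h> (already pulled in by the DDP change above, though not included by this file); a sketch only, not what the patch does:

    static void my_dsi_wait_for_idle(struct mtk_dsi *dsi)
    {
        u32 val;

        /* poll DSI_INTSTA every 2-4us until DSI_BUSY clears, 2s cap */
        if (readl_poll_timeout(dsi->regs + DSI_INTSTA, val,
                               !(val & DSI_BUSY), 4, 2000000)) {
            DRM_WARN("timed out waiting for DSI to become idle\n");
            mtk_dsi_enable(dsi);
            mtk_dsi_reset_engine(dsi);
        }
    }
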
+
+static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
+{
+ switch (type) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ return 1;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ return 2;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ return read_data[1] + read_data[2] * 16;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ DRM_INFO("acknowledge and error report (0x02), try again\n");
+ break;
+ default:
+ DRM_INFO("type (0x%x) is not recognized\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
+{
+ const char *tx_buf = msg->tx_buf;
+ u8 config, cmdq_size, cmdq_off, type = msg->type;
+ u32 reg_val, cmdq_mask, i;
+
+ if (MTK_DSI_HOST_IS_READ(type))
+ config = BTA;
+ else
+ config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;
+
+ if (msg->tx_len > 2) {
+ cmdq_size = 1 + (msg->tx_len + 3) / 4;
+ cmdq_off = 4;
+ cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
+ reg_val = (msg->tx_len << 16) | (type << 8) | config;
+ } else {
+ cmdq_size = 1;
+ cmdq_off = 2;
+ cmdq_mask = CONFIG | DATA_ID;
+ reg_val = (type << 8) | config;
+ }
+
+ for (i = 0; i < msg->tx_len; i++)
+ writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+
+ mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
+}
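+/*
+ * Example (assumed message, not driver code): a DCS short write with
+ * one parameter, msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM (0x15),
+ * msg.tx_len = 2, takes the short-packet branch (cmdq_off = 2), so:
+ *
+ *   CMDQ0[ 7: 0] = SHORT_PACKET   (CONFIG)
+ *   CMDQ0[15: 8] = 0x15           (DATA_ID, the message type)
+ *   CMDQ0[23:16] = tx_buf[0]      (DCS command byte, via writeb)
+ *   CMDQ0[31:24] = tx_buf[1]      (parameter byte, via writeb)
+ *
+ * and CMDQ_SIZE is set to one queue entry before mtk_dsi_start().
+ */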
+
+static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
+ const struct mipi_dsi_msg *msg, u8 flag)
+{
+ mtk_dsi_wait_for_idle(dsi);
+ mtk_dsi_irq_data_clear(dsi, flag);
+ mtk_dsi_cmdq(dsi, msg);
+ mtk_dsi_start(dsi);
+
+ if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
+ return -ETIME;
+
+ return 0;
+}
+
+static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+ u32 recv_cnt, i;
+ u8 read_data[16];
+ void *src_addr;
+ u8 irq_flag = CMD_DONE_INT_FLAG;
+
+ if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
+ DRM_ERROR("dsi engine is not command mode\n");
+ return -EINVAL;
+ }
+
+ if (MTK_DSI_HOST_IS_READ(msg->type))
+ irq_flag |= LPRX_RD_RDY_INT_FLAG;
+
+ if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
+ return -ETIME;
+
+ if (!MTK_DSI_HOST_IS_READ(msg->type))
+ return 0;
+
+ if (!msg->rx_buf) {
+ DRM_ERROR("dsi receive buffer size may be NULL\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 16; i++)
+ read_data[i] = readb(dsi->regs + DSI_RX_DATA0 + i);
+
+ recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);
+
+ if (recv_cnt > 2)
+ src_addr = &read_data[4];
+ else
+ src_addr = &read_data[1];
+
+ if (recv_cnt > 10)
+ recv_cnt = 10;
+
+ if (recv_cnt > msg->rx_len)
+ recv_cnt = msg->rx_len;
+
+ if (recv_cnt)
+ memcpy(msg->rx_buf, src_addr, recv_cnt);
+
+ DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
+ recv_cnt, *((u8 *)(msg->tx_buf)));
+
+ return recv_cnt;
+}
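
With .transfer wired into mtk_dsi_ops just below, panel drivers attached to this host can use the stock mipi_dsi helpers; a hedged panel-side sketch (my_panel_init and the 0x04 register are illustrative, not part of this patch):

    static int my_panel_init(struct mipi_dsi_device *device)
    {
        u8 id = 0;
        ssize_t len;
        int ret;

        /* short DCS write: packed into CMDQ0 as described above */
        ret = mipi_dsi_dcs_set_display_on(device);
        if (ret < 0)
            return ret;

        /* BTA read: serviced from DSI_RX_DATA0..3, at most 10 bytes */
        len = mipi_dsi_dcs_read(device, 0x04, &id, sizeof(id));
        return len < 0 ? (int)len : 0;
    }
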
+
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
.detach = mtk_dsi_host_detach,
+ .transfer = mtk_dsi_host_transfer,
};
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
- struct device_node *remote_node, *endpoint;
struct resource *regs;
+ int irq_num;
int comp_id;
int ret;
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (endpoint) {
- remote_node = of_graph_get_remote_port_parent(endpoint);
- if (!remote_node) {
- dev_err(dev, "No panel connected\n");
- return -ENODEV;
- }
-
- dsi->bridge = of_drm_find_bridge(remote_node);
- dsi->panel = of_drm_find_panel(remote_node);
- of_node_put(remote_node);
- if (!dsi->bridge && !dsi->panel) {
- dev_info(dev, "Waiting for bridge or panel driver\n");
- return -EPROBE_DEFER;
- }
- }
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
+ &dsi->panel, &dsi->bridge);
+ if (ret)
+ return ret;
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
return ret;
}
+ irq_num = platform_get_irq(pdev, 0);
+ if (irq_num < 0) {
+ dev_err(&pdev->dev, "failed to get dsi irq: %d\n", irq_num);
+ return irq_num;
+ }
+
+ irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
+ ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
+ IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request mediatek dsi irq: %d\n",
+ ret);
+ return ret;
+ }
+
+ init_waitqueue_head(&dsi->irq_wait_queue);
+
platform_set_drvdata(pdev, dsi);
return component_add(&pdev->dev, &mtk_dsi_component_ops);
}
static const struct of_device_id mtk_dsi_of_match[] = {
+ { .compatible = "mediatek,mt2701-dsi" },
{ .compatible = "mediatek,mt8173-dsi" },
{ },
};
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *cec_np, *port, *ep, *remote, *i2c_np;
+ struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
struct regmap *regmap;
struct resource *mem;
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
- port = of_graph_get_port_by_id(np, 1);
- if (!port) {
- dev_err(dev, "Missing output port node\n");
+ remote = of_graph_get_remote_node(np, 1, 0);
+ if (!remote)
return -EINVAL;
- }
-
- ep = of_get_child_by_name(port, "endpoint");
- if (!ep) {
- dev_err(dev, "Missing endpoint node in port %s\n",
- port->full_name);
- of_node_put(port);
- return -EINVAL;
- }
- of_node_put(port);
-
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- dev_err(dev, "Missing connector/bridge node for endpoint %s\n",
- ep->full_name);
- of_node_put(ep);
- return -EINVAL;
- }
- of_node_put(ep);
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#define MIPITX_DSI_PLL_CON2 0x58
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
#define MIPITX_DSI_PLL_PWR 0x68
#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
#define SW_LNT2_HSTX_PRE_OE BIT(24)
#define SW_LNT2_HSTX_OE BIT(25)
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+};
+
struct mtk_mipi_tx {
struct device *dev;
void __iomem *regs;
- unsigned int data_rate;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
struct clk *pll;
};
static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- unsigned int txdiv, txdiv0, txdiv1;
+ u8 txdiv, txdiv0, txdiv1;
u64 pcw;
dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
RG_DSI_MPPLL_SDM_SSC_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
return 0;
}
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
RG_DSI_MPPLL_PLL_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
RG_DSI_MPPLL_SDM_ISO_EN |
RG_DSI_MPPLL_SDM_PWR_ON,
static int mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- unsigned int reg;
+ u32 reg;
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- unsigned int reg;
+ u32 reg;
mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
RG_DSI_PAD_TIE_LOW_EN);
if (!mipi_tx)
return -ENOMEM;
+ mipi_tx->driver_data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
return 0;
}
+static const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8)
+};
+
+static const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8)
+};
+
static const struct of_device_id mtk_mipi_tx_match[] = {
- { .compatible = "mediatek,mt8173-mipi-tx", },
+ { .compatible = "mediatek,mt2701-mipi-tx",
+ .data = &mt2701_mipitx_data },
+ { .compatible = "mediatek,mt8173-mipi-tx",
+ .data = &mt8173_mipitx_data },
{},
};
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
select REGMAP_MMIO
+
+config DRM_MESON_DW_HDMI
+ tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
+ depends on DRM_MESON
+ default y if DRM_MESON
+ select DRM_DW_HDMI
meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
+obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
#include "meson_canvas.h"
#include "meson_registers.h"
-/*
+/**
+ * DOC: Canvas
+ *
* CANVAS is a memory zone where physical memory frames information
* are stored for the VIU to scanout.
*/
static void meson_crtc_enable(struct drm_crtc *crtc)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
- struct drm_plane *plane = meson_crtc->priv->primary_plane;
+ struct drm_crtc_state *crtc_state = crtc->state;
struct meson_drm *priv = meson_crtc->priv;
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!crtc_state) {
+ DRM_ERROR("Invalid crtc_state\n");
+ return;
+ }
+
/* Enable VPP Postblend */
- writel(plane->state->crtc_w,
+ writel(crtc_state->mode.hdisplay,
priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
struct meson_drm *priv = meson_crtc->priv;
priv->viu.osd1_enabled = false;
+ priv->viu.osd1_commit = false;
/* Disable VPP Postblend */
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
- if (priv->viu.osd1_enabled)
- priv->viu.osd1_commit = true;
+ priv->viu.osd1_commit = true;
}
static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/component.h>
#include <linux/of_graph.h>
#include <drm/drmP.h>
#define DRIVER_NAME "meson"
#define DRIVER_DESC "Amlogic Meson DRM driver"
-/*
- * Video Processing Unit
+/**
+ * DOC: Video Processing Unit
*
* VPU Handles the Global Video Processing, it includes management of the
* clocks gates, blocks reset lines and power domains.
*
* What is missing :
+ *
* - Full reset of entire video processing HW blocks
* - Scaling and setup of the VPU clock
* - Bus clock gates
.max_register = 0x1000,
};
-static int meson_drv_probe(struct platform_device *pdev)
+static int meson_drv_bind(struct device *dev)
{
- struct device *dev = &pdev->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct meson_drm *priv;
struct drm_device *drm;
struct resource *res;
drm_vblank_init(drm, 1);
drm_mode_config_init(drm);
+ drm->mode_config.max_width = 3840;
+ drm->mode_config.max_height = 2160;
+ drm->mode_config.funcs = &meson_mode_config_funcs;
+
+ /* Hardware Initialization */
+
+ meson_venc_init(priv);
+ meson_vpp_init(priv);
+ meson_viu_init(priv);
/* Encoder Initialization */
if (ret)
goto free_drm;
- /* Hardware Initialization */
-
- meson_venc_init(priv);
- meson_vpp_init(priv);
- meson_viu_init(priv);
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't bind all components\n");
+ goto free_drm;
+ }
ret = meson_plane_create(priv);
if (ret)
goto free_drm;
drm_mode_config_reset(drm);
- drm->mode_config.max_width = 8192;
- drm->mode_config.max_height = 8192;
- drm->mode_config.funcs = &meson_mode_config_funcs;
priv->fbdev = drm_fbdev_cma_init(drm, 32,
drm->mode_config.num_connector);
return ret;
}
-static int meson_drv_remove(struct platform_device *pdev)
+static void meson_drv_unbind(struct device *dev)
{
- struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+ struct drm_device *drm = dev_get_drvdata(dev);
struct meson_drm *priv = drm->dev_private;
drm_dev_unregister(drm);
drm_vblank_cleanup(drm);
drm_dev_unref(drm);
- return 0;
}
+static const struct component_master_ops meson_drv_master_ops = {
+ .bind = meson_drv_bind,
+ .unbind = meson_drv_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ DRM_DEBUG_DRIVER("Comparing of node %s with %s\n",
+ of_node_full_name(dev->of_node),
+ of_node_full_name(data));
+
+ return dev->of_node == data;
+}
+
+/* Possible connector nodes to ignore */
+static const struct of_device_id connectors_match[] = {
+ { .compatible = "composite-video-connector" },
+ { .compatible = "svideo-connector" },
+ { .compatible = "hdmi-connector" },
+ { .compatible = "dvi-connector" },
+ {}
+};
+
+static int meson_probe_remote(struct platform_device *pdev,
+ struct component_match **match,
+ struct device_node *parent,
+ struct device_node *remote)
+{
+ struct device_node *ep, *remote_node;
+ int count = 1;
+
+ /* If node is a connector, return and do not add to match table */
+ if (of_match_node(connectors_match, remote))
+ return 1;
+
+ component_match_add(&pdev->dev, match, compare_of, remote);
+
+ for_each_endpoint_of_node(remote, ep) {
+ remote_node = of_graph_get_remote_port_parent(ep);
+ if (!remote_node ||
+ remote_node == parent || /* Ignore parent endpoint */
+ !of_device_is_available(remote_node)) {
+ of_node_put(remote_node);
+ continue;
+ }
+
+ count += meson_probe_remote(pdev, match, remote, remote_node);
+
+ of_node_put(remote_node);
+ }
+
+ return count;
+}
+
+static int meson_drv_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ep, *remote;
+ int count = 0;
+
+ for_each_endpoint_of_node(np, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ count += meson_probe_remote(pdev, &match, np, remote);
+ of_node_put(remote);
+ }
+
+ /* If some endpoints were found, initialize the nodes */
+ if (count) {
+ dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
+
+ return component_master_add_with_match(&pdev->dev,
+ &meson_drv_master_ops,
+ match);
+ }
+
+ /* If no output endpoints were available, simply bail out */
+ return 0;
+}
+
static const struct of_device_id dt_match[] = {
{ .compatible = "amlogic,meson-gxbb-vpu" },
{ .compatible = "amlogic,meson-gxl-vpu" },
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
- .remove = meson_drv_remove,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
struct {
unsigned int current_mode;
+ bool hdmi_repeat;
+ bool venc_repeat;
+ bool hdmi_use_enci;
} venc;
};
--- /dev/null
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/bridge/dw_hdmi.h>
+
+#include <uapi/linux/media-bus-format.h>
+#include <uapi/linux/videodev2.h>
+
+#include "meson_drv.h"
+#include "meson_venc.h"
+#include "meson_vclk.h"
+#include "meson_dw_hdmi.h"
+#include "meson_registers.h"
+
+#define DRIVER_NAME "meson-dw-hdmi"
+#define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver"
+
+/**
+ * DOC: HDMI Output
+ *
+ * HDMI Output is composed of :
+ *
+ * - A Synopsys DesignWare HDMI Controller IP
+ * - A TOP control block controlling the Clocks and PHY
+ * - A custom HDMI PHY in order to convert video to TMDS signal
+ *
+ * .. code::
+ *
+ * ___________________________________
+ * | HDMI TOP |<= HPD
+ * |___________________________________|
+ * | | |
+ * | Synopsys HDMI | HDMI PHY |=> TMDS
+ * | Controller |________________|
+ * |___________________________________|<=> DDC
+ *
+ *
+ * The HDMI TOP block only supports HPD sensing.
+ * The Synopsys HDMI Controller interrupt is routed
+ * through the TOP Block interrupt.
+ * Communication to the TOP Block and the Synopsys
+ * HDMI Controller is done via a pair of addr+read/write
+ * registers.
+ * The HDMI PHY is configured by registers in the
+ * HHI register block.
+ *
+ * Pixel data arrives in 4:4:4 format from the VENC
+ * block and the VPU HDMI mux selects either the ENCI
+ * encoder for the 576i or 480i formats or the ENCP
+ * encoder for all the other formats including
+ * interlaced HD formats.
+ * The VENC uses a DVI encoder on top of the ENCI
+ * or ENCP encoders to generate DVI timings for the
+ * HDMI controller.
+ *
+ * GXBB, GXL and GXM embeds the Synopsys DesignWare
+ * HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
+ * audio source interfaces.
+ *
+ * We handle the following features :
+ *
+ * - HPD Rise & Fall interrupt
+ * - HDMI Controller Interrupt
+ * - HDMI PHY Init for 480i to 1080p60
+ * - VENC & HDMI Clock setup for 480i to 1080p60
+ * - VENC Mode setup for 480i to 1080p60
+ *
+ * What is missing :
+ *
+ * - PHY, Clock and Mode setup for 2k and 4k modes
+ * - SDDC Scrambling mode for HDMI 2.0a
+ * - HDCP Setup
+ * - CEC Management
+ */
+
+/* TOP Block Communication Channel */
+#define HDMITX_TOP_ADDR_REG 0x0
+#define HDMITX_TOP_DATA_REG 0x4
+#define HDMITX_TOP_CTRL_REG 0x8
+
+/* Controller Communication Channel */
+#define HDMITX_DWC_ADDR_REG 0x10
+#define HDMITX_DWC_DATA_REG 0x14
+#define HDMITX_DWC_CTRL_REG 0x18
+
+/* HHI Registers */
+#define HHI_MEM_PD_REG0 0x100 /* 0x40 */
+#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */
+#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */
+#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
+#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
+#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
+
+static DEFINE_SPINLOCK(reg_lock);
+
+enum meson_venc_source {
+ MESON_VENC_SOURCE_NONE = 0,
+ MESON_VENC_SOURCE_ENCI = 1,
+ MESON_VENC_SOURCE_ENCP = 2,
+};
+
+struct meson_dw_hdmi {
+ struct drm_encoder encoder;
+ struct dw_hdmi_plat_data dw_plat_data;
+ struct meson_drm *priv;
+ struct device *dev;
+ void __iomem *hdmitx;
+ struct reset_control *hdmitx_apb;
+ struct reset_control *hdmitx_ctrl;
+ struct reset_control *hdmitx_phy;
+ struct clk *hdmi_pclk;
+ struct clk *venci_clk;
+ u32 irq_stat;
+};
+#define encoder_to_meson_dw_hdmi(x) \
+ container_of(x, struct meson_dw_hdmi, encoder)
+
+static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
+ const char *compat)
+{
+ return of_device_is_compatible(dw_hdmi->dev->of_node, compat);
+}
+
+/* PHY (via TOP bridge) and Controller dedicated register interface */
+
+static unsigned int dw_hdmi_top_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ unsigned long flags;
+ unsigned int data;
+
+ spin_lock_irqsave(&reg_lock, flags);
+
+ /* ADDR must be written twice */
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
+
+ /* Read needs a second DATA read */
+ data = readl(dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
+ data = readl(dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
+
+ spin_unlock_irqrestore(&reg_lock, flags);
+
+ return data;
+}
+
+static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&reg_lock, flags);
+
+ /* ADDR must be written twice */
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
+
+ /* Write needs single DATA write */
+ writel(data, dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
+
+ spin_unlock_irqrestore(&reg_lock, flags);
+}
+
+/* Helper to change specific bits in PHY registers */
+static inline void dw_hdmi_top_write_bits(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr,
+ unsigned int mask,
+ unsigned int val)
+{
+ unsigned int data = dw_hdmi_top_read(dw_hdmi, addr);
+
+ data &= ~mask;
+ data |= val;
+
+ dw_hdmi_top_write(dw_hdmi, addr, data);
+}
+
+static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ unsigned long flags;
+ unsigned int data;
+
+ spin_lock_irqsave(&reg_lock, flags);
+
+ /* ADDR must be written twice */
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
+
+ /* Read needs a second DATA read */
+ data = readl(dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
+ data = readl(dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
+
+ spin_unlock_irqrestore(&reg_lock, flags);
+
+ return data;
+}
+
+static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&reg_lock, flags);
+
+ /* ADDR must be written twice */
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
+ writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
+
+ /* Write needs single DATA write */
+ writel(data, dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
+
+ spin_unlock_irqrestore(&reg_lock, flags);
+}
+
+/* Helper to change specific bits in controller registers */
+static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr,
+ unsigned int mask,
+ unsigned int val)
+{
+ unsigned int data = dw_hdmi_dwc_read(dw_hdmi, addr);
+
+ data &= ~mask;
+ data |= val;
+
+ dw_hdmi_dwc_write(dw_hdmi, addr, data);
+}
+
+/* Bridge */
+
+/* Setup PHY bandwidth modes */
+static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
+ struct drm_display_mode *mode)
+{
+ struct meson_drm *priv = dw_hdmi->priv;
+ unsigned int pixel_clock = mode->clock;
+
+ if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
+ if (pixel_clock >= 371250) {
+ /* 5.94Gbps, 3.7125Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x333d3282);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2136315b);
+ } else if (pixel_clock >= 297000) {
+ /* 2.97Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33303382);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2036315b);
+ } else if (pixel_clock >= 148500) {
+ /* 1.485Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33303362);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2016315b);
+ } else {
+ /* 742.5Mbps, and below */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33604142);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x0016315b);
+ }
+ } else if (dw_hdmi_is_compatible(dw_hdmi,
+ "amlogic,meson-gxbb-dw-hdmi")) {
+ if (pixel_clock >= 371250) {
+ /* 5.94Gbps, 3.7125Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33353245);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2100115b);
+ } else if (pixel_clock >= 297000) {
+ /* 2.97Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33634283);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0xb000115b);
+ } else {
+ /* 1.485Gbps, and below */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33632122);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2000115b);
+ }
+ }
+}
+
+static inline void dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
+{
+ struct meson_drm *priv = dw_hdmi->priv;
+
+ /* Enable and software reset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0xf);
+
+ mdelay(2);
+
+ /* Enable and unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0xe);
+
+ mdelay(2);
+}
+
+static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
+ struct drm_display_mode *mode)
+{
+ struct meson_drm *priv = dw_hdmi->priv;
+ int vic = drm_match_cea_mode(mode);
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
+ unsigned int hdmi_freq;
+
+ vclk_freq = mode->clock;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ vclk_freq *= 2;
+
+ venc_freq = vclk_freq;
+ hdmi_freq = vclk_freq;
+
+ if (meson_venc_hdmi_venc_repeat(vic))
+ venc_freq *= 2;
+
+ vclk_freq = max(venc_freq, hdmi_freq);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ venc_freq /= 2;
+
+ DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
+ vclk_freq, venc_freq, hdmi_freq,
+ priv->venc.hdmi_use_enci);
+
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
+ venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
+}
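+/*
+ * Worked instance of the derivation above, for 720p60 (VIC 4), whose
+ * VENC pixel repetition is assumed from the mode_valid comments below:
+ *
+ *   vclk_freq = mode->clock        = 74250 kHz  (no DBLCLK)
+ *   venc_freq = 74250 * 2          = 148500     (VENC repeat)
+ *   hdmi_freq                      = 74250
+ *   vclk_freq = max(148500, 74250) = 148500
+ *
+ * so the VCLK tree runs at 148.5 MHz, the VENC consumes it 1:1 and
+ * the HDMI-TX pixel clock is divided back down to 74.25 MHz.
+ */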
+
+static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
+ struct drm_display_mode *mode)
+{
+ struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
+ struct meson_drm *priv = dw_hdmi->priv;
+ unsigned int wr_clk =
+ readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING));
+
+ DRM_DEBUG_DRIVER("%d:\"%s\"\n", mode->base.id, mode->name);
+
+ /* Enable clocks */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+
+ /* Bring HDMITX MEM output out of power down */
+ regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+
+ /* Bring out of reset */
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+
+ /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
+ dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
+ 0x3, 0x3);
+ dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
+ 0x3 << 4, 0x3 << 4);
+
+ /* Enable normal output to PHY */
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+
+ /* TMDS pattern setup (TOFIX pattern for 4k2k scrambling) */
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0x001f001f);
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23, 0x001f001f);
+
+ /* Load TMDS pattern */
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
+ msleep(20);
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
+
+ /* Setup PHY parameters */
+ meson_hdmi_phy_setup_mode(dw_hdmi, mode);
+
+ /* Setup PHY */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+ 0xffff << 16, 0x0390 << 16);
+
+ /* BIT_INVERT */
+ if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+ BIT(17), 0);
+ else
+ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+ BIT(17), BIT(17));
+
+ /* Disable clock, fifo, fifo_wr */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
+
+ msleep(100);
+
+ /* Reset PHY 3 times in a row */
+ dw_hdmi_phy_reset(dw_hdmi);
+ dw_hdmi_phy_reset(dw_hdmi);
+ dw_hdmi_phy_reset(dw_hdmi);
+
+ /* Temporarily disable VENC video stream */
+ if (priv->venc.hdmi_use_enci)
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
+ else
+ writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
+
+ /* Temporarily disable HDMI video stream to HDMI-TX */
+ writel_bits_relaxed(0x3, 0,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+ writel_bits_relaxed(0xf << 8, 0,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
+ /* Re-Enable VENC video stream */
+ if (priv->venc.hdmi_use_enci)
+ writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+ else
+ writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
+
+ /* Push back HDMI clock settings */
+ writel_bits_relaxed(0xf << 8, wr_clk & (0xf << 8),
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
+ /* Enable and Select HDMI video source for HDMI-TX */
+ if (priv->venc.hdmi_use_enci)
+ writel_bits_relaxed(0x3, MESON_VENC_SOURCE_ENCI,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+ else
+ writel_bits_relaxed(0x3, MESON_VENC_SOURCE_ENCP,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
+ return 0;
+}
+
+static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi,
+ void *data)
+{
+ struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
+ struct meson_drm *priv = dw_hdmi->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+}
+
+static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
+ void *data)
+{
+ struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
+
+ return dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void dw_hdmi_setup_hpd(struct dw_hdmi *hdmi,
+ void *data)
+{
+ struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
+
+ /* Setup HPD Filter */
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
+ (0xa << 12) | 0xa0);
+
+ /* Clear interrupts */
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
+
+ /* Unmask interrupts */
+ dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL,
+ HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
+}
+
+static const struct dw_hdmi_phy_ops meson_dw_hdmi_phy_ops = {
+ .init = dw_hdmi_phy_init,
+ .disable = dw_hdmi_phy_disable,
+ .read_hpd = dw_hdmi_read_hpd,
+ .setup_hpd = dw_hdmi_setup_hpd,
+};
+
+static irqreturn_t dw_hdmi_top_irq(int irq, void *dev_id)
+{
+ struct meson_dw_hdmi *dw_hdmi = dev_id;
+ u32 stat;
+
+ stat = dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
+ dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
+
+	/* HPD events are handled in the threaded interrupt handler */
+ if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
+ dw_hdmi->irq_stat = stat;
+ return IRQ_WAKE_THREAD;
+ }
+
+	/* HDMI Controller interrupt, left to the shared dw-hdmi handler */
+ if (stat & 1)
+ return IRQ_NONE;
+
+ /* TOFIX Handle HDCP Interrupts */
+
+ return IRQ_HANDLED;
+}
+
+/* Threaded interrupt handler to manage HPD events */
+static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
+{
+ struct meson_dw_hdmi *dw_hdmi = dev_id;
+ u32 stat = dw_hdmi->irq_stat;
+
+ /* HPD Events */
+ if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
+ bool hpd_connected = false;
+
+ if (stat & HDMITX_TOP_INTR_HPD_RISE)
+ hpd_connected = true;
+
+ dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected,
+ hpd_connected);
+
+ drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* TOFIX Enable support for non-vic modes */
+static enum drm_mode_status dw_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
+ unsigned int hdmi_freq;
+ int vic = drm_match_cea_mode(mode);
+
+ DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ /* For now, only accept VIC modes */
+ if (!vic)
+ return MODE_BAD;
+
+ /* For now, filter by supported VIC modes */
+ if (!meson_venc_hdmi_supported_vic(vic))
+ return MODE_BAD;
+
+ vclk_freq = mode->clock;
+
+	/* 480i/576i need global pixel doubling */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ vclk_freq *= 2;
+
+ venc_freq = vclk_freq;
+ hdmi_freq = vclk_freq;
+
+	/* VENC doubles pixels for 480/576i/p, 720p and 1080i modes */
+ if (meson_venc_hdmi_venc_repeat(vic))
+ venc_freq *= 2;
+
+ vclk_freq = max(venc_freq, hdmi_freq);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ venc_freq /= 2;
+
+ dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
+ vclk_freq, venc_freq, hdmi_freq);
+
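+	/*
+	 * Worked examples of the derivation above (kHz): 480i (VIC 6) is
+	 * 13500 * 2 (DBLCLK) = 27000, then VENC repeat => 54000; 720p60
+	 * (VIC 4) is 74250, VENC repeat => 148500; 1080p60 (VIC 16) has
+	 * no repeat and stays at 148500. All land in the switch below.
+	 */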
+ /* Finally filter by configurable vclk frequencies */
+ switch (vclk_freq) {
+ case 54000:
+ case 74250:
+ case 148500:
+ case 297000:
+ case 594000:
+ return MODE_OK;
+ }
+
+ return MODE_CLOCK_RANGE;
+}
+
+/* Encoder */
+
+static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
+ .destroy = meson_venc_hdmi_encoder_destroy,
+};
+
+static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_drm *priv = dw_hdmi->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ writel_bits_relaxed(0x3, 0,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
+}
+
+static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_drm *priv = dw_hdmi->priv;
+
+ DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
+
+ if (priv->venc.hdmi_use_enci)
+ writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+ else
+ writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
+}
+
+static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_drm *priv = dw_hdmi->priv;
+ int vic = drm_match_cea_mode(mode);
+
+ DRM_DEBUG_DRIVER("%d:\"%s\" vic %d\n",
+ mode->base.id, mode->name, vic);
+
+ /* Should have been filtered */
+ if (!vic)
+ return;
+
+ /* VENC + VENC-DVI Mode setup */
+ meson_venc_hdmi_mode_set(priv, vic, mode);
+
+ /* VCLK Set clock */
+ dw_hdmi_set_vclk(dw_hdmi, mode);
+
+	/* Setup YUV444 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+}
+
+static const struct drm_encoder_helper_funcs
+ meson_venc_hdmi_encoder_helper_funcs = {
+ .atomic_check = meson_venc_hdmi_encoder_atomic_check,
+ .disable = meson_venc_hdmi_encoder_disable,
+ .enable = meson_venc_hdmi_encoder_enable,
+ .mode_set = meson_venc_hdmi_encoder_mode_set,
+};
+
+/* DW HDMI Regmap */
+
+static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
+ unsigned int *result)
+{
+ *result = dw_hdmi_dwc_read(context, reg);
+
+ return 0;
+}
+
+static int meson_dw_hdmi_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ dw_hdmi_dwc_write(context, reg, val);
+
+ return 0;
+}
+
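+/* The Synopsys DWC HDMI registers are 8-bit values indexed by a 32-bit
+ * address, hence reg_bits = 32 and val_bits = 8.
+ */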
+static const struct regmap_config meson_dw_hdmi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .reg_read = meson_dw_hdmi_reg_read,
+ .reg_write = meson_dw_hdmi_reg_write,
+ .max_register = 0x10000,
+};
+
+static bool meson_hdmi_connector_is_available(struct device *dev)
+{
+ struct device_node *ep, *remote;
+
+ /* HDMI Connector is on the second port, first endpoint */
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0);
+ if (!ep)
+ return false;
+
+	/* If a remote port is connected, consider the connector available */
+	remote = of_graph_get_remote_port(ep);
+	of_node_put(ep);
+	if (remote) {
+		of_node_put(remote);
+		return true;
+	}
+
+	return false;
+}
+
+static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct meson_dw_hdmi *meson_dw_hdmi;
+ struct drm_device *drm = data;
+ struct meson_drm *priv = drm->dev_private;
+ struct dw_hdmi_plat_data *dw_plat_data;
+ struct drm_encoder *encoder;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!meson_hdmi_connector_is_available(dev)) {
+ dev_info(drm->dev, "HDMI Output connector not available\n");
+ return -ENODEV;
+ }
+
+ meson_dw_hdmi = devm_kzalloc(dev, sizeof(*meson_dw_hdmi),
+ GFP_KERNEL);
+ if (!meson_dw_hdmi)
+ return -ENOMEM;
+
+ meson_dw_hdmi->priv = priv;
+ meson_dw_hdmi->dev = dev;
+ dw_plat_data = &meson_dw_hdmi->dw_plat_data;
+ encoder = &meson_dw_hdmi->encoder;
+
+ meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
+ "hdmitx_apb");
+ if (IS_ERR(meson_dw_hdmi->hdmitx_apb)) {
+ dev_err(dev, "Failed to get hdmitx_apb reset\n");
+ return PTR_ERR(meson_dw_hdmi->hdmitx_apb);
+ }
+
+ meson_dw_hdmi->hdmitx_ctrl = devm_reset_control_get_exclusive(dev,
+ "hdmitx");
+ if (IS_ERR(meson_dw_hdmi->hdmitx_ctrl)) {
+ dev_err(dev, "Failed to get hdmitx reset\n");
+ return PTR_ERR(meson_dw_hdmi->hdmitx_ctrl);
+ }
+
+ meson_dw_hdmi->hdmitx_phy = devm_reset_control_get_exclusive(dev,
+ "hdmitx_phy");
+ if (IS_ERR(meson_dw_hdmi->hdmitx_phy)) {
+ dev_err(dev, "Failed to get hdmitx_phy reset\n");
+ return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
+ if (IS_ERR(meson_dw_hdmi->hdmitx))
+ return PTR_ERR(meson_dw_hdmi->hdmitx);
+
+ meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr");
+ if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) {
+ dev_err(dev, "Unable to get HDMI pclk\n");
+ return PTR_ERR(meson_dw_hdmi->hdmi_pclk);
+ }
+ clk_prepare_enable(meson_dw_hdmi->hdmi_pclk);
+
+ meson_dw_hdmi->venci_clk = devm_clk_get(dev, "venci");
+ if (IS_ERR(meson_dw_hdmi->venci_clk)) {
+ dev_err(dev, "Unable to get venci clk\n");
+ return PTR_ERR(meson_dw_hdmi->venci_clk);
+ }
+ clk_prepare_enable(meson_dw_hdmi->venci_clk);
+
+ dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi,
+ &meson_dw_hdmi_regmap_config);
+ if (IS_ERR(dw_plat_data->regm))
+ return PTR_ERR(dw_plat_data->regm);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Failed to get hdmi top irq\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, dw_hdmi_top_irq,
+ dw_hdmi_top_thread_irq, IRQF_SHARED,
+ "dw_hdmi_top_irq", meson_dw_hdmi);
+ if (ret) {
+ dev_err(dev, "Failed to request hdmi top irq\n");
+ return ret;
+ }
+
+ /* Encoder */
+
+ drm_encoder_helper_add(encoder, &meson_venc_hdmi_encoder_helper_funcs);
+
+ ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, "meson_hdmi");
+ if (ret) {
+ dev_err(priv->dev, "Failed to init HDMI encoder\n");
+ return ret;
+ }
+
+ encoder->possible_crtcs = BIT(0);
+
+ DRM_DEBUG_DRIVER("encoder initialized\n");
+
+ /* Enable clocks */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+
+	/* Bring HDMITX MEM output out of power down */
+ regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+
+ /* Reset HDMITX APB & TX & PHY */
+ reset_control_reset(meson_dw_hdmi->hdmitx_apb);
+ reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
+ reset_control_reset(meson_dw_hdmi->hdmitx_phy);
+
+ /* Enable APB3 fail on error */
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+
+ /* Bring out of reset */
+ dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+
+ msleep(20);
+
+ dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_CLK_CNTL, 0xff);
+
+ /* Enable HDMI-TX Interrupt */
+ dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
+
+ dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
+
+ /* Bridge / Connector */
+
+ dw_plat_data->mode_valid = dw_hdmi_mode_valid;
+ dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
+ dw_plat_data->phy_name = "meson_dw_hdmi_phy";
+ dw_plat_data->phy_data = meson_dw_hdmi;
+ dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
+ dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+
+ ret = dw_hdmi_bind(pdev, encoder, &meson_dw_hdmi->dw_plat_data);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_DRIVER("HDMI controller initialized\n");
+
+ return 0;
+}
+
+static void meson_dw_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ dw_hdmi_unbind(dev);
+}
+
+static const struct component_ops meson_dw_hdmi_ops = {
+ .bind = meson_dw_hdmi_bind,
+ .unbind = meson_dw_hdmi_unbind,
+};
+
+static int meson_dw_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &meson_dw_hdmi_ops);
+}
+
+static int meson_dw_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &meson_dw_hdmi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id meson_dw_hdmi_of_table[] = {
+ { .compatible = "amlogic,meson-gxbb-dw-hdmi" },
+ { .compatible = "amlogic,meson-gxl-dw-hdmi" },
+ { .compatible = "amlogic,meson-gxm-dw-hdmi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
+
+static struct platform_driver meson_dw_hdmi_platform_driver = {
+ .probe = meson_dw_hdmi_probe,
+ .remove = meson_dw_hdmi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = meson_dw_hdmi_of_table,
+ },
+};
+module_platform_driver(meson_dw_hdmi_platform_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MESON_DW_HDMI_H
+#define __MESON_DW_HDMI_H
+
+/*
+ * Bit 7 RW Reserved. Default 1.
+ * Bit 6 RW Reserved. Default 1.
+ * Bit 5 RW Reserved. Default 1.
+ * Bit 4 RW sw_reset_phyif: PHY interface. 1=Apply reset; 0=Release from reset.
+ * Default 1.
+ * Bit 3 RW sw_reset_intr: interrupt module. 1=Apply reset;
+ * 0=Release from reset.
+ * Default 1.
+ * Bit 2 RW sw_reset_mem: KSV/REVOC mem. 1=Apply reset; 0=Release from reset.
+ * Default 1.
+ * Bit 1 RW sw_reset_rnd: random number interface to HDCP. 1=Apply reset;
+ * 0=Release from reset. Default 1.
+ * Bit 0 RW sw_reset_core: connects to IP's ~irstz. 1=Apply reset;
+ * 0=Release from reset. Default 1.
+ */
+#define HDMITX_TOP_SW_RESET (0x000)
+
+/*
+ * Bit 12 RW i2s_ws_inv:1=Invert i2s_ws; 0=No invert. Default 0.
+ * Bit 11 RW i2s_clk_inv: 1=Invert i2s_clk; 0=No invert. Default 0.
+ * Bit 10 RW spdif_clk_inv: 1=Invert spdif_clk; 0=No invert. Default 0.
+ * Bit 9 RW tmds_clk_inv: 1=Invert tmds_clk; 0=No invert. Default 0.
+ * Bit 8 RW pixel_clk_inv: 1=Invert pixel_clk; 0=No invert. Default 0.
+ * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0.
+ * Bit 3 RW i2s_clk_en: 1=enable i2s_clk; 0=disable. Default 0.
+ * Bit 2 RW spdif_clk_en: 1=enable spdif_clk; 0=disable. Default 0.
+ * Bit 1 RW tmds_clk_en: 1=enable tmds_clk; 0=disable. Default 0.
+ * Bit 0 RW pixel_clk_en: 1=enable pixel_clk; 0=disable. Default 0.
+ */
+#define HDMITX_TOP_CLK_CNTL (0x001)
+
+/*
+ * Bit 11: 0 RW hpd_valid_width: filter out width <= M*1024. Default 0.
+ * Bit 15:12 RW hpd_glitch_width: filter out glitch <= N. Default 0.
+ */
+#define HDMITX_TOP_HPD_FILTER (0x002)
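+/* e.g. the driver writes (0xa << 12) | 0xa0 here: glitch width 10,
+ * valid width 160 (x1024 cycles).
+ */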
+
+/*
+ * intr_maskn: MASK_N, one bit per interrupt source.
+ * 1=Enable interrupt source; 0=Disable interrupt source. Default 0.
+ * [ 4] hdcp22_rndnum_err
+ * [ 3] nonce_rfrsh_rise
+ * [ 2] hpd_fall_intr
+ * [ 1] hpd_rise_intr
+ * [ 0] core_intr
+ */
+#define HDMITX_TOP_INTR_MASKN (0x003)
+
+/*
+ * Bit 30: 0 RW intr_stat: For each bit, write 1 to manually set the interrupt
+ * bit, read back the interrupt status.
+ * Bit 31 R IP interrupt status
+ * Bit 2 RW hpd_fall
+ * Bit 1 RW hpd_rise
+ * Bit 0 RW IP interrupt
+ */
+#define HDMITX_TOP_INTR_STAT (0x004)
+
+/*
+ * [4] hdcp22_rndnum_err
+ * [3] nonce_rfrsh_rise
+ * [2] hpd_fall
+ * [1] hpd_rise
+ * [0] core_intr_rise
+ */
+#define HDMITX_TOP_INTR_STAT_CLR (0x005)
+
+#define HDMITX_TOP_INTR_CORE BIT(0)
+#define HDMITX_TOP_INTR_HPD_RISE BIT(1)
+#define HDMITX_TOP_INTR_HPD_FALL BIT(2)
+
+/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
+ * 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
+ * Bit 11: 9 RW shift_pttn_repeat: 0=New pattern every clk cycle; 1=New pattern
+ * every 2 clk cycles; ...; 7=New pattern every 8 clk cycles. Default 0.
+ * Bit 8 RW shift_pttn_en: 1= Enable shift pattern generator; 0=Disable.
+ * Default 0.
+ * Bit 4: 3 RW prbs_pttn_mode: 0=PRBS11; 1=PRBS15; 2=PRBS7; 3=PRBS31. Default 0.
+ * Bit 2: 1 RW prbs_pttn_width: 0=idle; 1=output 8-bit pattern;
+ * 2=Output 1-bit pattern; 3=output 10-bit pattern. Default 0.
+ * Bit 0 RW prbs_pttn_en: 1=Enable PRBS generator; 0=Disable. Default 0.
+ */
+#define HDMITX_TOP_BIST_CNTL (0x006)
+
+/* Bit 29:20 RW shift_pttn_data[59:50]. Default 0. */
+/* Bit 19:10 RW shift_pttn_data[69:60]. Default 0. */
+/* Bit 9: 0 RW shift_pttn_data[79:70]. Default 0. */
+#define HDMITX_TOP_SHIFT_PTTN_012 (0x007)
+
+/* Bit 29:20 RW shift_pttn_data[29:20]. Default 0. */
+/* Bit 19:10 RW shift_pttn_data[39:30]. Default 0. */
+/* Bit 9: 0 RW shift_pttn_data[49:40]. Default 0. */
+#define HDMITX_TOP_SHIFT_PTTN_345 (0x008)
+
+/* Bit 19:10 RW shift_pttn_data[ 9: 0]. Default 0. */
+/* Bit 9: 0 RW shift_pttn_data[19:10]. Default 0. */
+#define HDMITX_TOP_SHIFT_PTTN_67 (0x009)
+
+/* Bit 25:16 RW tmds_clk_pttn[19:10]. Default 0. */
+/* Bit 9: 0 RW tmds_clk_pttn[ 9: 0]. Default 0. */
+#define HDMITX_TOP_TMDS_CLK_PTTN_01 (0x00A)
+
+/* Bit 25:16 RW tmds_clk_pttn[39:30]. Default 0. */
+/* Bit 9: 0 RW tmds_clk_pttn[29:20]. Default 0. */
+#define HDMITX_TOP_TMDS_CLK_PTTN_23 (0x00B)
+
+/* Bit 1 RW shift_tmds_clk_pttn: 1=Enable shifting clk pattern,
+ * used when TMDS CLK rate = TMDS character rate /4. Default 0.
+ * Bit 0 RW load_tmds_clk_pttn: 1=Load the clock pattern. Default 0.
+ * [ 1] shift_tmds_clk_pttn
+ * [ 0] load_tmds_clk_pttn
+ */
+#define HDMITX_TOP_TMDS_CLK_PTTN_CNTL (0x00C)
+
+/* Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM
+ * failure, write 1 to clear the failure flag. Default 0.
+ */
+#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
+
+/* Bit 0 R filtered HPD status. */
+#define HDMITX_TOP_STAT0 (0x00E)
+
+#endif /* __MESON_DW_HDMI_H */
#define VPU_MISC_CTRL 0x2740
#define VPU_ISP_GCLK_CTRL0 0x2741
#define VPU_ISP_GCLK_CTRL1 0x2742
+#define VPU_HDMI_FMT_CTRL 0x2743
#define VPU_VDIN_ASYNC_HOLD_CTRL 0x2743
#define VPU_VDISP_ASYNC_HOLD_CTRL 0x2744
#define VPU_VPUARB2_ASYNC_HOLD_CTRL 0x2745
#include "meson_drv.h"
#include "meson_vclk.h"
-/*
+/**
+ * DOC: Video Clocks
+ *
* VCLK is the "Pixel Clock" frequency generator from a dedicated PLL.
* We handle the following encodings :
+ *
* - CVBS 27MHz generator via the VCLK2 to the VENCI and VDAC blocks
+ * - HDMI Pixel Clocks generation
*
* What is missing :
- * - HDMI Pixel Clocks generation
+ *
+ * - Generate pixel clocks for 2K/4K 10bit formats
+ *
+ * Clock generator scheme :
+ *
+ * .. code::
+ *
+ * __________ _________ _____
+ * | | | | | |--ENCI
+ * | HDMI PLL |-| PLL_DIV |--- VCLK--| |--ENCL
+ * |__________| |_________| \ | MUX |--ENCP
+ * --VCLK2-| |--VDAC
+ * |_____|--HDMI-TX
+ *
+ * Final clocks can take input from either VCLK or VCLK2, but
+ * VCLK is the preferred path for HDMI clocking and VCLK2 is the
+ * preferred path for CVBS VDAC clocking.
+ *
+ * VCLK and VCLK2 have fixed divided clocks paths for /1, /2, /4, /6 or /12.
+ *
+ * The PLL_DIV can achieve an additional fractional division like
+ * 1.5, 3.5, 3.75... to generate special 2K and 4K 10bit clocks.
*/
/* HHI Registers */
#define VCLK2_SOFT_RESET BIT(15)
#define VCLK2_DIV1_EN BIT(0)
#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
+#define VCLK_DIV_MASK 0xff
+#define VCLK_DIV_EN BIT(16)
+#define VCLK_DIV_RESET BIT(17)
+#define CTS_ENCP_SEL_MASK (0xf << 24)
+#define CTS_ENCP_SEL_SHIFT 24
#define CTS_ENCI_SEL_MASK (0xf << 28)
#define CTS_ENCI_SEL_SHIFT 28
+#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
+#define VCLK_EN BIT(19)
+#define VCLK_SEL_MASK (0x7 << 16)
+#define VCLK_SEL_SHIFT 16
+#define VCLK_SOFT_RESET BIT(15)
+#define VCLK_DIV1_EN BIT(0)
+#define VCLK_DIV2_EN BIT(1)
+#define VCLK_DIV4_EN BIT(2)
+#define VCLK_DIV6_EN BIT(3)
+#define VCLK_DIV12_EN BIT(4)
#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
#define CTS_ENCI_EN BIT(0)
+#define CTS_ENCP_EN BIT(2)
#define CTS_VDAC_EN BIT(4)
+#define HDMI_TX_PIXEL_EN BIT(5)
+#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
+#define HDMI_TX_PIXEL_SEL_MASK (0xf << 16)
+#define HDMI_TX_PIXEL_SEL_SHIFT 16
+#define CTS_HDMI_SYS_SEL_MASK (0x7 << 9)
+#define CTS_HDMI_SYS_DIV_MASK (0x7f)
+#define CTS_HDMI_SYS_EN BIT(8)
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HDMI_PLL_RESET BIT(28)
#define HDMI_PLL_LOCK BIT(31)
+/* VID PLL Dividers */
+enum {
+ VID_PLL_DIV_1 = 0,
+ VID_PLL_DIV_2,
+ VID_PLL_DIV_2p5,
+ VID_PLL_DIV_3,
+ VID_PLL_DIV_3p5,
+ VID_PLL_DIV_3p75,
+ VID_PLL_DIV_4,
+ VID_PLL_DIV_5,
+ VID_PLL_DIV_6,
+ VID_PLL_DIV_6p25,
+ VID_PLL_DIV_7,
+ VID_PLL_DIV_7p5,
+ VID_PLL_DIV_12,
+ VID_PLL_DIV_14,
+ VID_PLL_DIV_15,
+};
+
+void meson_vid_pll_set(struct meson_drm *priv, unsigned int div)
+{
+ unsigned int shift_val = 0;
+ unsigned int shift_sel = 0;
+
+ /* Disable vid_pll output clock */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_EN, 0);
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_PRESET, 0);
+
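+	/*
+	 * The shift_val/shift_sel pairs below are opaque divider
+	 * pattern-generator settings: each pair encodes the fractional
+	 * ratio named by its VID_PLL_DIV_* case and is written verbatim
+	 * into HHI_VID_PLL_CLK_DIV further down.
+	 */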
+ switch (div) {
+ case VID_PLL_DIV_2:
+ shift_val = 0x0aaa;
+ shift_sel = 0;
+ break;
+ case VID_PLL_DIV_2p5:
+ shift_val = 0x5294;
+ shift_sel = 2;
+ break;
+ case VID_PLL_DIV_3:
+ shift_val = 0x0db6;
+ shift_sel = 0;
+ break;
+ case VID_PLL_DIV_3p5:
+ shift_val = 0x36cc;
+ shift_sel = 1;
+ break;
+ case VID_PLL_DIV_3p75:
+ shift_val = 0x6666;
+ shift_sel = 2;
+ break;
+ case VID_PLL_DIV_4:
+ shift_val = 0x0ccc;
+ shift_sel = 0;
+ break;
+ case VID_PLL_DIV_5:
+ shift_val = 0x739c;
+ shift_sel = 2;
+ break;
+ case VID_PLL_DIV_6:
+ shift_val = 0x0e38;
+ shift_sel = 0;
+ break;
+ case VID_PLL_DIV_6p25:
+ shift_val = 0x0000;
+ shift_sel = 3;
+ break;
+ case VID_PLL_DIV_7:
+ shift_val = 0x3c78;
+ shift_sel = 1;
+ break;
+ case VID_PLL_DIV_7p5:
+ shift_val = 0x78f0;
+ shift_sel = 2;
+ break;
+ case VID_PLL_DIV_12:
+ shift_val = 0x0fc0;
+ shift_sel = 0;
+ break;
+ case VID_PLL_DIV_14:
+ shift_val = 0x3f80;
+ shift_sel = 1;
+ break;
+ case VID_PLL_DIV_15:
+ shift_val = 0x7f80;
+ shift_sel = 2;
+ break;
+ }
+
+	if (div == VID_PLL_DIV_1) {
+		/* Enable vid_pll bypass to HDMI pll */
+		regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+				   VID_PLL_BYPASS, VID_PLL_BYPASS);
+	} else {
+ /* Disable Bypass */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_BYPASS, 0);
+ /* Clear sel */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ 3 << 16, 0);
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_PRESET, 0);
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ 0x7fff, 0);
+
+ /* Setup sel and val */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ 3 << 16, shift_sel << 16);
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_PRESET, VID_PLL_PRESET);
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ 0x7fff, shift_val);
+
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_PRESET, 0);
+ }
+
+ /* Enable the vid_pll output clock */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_EN, VID_PLL_EN);
+}
+
/*
* Setup VCLK2 for 27MHz, and enable clocks for ENCI and VDAC
*
/* Disable VCLK2 */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
- /* Disable vid_pll output clock */
- regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_EN, 0);
- regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_PRESET, 0);
- /* Enable vid_pll bypass to HDMI pll */
- regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
- VID_PLL_BYPASS, VID_PLL_BYPASS);
- /* Enable the vid_pll output clock */
- regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
- VID_PLL_EN, VID_PLL_EN);
+ /* Setup vid_pll to /1 */
+ meson_vid_pll_set(priv, VID_PLL_DIV_1);
/* Setup the VCLK2 divider value to achieve 27MHz */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
CTS_VDAC_EN, CTS_VDAC_EN);
}
+
+/* PLL O1 O2 O3 VP DV EN TX */
+/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
+#define MESON_VCLK_HDMI_ENCI_54000 1
+/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
+#define MESON_VCLK_HDMI_DDR_54000 2
+/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
+#define MESON_VCLK_HDMI_DDR_148500 3
+/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
+#define MESON_VCLK_HDMI_74250 4
+/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
+#define MESON_VCLK_HDMI_148500 5
+/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_297000 6
+/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
+#define MESON_VCLK_HDMI_594000 7
+
+struct meson_vclk_params {
+ unsigned int pll_base_freq;
+ unsigned int pll_od1;
+ unsigned int pll_od2;
+ unsigned int pll_od3;
+ unsigned int vid_pll_div;
+ unsigned int vclk_div;
+} params[] = {
+ [MESON_VCLK_HDMI_ENCI_54000] = {
+ .pll_base_freq = 4320000,
+ .pll_od1 = 4,
+ .pll_od2 = 4,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
+ [MESON_VCLK_HDMI_DDR_54000] = {
+ .pll_base_freq = 4320000,
+ .pll_od1 = 4,
+ .pll_od2 = 4,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
+ [MESON_VCLK_HDMI_DDR_148500] = {
+ .pll_base_freq = 2970000,
+ .pll_od1 = 4,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
+ [MESON_VCLK_HDMI_74250] = {
+ .pll_base_freq = 2970000,
+ .pll_od1 = 2,
+ .pll_od2 = 2,
+ .pll_od3 = 2,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
+ [MESON_VCLK_HDMI_148500] = {
+ .pll_base_freq = 2970000,
+ .pll_od1 = 1,
+ .pll_od2 = 2,
+ .pll_od3 = 2,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
+ [MESON_VCLK_HDMI_297000] = {
+ .pll_base_freq = 2970000,
+ .pll_od1 = 1,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_594000] = {
+ .pll_base_freq = 5940000,
+ .pll_od1 = 1,
+ .pll_od2 = 1,
+ .pll_od3 = 2,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
+};
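+
+/*
+ * Sanity-check for the table above: the HDMI-TX pixel frequency falls
+ * out of the divider chain as
+ *   pll_base_freq / (od1 * od2 * od3) / vid_pll_div / vclk_div / tx_div
+ * e.g. MESON_VCLK_HDMI_148500: 2970000 / (1*2*2) / 5 / 1 / 1 = 148500 kHz,
+ * and MESON_VCLK_HDMI_ENCI_54000: 4320000 / (4*4*1) / 5 / 1 = 54000 kHz,
+ * i.e. 27000 kHz at the TX after the /2 gate shown in the table header.
+ */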
+
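+/* The OD dividers are powers of two; the register field stores log2(od) */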
+static inline unsigned int pll_od_to_reg(unsigned int od)
+{
+ switch (od) {
+ case 1:
+ return 0;
+ case 2:
+ return 1;
+ case 4:
+ return 2;
+ case 8:
+ return 3;
+ }
+
+ /* Invalid */
+ return 0;
+}
+
+void meson_hdmi_pll_set(struct meson_drm *priv,
+ unsigned int base,
+ unsigned int od1,
+ unsigned int od2,
+ unsigned int od3)
+{
+ unsigned int val;
+
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ switch (base) {
+ case 2970000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* Enable and unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 0x7 << 28, 0x4 << 28);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4e00);
+ break;
+
+ case 4320000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+ break;
+
+ case 5940000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800027b);
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4c00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+ break;
+		}
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ switch (base) {
+ case 2970000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 4320000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 5940000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002f7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb200);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+		}
+
+ /* Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET, HDMI_PLL_RESET);
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET, 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
+	}
+
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 3 << 16, pll_od_to_reg(od1) << 16);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
+ 3 << 21, pll_od_to_reg(od1) << 21);
+
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 3 << 22, pll_od_to_reg(od2) << 22);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
+ 3 << 23, pll_od_to_reg(od2) << 23);
+
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 3 << 18, pll_od_to_reg(od3) << 18);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
+ 3 << 19, pll_od_to_reg(od3) << 19);
+}
+
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int freq)
+ unsigned int vclk_freq, unsigned int venc_freq,
+ unsigned int dac_freq, bool hdmi_use_enci)
{
- if (target == MESON_VCLK_TARGET_CVBS && freq == MESON_VCLK_CVBS)
+ unsigned int freq;
+ unsigned int hdmi_tx_div;
+ unsigned int venc_div;
+
+ if (target == MESON_VCLK_TARGET_CVBS) {
meson_venci_cvbs_clock_config(priv);
+ return;
+ }
+
+ hdmi_tx_div = vclk_freq / dac_freq;
+
+ if (hdmi_tx_div == 0) {
+ pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+ dac_freq);
+ return;
+ }
+
+ venc_div = vclk_freq / venc_freq;
+
+ if (venc_div == 0) {
+ pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+ venc_freq);
+ return;
+ }
+
+ switch (vclk_freq) {
+ case 54000:
+ if (hdmi_use_enci)
+ freq = MESON_VCLK_HDMI_ENCI_54000;
+ else
+ freq = MESON_VCLK_HDMI_DDR_54000;
+ break;
+ case 74250:
+ freq = MESON_VCLK_HDMI_74250;
+ break;
+ case 148500:
+ if (dac_freq != 148500)
+ freq = MESON_VCLK_HDMI_DDR_148500;
+ else
+ freq = MESON_VCLK_HDMI_148500;
+ break;
+ case 297000:
+ freq = MESON_VCLK_HDMI_297000;
+ break;
+ case 594000:
+ freq = MESON_VCLK_HDMI_594000;
+ break;
+ default:
+ pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
+ vclk_freq);
+ return;
+ }
+
+ /* Set HDMI-TX sys clock */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ CTS_HDMI_SYS_SEL_MASK, 0);
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ CTS_HDMI_SYS_DIV_MASK, 0);
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN);
+
+ /* Set HDMI PLL rate */
+ meson_hdmi_pll_set(priv, params[freq].pll_base_freq,
+ params[freq].pll_od1,
+ params[freq].pll_od2,
+ params[freq].pll_od3);
+
+ /* Setup vid_pll divider */
+ meson_vid_pll_set(priv, params[freq].vid_pll_div);
+
+ /* Set VCLK div */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_SEL_MASK, 0);
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ VCLK_DIV_MASK, params[freq].vclk_div - 1);
+
+ /* Set HDMI-TX source */
+ switch (hdmi_tx_div) {
+ case 1:
+ /* enable vclk_div1 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV1_EN, VCLK_DIV1_EN);
+
+ /* select vclk_div1 for HDMI-TX */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ HDMI_TX_PIXEL_SEL_MASK, 0);
+ break;
+ case 2:
+ /* enable vclk_div2 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV2_EN, VCLK_DIV2_EN);
+
+ /* select vclk_div2 for HDMI-TX */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ HDMI_TX_PIXEL_SEL_MASK, 1 << HDMI_TX_PIXEL_SEL_SHIFT);
+ break;
+ case 4:
+ /* enable vclk_div4 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV4_EN, VCLK_DIV4_EN);
+
+ /* select vclk_div4 for HDMI-TX */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ HDMI_TX_PIXEL_SEL_MASK, 2 << HDMI_TX_PIXEL_SEL_SHIFT);
+ break;
+ case 6:
+ /* enable vclk_div6 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV6_EN, VCLK_DIV6_EN);
+
+ /* select vclk_div6 for HDMI-TX */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ HDMI_TX_PIXEL_SEL_MASK, 3 << HDMI_TX_PIXEL_SEL_SHIFT);
+ break;
+ case 12:
+ /* enable vclk_div12 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV12_EN, VCLK_DIV12_EN);
+
+ /* select vclk_div12 for HDMI-TX */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
+ HDMI_TX_PIXEL_SEL_MASK, 4 << HDMI_TX_PIXEL_SEL_SHIFT);
+ break;
+ }
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
+ HDMI_TX_PIXEL_EN, HDMI_TX_PIXEL_EN);
+
+ /* Set ENCI/ENCP Source */
+ switch (venc_div) {
+ case 1:
+ /* enable vclk_div1 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV1_EN, VCLK_DIV1_EN);
+
+ if (hdmi_use_enci)
+ /* select vclk_div1 for enci */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCI_SEL_MASK, 0);
+ else
+ /* select vclk_div1 for encp */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCP_SEL_MASK, 0);
+ break;
+ case 2:
+ /* enable vclk_div2 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV2_EN, VCLK_DIV2_EN);
+
+ if (hdmi_use_enci)
+ /* select vclk_div2 for enci */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCI_SEL_MASK, 1 << CTS_ENCI_SEL_SHIFT);
+ else
+ /* select vclk_div2 for encp */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCP_SEL_MASK, 1 << CTS_ENCP_SEL_SHIFT);
+ break;
+ case 4:
+ /* enable vclk_div4 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV4_EN, VCLK_DIV4_EN);
+
+ if (hdmi_use_enci)
+ /* select vclk_div4 for enci */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCI_SEL_MASK, 2 << CTS_ENCI_SEL_SHIFT);
+ else
+ /* select vclk_div4 for encp */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCP_SEL_MASK, 2 << CTS_ENCP_SEL_SHIFT);
+ break;
+ case 6:
+ /* enable vclk_div6 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV6_EN, VCLK_DIV6_EN);
+
+ if (hdmi_use_enci)
+ /* select vclk_div6 for enci */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCI_SEL_MASK, 3 << CTS_ENCI_SEL_SHIFT);
+ else
+ /* select vclk_div6 for encp */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCP_SEL_MASK, 3 << CTS_ENCP_SEL_SHIFT);
+ break;
+ case 12:
+ /* enable vclk_div12 gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
+ VCLK_DIV12_EN, VCLK_DIV12_EN);
+
+ if (hdmi_use_enci)
+ /* select vclk_div12 for enci */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCI_SEL_MASK, 4 << CTS_ENCI_SEL_SHIFT);
+ else
+ /* select vclk_div12 for encp */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCP_SEL_MASK, 4 << CTS_ENCP_SEL_SHIFT);
+ break;
+ }
+
+ if (hdmi_use_enci)
+ /* Enable ENCI clock gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
+ CTS_ENCI_EN, CTS_ENCI_EN);
+ else
+ /* Enable ENCP clock gate */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
+ CTS_ENCP_EN, CTS_ENCP_EN);
+
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
}
+EXPORT_SYMBOL_GPL(meson_vclk_setup);
enum {
MESON_VCLK_TARGET_CVBS = 0,
+ MESON_VCLK_TARGET_HDMI = 1,
};
/* 27MHz is the CVBS Pixel Clock */
-#define MESON_VCLK_CVBS 27000
+#define MESON_VCLK_CVBS 27000
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int freq);
+ unsigned int vclk_freq, unsigned int venc_freq,
+ unsigned int dac_freq, bool hdmi_use_enci);
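+
+/*
+ * Minimal caller sketch (a hedged example, not taken from the driver;
+ * the 1080p60 values match the supported-frequency list in meson_vclk.c):
+ *
+ *	meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI,
+ *			 148500, 148500, 148500, false);
+ *
+ * For MESON_VCLK_TARGET_CVBS the frequency arguments are ignored.
+ */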
#endif /* __MESON_VCLK_H */
#include "meson_vclk.h"
#include "meson_registers.h"
-/*
+/**
+ * DOC: Video Encoder
+ *
* VENC Handle the pixels encoding to the output formats.
* We handle the following encodings :
- * - CVBS Encoding via the ENCI encoder and VDAC digital to analog converter
*
- * What is missing :
+ * - CVBS Encoding via the ENCI encoder and VDAC digital to analog converter
* - TMDS/HDMI Encoding via ENCI_DIV and ENCP
* - Setup of more clock rates for HDMI modes
+ *
+ * What is missing :
+ *
* - LCD Panel encoding via ENCL
* - TV Panel encoding via ENCT
+ *
+ * VENC paths :
+ *
+ * .. code::
+ *
+ * _____ _____ ____________________
+ * vd1---| |-| | | VENC /---------|----VDAC
+ * vd2---| VIU |-| VPP |-|-----ENCI/-ENCI_DVI-|-|
+ * osd1--| |-| | | \ | X--HDMI-TX
+ * osd2--|_____|-|_____| | |\-ENCP--ENCP_DVI-|-|
+ * | | |
+ * | \--ENCL-----------|----LVDS
+ * |____________________|
+ *
+ * The ENCI is designed for PAL or NTSC encoding and can go through the VDAC
+ * directly for CVBS encoding, or through the ENCI_DVI encoder for HDMI.
+ * The ENCP is designed for progressive encoding but can also generate
+ * 1080i interlaced pixels; it was initially designed to encode pixels for
+ * the VDAC to output RGB or YUV analog signals.
+ * Its output is only used through the ENCP_DVI encoder for HDMI.
+ * The ENCL LVDS encoder is not implemented.
+ *
+ * The ENCI and ENCP encoders need specially defined parameters for each
+ * supported mode, which thus cannot be derived from standard video timings.
+ *
+ * The ENCI and ENCP DVI encoders are more generic and can generate any timings
+ * from the pixel data generated by ENCI or ENCP, so they can use the standard
+ * video timings as source for HW parameters.
*/
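
/*
 * Encoder-selection note (mirrors meson_venc_hdmi_mode_set below): the
 * only DBLCLK modes accepted are 480i/576i and they take the ENCI path;
 * every other supported VIC goes through ENCP, i.e.
 *
 *	use_enci = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
 */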
/* HHI Registers */
.analog_sync_adj = 0x9c00,
};
+union meson_hdmi_venc_mode {
+ struct {
+ unsigned int mode_tag;
+ unsigned int hso_begin;
+ unsigned int hso_end;
+ unsigned int vso_even;
+ unsigned int vso_odd;
+ unsigned int macv_max_amp;
+ unsigned int video_prog_mode;
+ unsigned int video_mode;
+ unsigned int sch_adjust;
+ unsigned int yc_delay;
+ unsigned int pixel_start;
+ unsigned int pixel_end;
+ unsigned int top_field_line_start;
+ unsigned int top_field_line_end;
+ unsigned int bottom_field_line_start;
+ unsigned int bottom_field_line_end;
+ } enci;
+ struct {
+ unsigned int dvi_settings;
+ unsigned int video_mode;
+ unsigned int video_mode_adv;
+ unsigned int video_prog_mode;
+ bool video_prog_mode_present;
+ unsigned int video_sync_mode;
+ bool video_sync_mode_present;
+ unsigned int video_yc_dly;
+ bool video_yc_dly_present;
+ unsigned int video_rgb_ctrl;
+ bool video_rgb_ctrl_present;
+ unsigned int video_filt_ctrl;
+ bool video_filt_ctrl_present;
+ unsigned int video_ofld_voav_ofst;
+ bool video_ofld_voav_ofst_present;
+ unsigned int yfp1_htime;
+ unsigned int yfp2_htime;
+ unsigned int max_pxcnt;
+ unsigned int hspuls_begin;
+ unsigned int hspuls_end;
+ unsigned int hspuls_switch;
+ unsigned int vspuls_begin;
+ unsigned int vspuls_end;
+ unsigned int vspuls_bline;
+ unsigned int vspuls_eline;
+ unsigned int eqpuls_begin;
+ bool eqpuls_begin_present;
+ unsigned int eqpuls_end;
+ bool eqpuls_end_present;
+ unsigned int eqpuls_bline;
+ bool eqpuls_bline_present;
+ unsigned int eqpuls_eline;
+ bool eqpuls_eline_present;
+ unsigned int havon_begin;
+ unsigned int havon_end;
+ unsigned int vavon_bline;
+ unsigned int vavon_eline;
+ unsigned int hso_begin;
+ unsigned int hso_end;
+ unsigned int vso_begin;
+ unsigned int vso_end;
+ unsigned int vso_bline;
+ unsigned int vso_eline;
+ bool vso_eline_present;
+ unsigned int sy_val;
+ bool sy_val_present;
+ unsigned int sy2_val;
+ bool sy2_val_present;
+ unsigned int max_lncnt;
+ } encp;
+};
+
+union meson_hdmi_venc_mode meson_hdmi_enci_mode_480i = {
+ .enci = {
+ .hso_begin = 5,
+ .hso_end = 129,
+ .vso_even = 3,
+ .vso_odd = 260,
+ .macv_max_amp = 0x810b,
+ .video_prog_mode = 0xf0,
+ .video_mode = 0x8,
+ .sch_adjust = 0x20,
+ .yc_delay = 0,
+ .pixel_start = 227,
+ .pixel_end = 1667,
+ .top_field_line_start = 18,
+ .top_field_line_end = 258,
+ .bottom_field_line_start = 19,
+ .bottom_field_line_end = 259,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_enci_mode_576i = {
+ .enci = {
+ .hso_begin = 3,
+ .hso_end = 129,
+ .vso_even = 3,
+ .vso_odd = 260,
+ .macv_max_amp = 8107,
+ .video_prog_mode = 0xff,
+ .video_mode = 0x13,
+ .sch_adjust = 0x28,
+ .yc_delay = 0x333,
+ .pixel_start = 251,
+ .pixel_end = 1691,
+ .top_field_line_start = 22,
+ .top_field_line_end = 310,
+ .bottom_field_line_start = 23,
+ .bottom_field_line_end = 311,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_480p = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4000,
+ .video_mode_adv = 0x9,
+ .video_prog_mode = 0,
+ .video_prog_mode_present = true,
+ .video_sync_mode = 7,
+ .video_sync_mode_present = true,
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x2052,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 244,
+ .yfp2_htime = 1630,
+ .max_pxcnt = 1715,
+ .hspuls_begin = 0x22,
+ .hspuls_end = 0xa0,
+ .hspuls_switch = 88,
+ .vspuls_begin = 0,
+ .vspuls_end = 1589,
+ .vspuls_bline = 0,
+ .vspuls_eline = 5,
+ .havon_begin = 249,
+ .havon_end = 1689,
+ .vavon_bline = 42,
+ .vavon_eline = 521,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 3,
+ .hso_end = 5,
+ .vso_begin = 3,
+ .vso_end = 5,
+ .vso_bline = 0,
+ /* vso_eline */
+ .sy_val = 8,
+ .sy_val_present = true,
+ .sy2_val = 0x1d8,
+ .sy2_val_present = true,
+ .max_lncnt = 524,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_576p = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4000,
+ .video_mode_adv = 0x9,
+ .video_prog_mode = 0,
+ .video_prog_mode_present = true,
+ .video_sync_mode = 7,
+ .video_sync_mode_present = true,
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x52,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 235,
+ .yfp2_htime = 1674,
+ .max_pxcnt = 1727,
+ .hspuls_begin = 0,
+ .hspuls_end = 0x80,
+ .hspuls_switch = 88,
+ .vspuls_begin = 0,
+ .vspuls_end = 1599,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 235,
+ .havon_end = 1674,
+ .vavon_bline = 44,
+ .vavon_eline = 619,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0x80,
+ .hso_end = 0,
+ .vso_begin = 0,
+ .vso_end = 5,
+ .vso_bline = 0,
+ /* vso_eline */
+ .sy_val = 8,
+ .sy_val_present = true,
+ .sy2_val = 0x1d8,
+ .sy2_val_present = true,
+ .max_lncnt = 624,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_720p60 = {
+ .encp = {
+ .dvi_settings = 0x2029,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x19,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 648,
+ .yfp2_htime = 3207,
+ .max_pxcnt = 3299,
+ .hspuls_begin = 80,
+ .hspuls_end = 240,
+ .hspuls_switch = 80,
+ .vspuls_begin = 688,
+ .vspuls_end = 3248,
+ .vspuls_bline = 4,
+ .vspuls_eline = 8,
+ .havon_begin = 648,
+ .havon_end = 3207,
+ .vavon_bline = 29,
+ .vavon_eline = 748,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 256,
+ .hso_end = 168,
+ .vso_begin = 168,
+ .vso_end = 256,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 749,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_720p50 = {
+ .encp = {
+ .dvi_settings = 0x202d,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x19,
+ .video_prog_mode = 0x100,
+ .video_prog_mode_present = true,
+ .video_sync_mode = 0x407,
+ .video_sync_mode_present = true,
+ .video_yc_dly = 0,
+ .video_yc_dly_present = true,
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 648,
+ .yfp2_htime = 3207,
+ .max_pxcnt = 3959,
+ .hspuls_begin = 80,
+ .hspuls_end = 240,
+ .hspuls_switch = 80,
+ .vspuls_begin = 688,
+ .vspuls_end = 3248,
+ .vspuls_bline = 4,
+ .vspuls_eline = 8,
+ .havon_begin = 648,
+ .havon_end = 3207,
+ .vavon_bline = 29,
+ .vavon_eline = 748,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 128,
+ .hso_end = 208,
+ .vso_begin = 128,
+ .vso_end = 128,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 749,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080i60 = {
+ .encp = {
+ .dvi_settings = 0x2029,
+ .video_mode = 0x5ffc,
+ .video_mode_adv = 0x19,
+ .video_prog_mode = 0x100,
+ .video_prog_mode_present = true,
+ .video_sync_mode = 0x207,
+ .video_sync_mode_present = true,
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ .video_ofld_voav_ofst = 0x11,
+ .video_ofld_voav_ofst_present = true,
+ .yfp1_htime = 516,
+ .yfp2_htime = 4355,
+ .max_pxcnt = 4399,
+ .hspuls_begin = 88,
+ .hspuls_end = 264,
+ .hspuls_switch = 88,
+ .vspuls_begin = 440,
+ .vspuls_end = 2200,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 516,
+ .havon_end = 4355,
+ .vavon_bline = 20,
+ .vavon_eline = 559,
+ .eqpuls_begin = 2288,
+ .eqpuls_begin_present = true,
+ .eqpuls_end = 2464,
+ .eqpuls_end_present = true,
+ .eqpuls_bline = 0,
+ .eqpuls_bline_present = true,
+ .eqpuls_eline = 4,
+ .eqpuls_eline_present = true,
+ .hso_begin = 264,
+ .hso_end = 176,
+ .vso_begin = 88,
+ .vso_end = 88,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 1124,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080i50 = {
+ .encp = {
+ .dvi_settings = 0x202d,
+ .video_mode = 0x5ffc,
+ .video_mode_adv = 0x19,
+ .video_prog_mode = 0x100,
+ .video_prog_mode_present = true,
+ .video_sync_mode = 0x7,
+ .video_sync_mode_present = true,
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ .video_ofld_voav_ofst = 0x11,
+ .video_ofld_voav_ofst_present = true,
+ .yfp1_htime = 526,
+ .yfp2_htime = 4365,
+ .max_pxcnt = 5279,
+ .hspuls_begin = 88,
+ .hspuls_end = 264,
+ .hspuls_switch = 88,
+ .vspuls_begin = 440,
+ .vspuls_end = 2200,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 526,
+ .havon_end = 4365,
+ .vavon_bline = 20,
+ .vavon_eline = 559,
+ .eqpuls_begin = 2288,
+ .eqpuls_begin_present = true,
+ .eqpuls_end = 2464,
+ .eqpuls_end_present = true,
+ .eqpuls_bline = 0,
+ .eqpuls_bline_present = true,
+ .eqpuls_eline = 4,
+ .eqpuls_eline_present = true,
+ .hso_begin = 142,
+ .hso_end = 230,
+ .vso_begin = 142,
+ .vso_end = 142,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 1124,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p24 = {
+ .encp = {
+ .dvi_settings = 0xd,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ .video_prog_mode = 0x100,
+ .video_prog_mode_present = true,
+ .video_sync_mode = 0x7,
+ .video_sync_mode_present = true,
+ .video_yc_dly = 0,
+ .video_yc_dly_present = true,
+ .video_rgb_ctrl = 2,
+ .video_rgb_ctrl_present = true,
+ .video_filt_ctrl = 0x1052,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 271,
+ .yfp2_htime = 2190,
+ .max_pxcnt = 2749,
+ .hspuls_begin = 44,
+ .hspuls_end = 132,
+ .hspuls_switch = 44,
+ .vspuls_begin = 220,
+ .vspuls_end = 2140,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 271,
+ .havon_end = 2190,
+ .vavon_bline = 41,
+ .vavon_eline = 1120,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ .eqpuls_bline = 0,
+ .eqpuls_bline_present = true,
+ .eqpuls_eline = 4,
+ .eqpuls_eline_present = true,
+ .hso_begin = 79,
+ .hso_end = 123,
+ .vso_begin = 79,
+ .vso_end = 79,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 1124,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p30 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ .video_prog_mode = 0x100,
+ .video_prog_mode_present = true,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1052,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 2060,
+ .max_pxcnt = 2199,
+ .hspuls_begin = 2156,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 2067,
+ .vavon_bline = 41,
+ .vavon_eline = 1120,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156,
+ .vso_begin = 2100,
+ .vso_end = 2164,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 1124,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p50 = {
+ .encp = {
+ .dvi_settings = 0xd,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ .video_prog_mode = 0x100,
+ .video_prog_mode_present = true,
+ .video_sync_mode = 0x7,
+ .video_sync_mode_present = true,
+ .video_yc_dly = 0,
+ .video_yc_dly_present = true,
+ .video_rgb_ctrl = 2,
+ .video_rgb_ctrl_present = true,
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 271,
+ .yfp2_htime = 2190,
+ .max_pxcnt = 2639,
+ .hspuls_begin = 44,
+ .hspuls_end = 132,
+ .hspuls_switch = 44,
+ .vspuls_begin = 220,
+ .vspuls_end = 2140,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 271,
+ .havon_end = 2190,
+ .vavon_bline = 41,
+ .vavon_eline = 1120,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ .eqpuls_bline = 0,
+ .eqpuls_bline_present = true,
+ .eqpuls_eline = 4,
+ .eqpuls_eline_present = true,
+ .hso_begin = 79,
+ .hso_end = 123,
+ .vso_begin = 79,
+ .vso_end = 79,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 1124,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ .video_prog_mode = 0x100,
+ .video_prog_mode_present = true,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1052,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 2060,
+ .max_pxcnt = 2199,
+ .hspuls_begin = 2156,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 2067,
+ .vavon_bline = 41,
+ .vavon_eline = 1120,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156,
+ .vso_begin = 2100,
+ .vso_end = 2164,
+ .vso_bline = 0,
+ .vso_eline = 5,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 1124,
+ },
+};
+
+struct meson_hdmi_venc_vic_mode {
+ unsigned int vic;
+ union meson_hdmi_venc_mode *mode;
+} meson_hdmi_venc_vic_modes[] = {
+ { 6, &meson_hdmi_enci_mode_480i },
+ { 7, &meson_hdmi_enci_mode_480i },
+ { 21, &meson_hdmi_enci_mode_576i },
+ { 22, &meson_hdmi_enci_mode_576i },
+ { 2, &meson_hdmi_encp_mode_480p },
+ { 3, &meson_hdmi_encp_mode_480p },
+ { 17, &meson_hdmi_encp_mode_576p },
+ { 18, &meson_hdmi_encp_mode_576p },
+ { 4, &meson_hdmi_encp_mode_720p60 },
+ { 19, &meson_hdmi_encp_mode_720p50 },
+ { 5, &meson_hdmi_encp_mode_1080i60 },
+ { 20, &meson_hdmi_encp_mode_1080i50 },
+ { 32, &meson_hdmi_encp_mode_1080p24 },
+ { 34, &meson_hdmi_encp_mode_1080p30 },
+ { 31, &meson_hdmi_encp_mode_1080p50 },
+ { 16, &meson_hdmi_encp_mode_1080p60 },
+ { 0, NULL}, /* sentinel */
+};
+
+/* Convert a 4-bit two's complement value to its signed representation */
+static signed int to_signed(unsigned int a)
+{
+	if (a <= 7)
+		return a;
+	else
+		return a - 16;
+}
+
+/* Single-step wrap-around, not a true modulo: assumes a < 2 * b */
+static unsigned long modulo(unsigned long a, unsigned long b)
+{
+	if (a >= b)
+		return a - b;
+	else
+		return a;
+}
+
+bool meson_venc_hdmi_supported_vic(int vic)
+{
+ struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
+
+ while (vmode->vic && vmode->mode) {
+ if (vmode->vic == vic)
+ return true;
+ vmode++;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
+
+static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
+{
+ struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
+
+ while (vmode->vic && vmode->mode) {
+ if (vmode->vic == vic)
+ return vmode->mode;
+ vmode++;
+ }
+
+ return NULL;
+}
+
+bool meson_venc_hdmi_venc_repeat(int vic)
+{
+	/* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080i50/60 */
+ if (vic == 6 || vic == 7 || /* 480i */
+ vic == 21 || vic == 22 || /* 576i */
+ vic == 17 || vic == 18 || /* 576p */
+ vic == 2 || vic == 3 || /* 480p */
+ vic == 4 || /* 720p60 */
+ vic == 19 || /* 720p50 */
+ vic == 5 || /* 1080i60 */
+ vic == 20) /* 1080i50 */
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
+
+void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
+ struct drm_display_mode *mode)
+{
+ union meson_hdmi_venc_mode *vmode = NULL;
+ bool use_enci = false;
+ bool venc_repeat = false;
+ bool hdmi_repeat = false;
+ unsigned int venc_hdmi_latency = 2;
+ unsigned long total_pixels_venc = 0;
+ unsigned long active_pixels_venc = 0;
+ unsigned long front_porch_venc = 0;
+ unsigned long hsync_pixels_venc = 0;
+ unsigned long de_h_begin = 0;
+ unsigned long de_h_end = 0;
+ unsigned long de_v_begin_even = 0;
+ unsigned long de_v_end_even = 0;
+ unsigned long de_v_begin_odd = 0;
+ unsigned long de_v_end_odd = 0;
+ unsigned long hs_begin = 0;
+ unsigned long hs_end = 0;
+ unsigned long vs_adjust = 0;
+ unsigned long vs_bline_evn = 0;
+ unsigned long vs_eline_evn = 0;
+ unsigned long vs_bline_odd = 0;
+ unsigned long vs_eline_odd = 0;
+ unsigned long vso_begin_evn = 0;
+ unsigned long vso_begin_odd = 0;
+ unsigned int eof_lines;
+ unsigned int sof_lines;
+ unsigned int vsync_lines;
+
+ vmode = meson_venc_hdmi_get_vic_vmode(vic);
+ if (!vmode) {
+ dev_err(priv->dev, "%s: Fatal Error, unsupported vic %d\n",
+ __func__, vic);
+ return;
+ }
+
+ /* Use VENCI for 480i and 576i and double HDMI pixels */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ hdmi_repeat = true;
+ use_enci = true;
+ venc_hdmi_latency = 1;
+ }
+
+	/* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080i50/60 */
+ if (meson_venc_hdmi_venc_repeat(vic))
+ venc_repeat = true;
+
+ eof_lines = mode->vsync_start - mode->vdisplay;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ eof_lines /= 2;
+ sof_lines = mode->vtotal - mode->vsync_end;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ sof_lines /= 2;
+ vsync_lines = mode->vsync_end - mode->vsync_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vsync_lines /= 2;
+
+ total_pixels_venc = mode->htotal;
+ if (hdmi_repeat)
+ total_pixels_venc /= 2;
+ if (venc_repeat)
+ total_pixels_venc *= 2;
+
+ active_pixels_venc = mode->hdisplay;
+ if (hdmi_repeat)
+ active_pixels_venc /= 2;
+ if (venc_repeat)
+ active_pixels_venc *= 2;
+
+ front_porch_venc = (mode->hsync_start - mode->hdisplay);
+ if (hdmi_repeat)
+ front_porch_venc /= 2;
+ if (venc_repeat)
+ front_porch_venc *= 2;
+
+ hsync_pixels_venc = (mode->hsync_end - mode->hsync_start);
+ if (hdmi_repeat)
+ hsync_pixels_venc /= 2;
+ if (venc_repeat)
+ hsync_pixels_venc *= 2;
+
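+	/*
+	 * Worked example: 480i (VIC 6) has htotal = 1716 with
+	 * DRM_MODE_FLAG_DBLCLK set, so hdmi_repeat first halves the
+	 * horizontal counters to 858, then venc_repeat doubles them
+	 * back to 1716 VENC pixels.
+	 */
+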
+ /* Disable VDACs */
+ writel_bits_relaxed(0x1f, 0x1f,
+ priv->io_base + _REG(VENC_VDAC_SETTING));
+
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
+
+ if (use_enci) {
+ unsigned int lines_f0;
+ unsigned int lines_f1;
+
+ /* CVBS Filter settings */
+ writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
+ writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
+
+ /* Digital Video Select : Interlace, clk27 clk, external */
+ writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
+
+ /* Reset Video Mode */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE));
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+
+ /* Horizontal sync signal output */
+ writel_relaxed(vmode->enci.hso_begin,
+ priv->io_base + _REG(ENCI_SYNC_HSO_BEGIN));
+ writel_relaxed(vmode->enci.hso_end,
+ priv->io_base + _REG(ENCI_SYNC_HSO_END));
+
+ /* Vertical Sync lines */
+ writel_relaxed(vmode->enci.vso_even,
+ priv->io_base + _REG(ENCI_SYNC_VSO_EVNLN));
+ writel_relaxed(vmode->enci.vso_odd,
+ priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
+
+ /* Macrovision max amplitude change */
+ writel_relaxed(vmode->enci.macv_max_amp,
+ priv->io_base + _REG(ENCI_MACV_MAX_AMP));
+
+ /* Video mode */
+ writel_relaxed(vmode->enci.video_prog_mode,
+ priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
+ writel_relaxed(vmode->enci.video_mode,
+ priv->io_base + _REG(ENCI_VIDEO_MODE));
+
+ /* Advanced Video Mode :
+ * Demux shifting 0x2
+ * Blank line end at line17/22
+ * High bandwidth Luma Filter
+ * Low bandwidth Chroma Filter
+ * Bypass luma low pass filter
+ * No macrovision on CSYNC
+ */
+ writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+
+ writel(vmode->enci.sch_adjust,
+ priv->io_base + _REG(ENCI_VIDEO_SCH));
+
+ /* Sync mode : MASTER mode, free run, send HSO/VSO out */
+ writel_relaxed(0x07, priv->io_base + _REG(ENCI_SYNC_MODE));
+
+ if (vmode->enci.yc_delay)
+ writel_relaxed(vmode->enci.yc_delay,
+ priv->io_base + _REG(ENCI_YC_DELAY));
+
+ /* UNreset Interlaced TV Encoder */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
+
+ /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
+ writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
+
+ /* Timings */
+ writel_relaxed(vmode->enci.pixel_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_START));
+ writel_relaxed(vmode->enci.pixel_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_END));
+
+ writel_relaxed(vmode->enci.top_field_line_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_START));
+ writel_relaxed(vmode->enci.top_field_line_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_END));
+
+ writel_relaxed(vmode->enci.bottom_field_line_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_START));
+ writel_relaxed(vmode->enci.bottom_field_line_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_END));
+
+ /* Select ENCI for VIU */
+ meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
+
+ /* Interlace video enable */
+ writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+
+ lines_f0 = mode->vtotal >> 1;
+ lines_f1 = lines_f0 + 1;
+
+ de_h_begin = modulo(readl_relaxed(priv->io_base +
+ _REG(ENCI_VFIFO2VD_PIXEL_START))
+ + venc_hdmi_latency,
+ total_pixels_venc);
+ de_h_end = modulo(de_h_begin + active_pixels_venc,
+ total_pixels_venc);
+
+ writel_relaxed(de_h_begin,
+ priv->io_base + _REG(ENCI_DE_H_BEGIN));
+ writel_relaxed(de_h_end,
+ priv->io_base + _REG(ENCI_DE_H_END));
+
+ de_v_begin_even = readl_relaxed(priv->io_base +
+ _REG(ENCI_VFIFO2VD_LINE_TOP_START));
+ de_v_end_even = de_v_begin_even + mode->vdisplay;
+ de_v_begin_odd = readl_relaxed(priv->io_base +
+ _REG(ENCI_VFIFO2VD_LINE_BOT_START));
+ de_v_end_odd = de_v_begin_odd + mode->vdisplay;
+
+ writel_relaxed(de_v_begin_even,
+ priv->io_base + _REG(ENCI_DE_V_BEGIN_EVEN));
+ writel_relaxed(de_v_end_even,
+ priv->io_base + _REG(ENCI_DE_V_END_EVEN));
+ writel_relaxed(de_v_begin_odd,
+ priv->io_base + _REG(ENCI_DE_V_BEGIN_ODD));
+ writel_relaxed(de_v_end_odd,
+ priv->io_base + _REG(ENCI_DE_V_END_ODD));
+
+ /* Program Hsync timing */
+ hs_begin = de_h_end + front_porch_venc;
+ if (hs_begin >= total_pixels_venc) {
+ hs_begin -= total_pixels_venc;
+ vs_adjust = 1;
+ } else {
+ vs_adjust = 0;
+ }
+
+ hs_end = modulo(hs_begin + hsync_pixels_venc,
+ total_pixels_venc);
+ writel_relaxed(hs_begin,
+ priv->io_base + _REG(ENCI_DVI_HSO_BEGIN));
+ writel_relaxed(hs_end,
+ priv->io_base + _REG(ENCI_DVI_HSO_END));
+
+ /* Program Vsync timing for even field */
+ if (((de_v_end_odd - 1) + eof_lines + vs_adjust) >= lines_f1) {
+ vs_bline_evn = (de_v_end_odd - 1)
+ + eof_lines
+ + vs_adjust
+ - lines_f1;
+ vs_eline_evn = vs_bline_evn + vsync_lines;
+
+ writel_relaxed(vs_bline_evn,
+ priv->io_base + _REG(ENCI_DVI_VSO_BLINE_EVN));
+
+ writel_relaxed(vs_eline_evn,
+ priv->io_base + _REG(ENCI_DVI_VSO_ELINE_EVN));
+
+ writel_relaxed(hs_begin,
+ priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_EVN));
+ writel_relaxed(hs_begin,
+ priv->io_base + _REG(ENCI_DVI_VSO_END_EVN));
+ } else {
+ vs_bline_odd = (de_v_end_odd - 1)
+ + eof_lines
+ + vs_adjust;
+
+ writel_relaxed(vs_bline_odd,
+ priv->io_base + _REG(ENCI_DVI_VSO_BLINE_ODD));
+
+ writel_relaxed(hs_begin,
+ priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_ODD));
+
+ if ((vs_bline_odd + vsync_lines) >= lines_f1) {
+ vs_eline_evn = vs_bline_odd
+ + vsync_lines
+ - lines_f1;
+
+ writel_relaxed(vs_eline_evn, priv->io_base
+ + _REG(ENCI_DVI_VSO_ELINE_EVN));
+
+ writel_relaxed(hs_begin, priv->io_base
+ + _REG(ENCI_DVI_VSO_END_EVN));
+ } else {
+ vs_eline_odd = vs_bline_odd
+ + vsync_lines;
+
+ writel_relaxed(vs_eline_odd, priv->io_base
+ + _REG(ENCI_DVI_VSO_ELINE_ODD));
+
+ writel_relaxed(hs_begin, priv->io_base
+ + _REG(ENCI_DVI_VSO_END_ODD));
+ }
+ }
+
+ /* Program Vsync timing for odd field */
+ if (((de_v_end_even - 1) + (eof_lines + 1)) >= lines_f0) {
+ vs_bline_odd = (de_v_end_even - 1)
+ + (eof_lines + 1)
+ - lines_f0;
+ vs_eline_odd = vs_bline_odd + vsync_lines;
+
+ writel_relaxed(vs_bline_odd,
+ priv->io_base + _REG(ENCI_DVI_VSO_BLINE_ODD));
+
+ writel_relaxed(vs_eline_odd,
+ priv->io_base + _REG(ENCI_DVI_VSO_ELINE_ODD));
+
+ vso_begin_odd = modulo(hs_begin
+ + (total_pixels_venc >> 1),
+ total_pixels_venc);
+
+ writel_relaxed(vso_begin_odd,
+ priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_ODD));
+ writel_relaxed(vso_begin_odd,
+ priv->io_base + _REG(ENCI_DVI_VSO_END_ODD));
+ } else {
+ vs_bline_evn = (de_v_end_even - 1)
+ + (eof_lines + 1);
+
+ writel_relaxed(vs_bline_evn,
+ priv->io_base + _REG(ENCI_DVI_VSO_BLINE_EVN));
+
+ vso_begin_evn = modulo(hs_begin
+ + (total_pixels_venc >> 1),
+ total_pixels_venc);
+
+ writel_relaxed(vso_begin_evn, priv->io_base
+ + _REG(ENCI_DVI_VSO_BEGIN_EVN));
+
+ if (vs_bline_evn + vsync_lines >= lines_f0) {
+ vs_eline_odd = vs_bline_evn
+ + vsync_lines
+ - lines_f0;
+
+ writel_relaxed(vs_eline_odd, priv->io_base
+ + _REG(ENCI_DVI_VSO_ELINE_ODD));
+
+ writel_relaxed(vso_begin_evn, priv->io_base
+ + _REG(ENCI_DVI_VSO_END_ODD));
+ } else {
+ vs_eline_evn = vs_bline_evn + vsync_lines;
+
+ writel_relaxed(vs_eline_evn, priv->io_base
+ + _REG(ENCI_DVI_VSO_ELINE_EVN));
+
+ writel_relaxed(vso_begin_evn, priv->io_base
+ + _REG(ENCI_DVI_VSO_END_EVN));
+ }
+ }
+ } else {
+ writel_relaxed(vmode->encp.dvi_settings,
+ priv->io_base + _REG(VENC_DVI_SETTING));
+ writel_relaxed(vmode->encp.video_mode,
+ priv->io_base + _REG(ENCP_VIDEO_MODE));
+ writel_relaxed(vmode->encp.video_mode_adv,
+ priv->io_base + _REG(ENCP_VIDEO_MODE_ADV));
+ if (vmode->encp.video_prog_mode_present)
+ writel_relaxed(vmode->encp.video_prog_mode,
+ priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
+ if (vmode->encp.video_sync_mode_present)
+ writel_relaxed(vmode->encp.video_sync_mode,
+ priv->io_base + _REG(ENCP_VIDEO_SYNC_MODE));
+ if (vmode->encp.video_yc_dly_present)
+ writel_relaxed(vmode->encp.video_yc_dly,
+ priv->io_base + _REG(ENCP_VIDEO_YC_DLY));
+ if (vmode->encp.video_rgb_ctrl_present)
+ writel_relaxed(vmode->encp.video_rgb_ctrl,
+ priv->io_base + _REG(ENCP_VIDEO_RGB_CTRL));
+ if (vmode->encp.video_filt_ctrl_present)
+ writel_relaxed(vmode->encp.video_filt_ctrl,
+ priv->io_base + _REG(ENCP_VIDEO_FILT_CTRL));
+ if (vmode->encp.video_ofld_voav_ofst_present)
+ writel_relaxed(vmode->encp.video_ofld_voav_ofst,
+ priv->io_base
+ + _REG(ENCP_VIDEO_OFLD_VOAV_OFST));
+ writel_relaxed(vmode->encp.yfp1_htime,
+ priv->io_base + _REG(ENCP_VIDEO_YFP1_HTIME));
+ writel_relaxed(vmode->encp.yfp2_htime,
+ priv->io_base + _REG(ENCP_VIDEO_YFP2_HTIME));
+ writel_relaxed(vmode->encp.max_pxcnt,
+ priv->io_base + _REG(ENCP_VIDEO_MAX_PXCNT));
+ writel_relaxed(vmode->encp.hspuls_begin,
+ priv->io_base + _REG(ENCP_VIDEO_HSPULS_BEGIN));
+ writel_relaxed(vmode->encp.hspuls_end,
+ priv->io_base + _REG(ENCP_VIDEO_HSPULS_END));
+ writel_relaxed(vmode->encp.hspuls_switch,
+ priv->io_base + _REG(ENCP_VIDEO_HSPULS_SWITCH));
+ writel_relaxed(vmode->encp.vspuls_begin,
+ priv->io_base + _REG(ENCP_VIDEO_VSPULS_BEGIN));
+ writel_relaxed(vmode->encp.vspuls_end,
+ priv->io_base + _REG(ENCP_VIDEO_VSPULS_END));
+ writel_relaxed(vmode->encp.vspuls_bline,
+ priv->io_base + _REG(ENCP_VIDEO_VSPULS_BLINE));
+ writel_relaxed(vmode->encp.vspuls_eline,
+ priv->io_base + _REG(ENCP_VIDEO_VSPULS_ELINE));
+ if (vmode->encp.eqpuls_begin_present)
+ writel_relaxed(vmode->encp.eqpuls_begin,
+ priv->io_base + _REG(ENCP_VIDEO_EQPULS_BEGIN));
+ if (vmode->encp.eqpuls_end_present)
+ writel_relaxed(vmode->encp.eqpuls_end,
+ priv->io_base + _REG(ENCP_VIDEO_EQPULS_END));
+ if (vmode->encp.eqpuls_bline_present)
+ writel_relaxed(vmode->encp.eqpuls_bline,
+ priv->io_base + _REG(ENCP_VIDEO_EQPULS_BLINE));
+ if (vmode->encp.eqpuls_eline_present)
+ writel_relaxed(vmode->encp.eqpuls_eline,
+ priv->io_base + _REG(ENCP_VIDEO_EQPULS_ELINE));
+ writel_relaxed(vmode->encp.havon_begin,
+ priv->io_base + _REG(ENCP_VIDEO_HAVON_BEGIN));
+ writel_relaxed(vmode->encp.havon_end,
+ priv->io_base + _REG(ENCP_VIDEO_HAVON_END));
+ writel_relaxed(vmode->encp.vavon_bline,
+ priv->io_base + _REG(ENCP_VIDEO_VAVON_BLINE));
+ writel_relaxed(vmode->encp.vavon_eline,
+ priv->io_base + _REG(ENCP_VIDEO_VAVON_ELINE));
+ writel_relaxed(vmode->encp.hso_begin,
+ priv->io_base + _REG(ENCP_VIDEO_HSO_BEGIN));
+ writel_relaxed(vmode->encp.hso_end,
+ priv->io_base + _REG(ENCP_VIDEO_HSO_END));
+ writel_relaxed(vmode->encp.vso_begin,
+ priv->io_base + _REG(ENCP_VIDEO_VSO_BEGIN));
+ writel_relaxed(vmode->encp.vso_end,
+ priv->io_base + _REG(ENCP_VIDEO_VSO_END));
+ writel_relaxed(vmode->encp.vso_bline,
+ priv->io_base + _REG(ENCP_VIDEO_VSO_BLINE));
+ if (vmode->encp.vso_eline_present)
+ writel_relaxed(vmode->encp.vso_eline,
+ priv->io_base + _REG(ENCP_VIDEO_VSO_ELINE));
+ if (vmode->encp.sy_val_present)
+ writel_relaxed(vmode->encp.sy_val,
+ priv->io_base + _REG(ENCP_VIDEO_SY_VAL));
+ if (vmode->encp.sy2_val_present)
+ writel_relaxed(vmode->encp.sy2_val,
+ priv->io_base + _REG(ENCP_VIDEO_SY2_VAL));
+ writel_relaxed(vmode->encp.max_lncnt,
+ priv->io_base + _REG(ENCP_VIDEO_MAX_LNCNT));
+
+ writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
+
+ /* Set DE signal polarity to active high */
+ writel_bits_relaxed(BIT(14), BIT(14),
+ priv->io_base + _REG(ENCP_VIDEO_MODE));
+
+ /* Program DE timing */
+ de_h_begin = modulo(readl_relaxed(priv->io_base +
+ _REG(ENCP_VIDEO_HAVON_BEGIN))
+ + venc_hdmi_latency,
+ total_pixels_venc);
+ de_h_end = modulo(de_h_begin + active_pixels_venc,
+ total_pixels_venc);
+
+ writel_relaxed(de_h_begin,
+ priv->io_base + _REG(ENCP_DE_H_BEGIN));
+ writel_relaxed(de_h_end,
+ priv->io_base + _REG(ENCP_DE_H_END));
+
+ /* Program DE timing for even field */
+ de_v_begin_even = readl_relaxed(priv->io_base
+ + _REG(ENCP_VIDEO_VAVON_BLINE));
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ de_v_end_even = de_v_begin_even +
+ (mode->vdisplay / 2);
+ else
+ de_v_end_even = de_v_begin_even + mode->vdisplay;
+
+ writel_relaxed(de_v_begin_even,
+ priv->io_base + _REG(ENCP_DE_V_BEGIN_EVEN));
+ writel_relaxed(de_v_end_even,
+ priv->io_base + _REG(ENCP_DE_V_END_EVEN));
+
+ /* Program DE timing for odd field if needed */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ unsigned int ofld_voav_ofst =
+ readl_relaxed(priv->io_base +
+ _REG(ENCP_VIDEO_OFLD_VOAV_OFST));
+ de_v_begin_odd = to_signed((ofld_voav_ofst & 0xf0) >> 4)
+ + de_v_begin_even
+ + ((mode->vtotal - 1) / 2);
+ de_v_end_odd = de_v_begin_odd + (mode->vdisplay / 2);
+
+ writel_relaxed(de_v_begin_odd,
+ priv->io_base + _REG(ENCP_DE_V_BEGIN_ODD));
+ writel_relaxed(de_v_end_odd,
+ priv->io_base + _REG(ENCP_DE_V_END_ODD));
+ }
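
The odd-field start is offset from the even field by a signed 4-bit value
packed into bits [7:4] of ENCP_VIDEO_OFLD_VOAV_OFST. A sketch of the
to_signed() decode this relies on, assuming a plain 4-bit two's-complement
field (so to_signed(0xf) == -1, for a usable range of -8..7 lines):

/* Sketch, assuming a 4-bit two's-complement field decode. */
static int to_signed(unsigned int a)
{
        return (a <= 7) ? a : (int)a - 16;
}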
+
+ /* Program Hsync timing */
+ if ((de_h_end + front_porch_venc) >= total_pixels_venc) {
+ hs_begin = de_h_end
+ + front_porch_venc
+ - total_pixels_venc;
+ vs_adjust = 1;
+ } else {
+ hs_begin = de_h_end
+ + front_porch_venc;
+ vs_adjust = 0;
+ }
+
+ hs_end = modulo(hs_begin + hsync_pixels_venc,
+ total_pixels_venc);
+
+ writel_relaxed(hs_begin,
+ priv->io_base + _REG(ENCP_DVI_HSO_BEGIN));
+ writel_relaxed(hs_end,
+ priv->io_base + _REG(ENCP_DVI_HSO_END));
+
+ /* Program Vsync timing for even field */
+ if (de_v_begin_even >=
+ (sof_lines + vsync_lines + (1 - vs_adjust)))
+ vs_bline_evn = de_v_begin_even
+ - sof_lines
+ - vsync_lines
+ - (1 - vs_adjust);
+ else
+ vs_bline_evn = mode->vtotal
+ + de_v_begin_even
+ - sof_lines
+ - vsync_lines
+ - (1 - vs_adjust);
+
+ vs_eline_evn = modulo(vs_bline_evn + vsync_lines,
+ mode->vtotal);
+
+ writel_relaxed(vs_bline_evn,
+ priv->io_base + _REG(ENCP_DVI_VSO_BLINE_EVN));
+ writel_relaxed(vs_eline_evn,
+ priv->io_base + _REG(ENCP_DVI_VSO_ELINE_EVN));
+
+ vso_begin_evn = hs_begin;
+ writel_relaxed(vso_begin_evn,
+ priv->io_base + _REG(ENCP_DVI_VSO_BEGIN_EVN));
+ writel_relaxed(vso_begin_evn,
+ priv->io_base + _REG(ENCP_DVI_VSO_END_EVN));
+
+ /* Program Vsync timing for odd field if needed */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vs_bline_odd = (de_v_begin_odd - 1)
+ - sof_lines
+ - vsync_lines;
+ vs_eline_odd = (de_v_begin_odd - 1)
+ - vsync_lines;
+ vso_begin_odd = modulo(hs_begin
+ + (total_pixels_venc >> 1),
+ total_pixels_venc);
+
+ writel_relaxed(vs_bline_odd,
+ priv->io_base + _REG(ENCP_DVI_VSO_BLINE_ODD));
+ writel_relaxed(vs_eline_odd,
+ priv->io_base + _REG(ENCP_DVI_VSO_ELINE_ODD));
+ writel_relaxed(vso_begin_odd,
+ priv->io_base + _REG(ENCP_DVI_VSO_BEGIN_ODD));
+ writel_relaxed(vso_begin_odd,
+ priv->io_base + _REG(ENCP_DVI_VSO_END_ODD));
+ }
+
+ /* Select ENCP for VIU */
+ meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP);
+ }
+
+ writel_relaxed((use_enci ? 1 : 2) |
+ (mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 << 2 : 0) |
+ (mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 << 3 : 0) |
+ 4 << 5 |
+ (venc_repeat ? 1 << 8 : 0) |
+ (hdmi_repeat ? 1 << 12 : 0),
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
+ priv->venc.hdmi_repeat = hdmi_repeat;
+ priv->venc.venc_repeat = venc_repeat;
+ priv->venc.hdmi_use_enci = use_enci;
+
+ priv->venc.current_mode = MESON_VENC_MODE_HDMI;
+}
+EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set);
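
The closing VPU_HDMI_SETTING write packs several independent controls into a
single register. A sketch of the layout implied by the shifts above; the
field names are illustrative, not taken from a datasheet:

/* Illustrative names only, derived from the shifts used in the write. */
#define VPU_HDMI_SRC_ENCI       (1 << 0)        /* bits [1:0]: 1 = ENCI */
#define VPU_HDMI_SRC_ENCP       (2 << 0)        /*             2 = ENCP */
#define VPU_HDMI_POS_HSYNC      (1 << 2)        /* set for +hsync modes */
#define VPU_HDMI_POS_VSYNC      (1 << 3)        /* set for +vsync modes */
#define VPU_HDMI_FMT(x)         ((x) << 5)      /* bits [7:5]: always 4 here */
#define VPU_HDMI_VENC_REPEAT    (1 << 8)        /* VENC pixel repeat on */
#define VPU_HDMI_HDMI_REPEAT    (1 << 12)       /* HDMI repeat (DBLCLK modes) */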
+
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode)
{
writel_relaxed(mode->analog_sync_adj,
priv->io_base + _REG(ENCI_SYNC_ADJ));
- /* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS);
-
priv->venc.current_mode = mode->mode_tag;
}
MESON_VENC_MODE_NONE = 0,
MESON_VENC_MODE_CVBS_PAL,
MESON_VENC_MODE_CVBS_NTSC,
+ MESON_VENC_MODE_HDMI,
};
struct meson_cvbs_enci_mode {
unsigned int analog_sync_adj;
};
+/* HDMI VIC helpers */
+bool meson_venc_hdmi_supported_vic(int vic);
+bool meson_venc_hdmi_venc_repeat(int vic);
+
/* CVBS Timings and Parameters */
extern struct meson_cvbs_enci_mode meson_cvbs_enci_pal;
extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode);
+void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
+ struct drm_display_mode *mode);
unsigned int meson_venci_get_field(struct meson_drm *priv);
void meson_venc_enable_vsync(struct meson_drm *priv);
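
A plausible call sequence for the new HDMI entry points, e.g. from an
encoder's mode-set path (sketch only; drm_match_cea_mode() is the stock DRM
helper returning the CEA-861 VIC, or 0 for non-CEA modes):

#include <drm/drm_edid.h>

static void example_hdmi_mode_set(struct meson_drm *priv,
                                  struct drm_display_mode *mode)
{
        int vic = drm_match_cea_mode(mode);

        if (!meson_venc_hdmi_supported_vic(vic))
                return; /* should already be filtered out in mode_valid */

        meson_venc_hdmi_mode_set(priv, vic, mode);
}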
#include "meson_venc_cvbs.h"
#include "meson_venc.h"
+#include "meson_vclk.h"
#include "meson_registers.h"
/* HHI VDAC Registers */
{
struct meson_venc_cvbs *meson_venc_cvbs =
encoder_to_meson_venc_cvbs(encoder);
+ struct meson_drm *priv = meson_venc_cvbs->priv;
int i;
for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
if (drm_mode_equal(mode, &meson_mode->mode)) {
- meson_venci_cvbs_mode_set(meson_venc_cvbs->priv,
+ meson_venci_cvbs_mode_set(priv,
meson_mode->enci);
+
+ /* Setup 27MHz vclk2 for ENCI and VDAC */
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, true);
break;
}
}
static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv)
{
- struct device_node *ep, *remote;
+ struct device_node *remote;
- /* CVBS VDAC output is on the first port, first endpoint */
- ep = of_graph_get_endpoint_by_regs(priv->dev->of_node, 0, 0);
- if (!ep)
+ remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
+ if (!remote)
return false;
-
- /* If the endpoint node exists, consider it enabled */
- remote = of_graph_get_remote_port(ep);
- if (remote) {
- of_node_put(ep);
- return true;
- }
-
- of_node_put(ep);
of_node_put(remote);
-
- return false;
+ return true;
}
int meson_venc_cvbs_create(struct meson_drm *priv)
if (!meson_venc_cvbs_connector_is_available(priv)) {
dev_info(drm->dev, "CVBS Output connector not available\n");
- return -ENODEV;
+ return 0;
}
meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs),
#include "meson_canvas.h"
#include "meson_registers.h"
-/*
+/**
+ * DOC: Video Input Unit
+ *
* VIU Handles the Pixel scanout and the basic Colorspace conversions
* We handle the following features :
+ *
* - OSD1 RGB565/RGB888/xRGB8888 scanout
* - RGB conversion to x/cb/cr
* - Progressive or Interlace buffer scanout
* - HDR OSD matrix for GXL/GXM
*
* What is missing :
+ *
* - BGR888/xBGR8888/BGRx8888/BGRx8888 modes
* - YUV4:2:2 Y0CbY1Cr scanout
* - Conversion to YUV 4:4:4 from 4:2:2 input
#include "meson_vpp.h"
#include "meson_registers.h"
-/*
+/**
+ * DOC: Video Post Processing
+ *
* VPP Handles all the Post Processing after the Scanout from the VIU
* We handle the following post processings :
- * - Postblend : Blends the OSD1 only
+ *
+ * - Postblend, Blends the OSD1 only
* We exclude OSD2, VS1, VS1 and Preblend output
* - Vertical OSD Scaler for OSD1 only, we disable vertical scaler and
* use it only for interlace scanout
* - Intermediate FIFO with default Amlogic values
*
* What is missing :
+ *
* - Preblend for video overlay pre-scaling
* - OSD2 support for cursor framebuffer
* - Video pre-scaling before postblend
/* Mux VIU/VPP to ENCI */
#define MESON_VIU_VPP_MUX_ENCI 0x5
+/* Mux VIU/VPP to ENCP */
+#define MESON_VIU_VPP_MUX_ENCP 0xA
void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux);
* but it's a requirement that we provide the function
*/
static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
int i;
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int mgag200_mm_init(struct mga_device *mdev)
mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_pipe.o \
+ mdp/mdp5/mdp5_mixer.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
}
#endif
#ifdef CONFIG_DEBUG_FS
static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
-
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
-
adreno_show(gpu, m);
}
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_bo)
+ if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
}
}
}
-static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
- u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
-
if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
/* Clear the error */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+ /* Clear the interrupt */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
}
if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
{
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
- gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
+ /*
+ * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
+ * before the source is cleared the interrupt will storm.
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
if (status & RBBM_ERROR_MASK)
- a5xx_rbbm_err_irq(gpu);
+ a5xx_rbbm_err_irq(gpu, status);
if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a5xx_cp_err_irq(gpu);
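
Reduced to its essentials, the resulting acknowledge order is: ack every bit
except AHB_ERROR up front, then let the error handler clear the error source
before acking the AHB bit, so the level condition cannot re-assert in
between. A sketch, with register and mask names as in the code above:

static void a5xx_irq_sketch(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

        /* ack everything we can ack immediately */
        gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
                  status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

        /* the handler clears the source first, then acks the AHB bit */
        if (status & RBBM_ERROR_MASK)
                a5xx_rbbm_err_irq(gpu, status);
}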
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
-
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
-
adreno_show(gpu, m);
}
#endif
.idle = a5xx_idle,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
.show = a5xx_show,
+#endif
},
.get_timestamp = a5xx_get_timestamp,
};
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#define ANY_ID 0xff
if (gpu) {
int ret;
- mutex_lock(&dev->struct_mutex);
- gpu->funcs->pm_resume(gpu);
- mutex_unlock(&dev->struct_mutex);
- disable_irq(gpu->irq);
-
- ret = gpu->funcs->hw_init(gpu);
+ pm_runtime_get_sync(&pdev->dev);
+ ret = msm_gpu_hw_init(gpu);
+ pm_runtime_put_sync(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
- } else {
- enable_irq(gpu->irq);
- /* give inactive pm a chance to kick in: */
- msm_gpu_retire(gpu);
}
}
return 0;
}
+/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
+static int adreno_get_legacy_pwrlevels(struct device *dev)
+{
+ struct device_node *child, *node;
+ int ret;
+
+ node = of_find_compatible_node(dev->of_node, NULL,
+ "qcom,gpu-pwrlevels");
+ if (!node) {
+ dev_err(dev, "Could not find the GPU powerlevels\n");
+ return -ENXIO;
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int val;
+
+ ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
+ if (ret)
+ continue;
+
+ /*
+ * Skip the intentionally bogus clock value found at the bottom
+ * of most legacy frequency tables
+ */
+ if (val != 27000000)
+ dev_pm_opp_add(dev, val, 0);
+ }
+
+ return 0;
+}
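
For a hypothetical legacy table listing 27, 300 and 600 MHz, the loop above
boils down to the following; the 27 MHz sentinel is skipped and the voltage
argument stays 0 because legacy tables carry no voltage data:

/* Hypothetical example: net effect of the loop for a 3-entry table. */
static void example_populate_opps(struct device *dev)
{
        dev_pm_opp_add(dev, 300000000, 0);      /* qcom,gpu-freq = 300 MHz */
        dev_pm_opp_add(dev, 600000000, 0);      /* qcom,gpu-freq = 600 MHz */
        /* 27000000 never reaches dev_pm_opp_add() */
}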
+
+static int adreno_get_pwrlevels(struct device *dev,
+ struct adreno_platform_config *config)
+{
+ unsigned long freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ /* You down with OPP? */
+ if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
+ ret = adreno_get_legacy_pwrlevels(dev);
+ else
+ ret = dev_pm_opp_of_add_table(dev);
+
+ if (ret)
+ return ret;
+
+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (!IS_ERR(opp))
+ config->fast_rate = dev_pm_opp_get_freq(opp);
+
+ if (!config->fast_rate) {
+ DRM_DEV_INFO(dev,
+ "Could not find clock rate. Using default\n");
+ /* Pick a suitably safe clock speed for any target */
+ config->fast_rate = 200000000;
+ }
+
+ return 0;
+}
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
- struct device_node *child, *node = dev->of_node;
u32 val;
int ret;
/* find clock rates: */
config.fast_rate = 0;
- config.slow_rate = ~0;
- for_each_child_of_node(node, child) {
- if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
- struct device_node *pwrlvl;
- for_each_child_of_node(child, pwrlvl) {
- ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
- if (ret) {
- dev_err(dev, "could not find gpu-freq: %d\n", ret);
- return ret;
- }
- config.fast_rate = max(config.fast_rate, val);
- config.slow_rate = min(config.slow_rate, val);
- }
- }
- }
- if (!config.fast_rate) {
- dev_warn(dev, "could not find clk rates\n");
- /* This is a safe low speed for all devices: */
- config.fast_rate = 200000000;
- config.slow_rate = 27000000;
- }
+ ret = adreno_get_pwrlevels(dev, &config);
+ if (ret)
+ return ret;
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
{}
};
+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+ SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
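
Illustrative only: the calling pattern these runtime-PM ops enable (the
function name is hypothetical; the autosuspend delay is configured further
down in adreno_gpu_init()):

static int example_touch_gpu(struct platform_device *pdev)
{
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);  /* powers up via adreno_resume() */
        if (ret < 0) {
                pm_runtime_put_noidle(&pdev->dev);
                return ret;
        }

        /* ... poke GPU registers here ... */

        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev); /* adreno_suspend() after delay */
        return 0;
}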
+
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
+ .pm = &adreno_pm_ops,
},
};
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = 0x100000;
+ return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
(adreno_gpu->rev.minor << 8) |
return ret;
}
+ /* reset ringbuffer: */
+ gpu->rb->cur = gpu->rb->start;
+
+ /* reset completed fence seqno: */
+ adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
+ adreno_gpu->memptrs->rptr = 0;
+ adreno_gpu->memptrs->wptr = 0;
+
/* Setup REG_CP_RB_CNTL: */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
void adreno_recover(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
int ret;
- gpu->funcs->pm_suspend(gpu);
-
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
-
- /* reset completed fence seqno: */
- adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
- adreno_gpu->memptrs->rptr = 0;
- adreno_gpu->memptrs->wptr = 0;
+ // XXX pm-runtime?? we *need* the device to be off after this
+ // so maybe continuing to call ->pm_suspend/resume() is better?
+ gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
- disable_irq(gpu->irq);
- ret = gpu->funcs->hw_init(gpu);
+ ret = msm_gpu_hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
- enable_irq(gpu->irq);
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
- gpu->funcs->pm_resume(gpu);
-
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
}
}
-
- gpu->funcs->pm_suspend(gpu);
}
#endif
adreno_gpu->rev = config->rev;
gpu->fast_rate = config->fast_rate;
- gpu->slow_rate = config->slow_rate;
gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
gpu->bus_scale_table = config->bus_scale_table;
#endif
- DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
- gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
+ gpu->fast_rate, gpu->bus_freq);
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
if (ret)
return ret;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
return 0;
}
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- if (gpu->memptrs_bo) {
- if (gpu->memptrs)
- msm_gem_put_vaddr(gpu->memptrs_bo);
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (adreno_gpu->memptrs_bo) {
+ if (adreno_gpu->memptrs)
+ msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+ if (adreno_gpu->memptrs_iova)
+ msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+ drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+ }
+ release_firmware(adreno_gpu->pm4);
+ release_firmware(adreno_gpu->pfp);
- if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ msm_gpu_cleanup(gpu);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ if (gpu->aspace) {
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(gpu->aspace);
}
- release_firmware(gpu->pm4);
- release_firmware(gpu->pfp);
- msm_gpu_cleanup(&gpu->base);
}
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, slow_rate, bus_freq;
+ uint32_t fast_rate, bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
}
/* Get panel node from the output port's endpoint data */
- device_node = of_graph_get_remote_port_parent(endpoint);
+ device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
dev_dbg(dev, "%s: no valid device\n", __func__);
goto err;
}
}
} else {
- msm_dsi_host_reset_phy(mdsi->host);
+ msm_dsi_host_reset_phy(msm_dsi->host);
ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
if (ret)
return ret;
#include <linux/hdmi.h>
#include "hdmi.h"
-
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2 0
-#define MSM_HDMI_AUDIO_CHANNEL_4 1
-#define MSM_HDMI_AUDIO_CHANNEL_6 2
-#define MSM_HDMI_AUDIO_CHANNEL_8 3
-
/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
static int nchannels[] = { 2, 4, 6, 8 };
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp4_crtc->event;
if (event) {
- /* if regular vblank case (!file) or if cancel-flip from
- * preclose on file that requested flip, then send the
- * event:
- */
- if (!file || (event->base.file_priv == file)) {
- mdp4_crtc->event = NULL;
- DBG("%s: send event: %p", mdp4_crtc->name, event);
- drm_crtc_send_vblank_event(crtc, event);
- }
+ mdp4_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp4_crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
+ msm_gem_address_space_put(aspace);
}
if (mdp4_kms->rpm_enabled)
return 0;
}
-static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
-{
- struct device_node *endpoint, *panel_node;
- struct device_node *np = dev->dev->of_node;
-
- /*
- * LVDS/LCDC is the first port described in the list of ports in the
- * MDP4 DT node.
- */
- endpoint = of_graph_get_endpoint_by_regs(np, 0, -1);
- if (!endpoint) {
- DBG("no LVDS remote endpoint\n");
- return NULL;
- }
-
- panel_node = of_graph_get_remote_port_parent(endpoint);
- if (!panel_node) {
- DBG("no valid panel node in LVDS endpoint\n");
- of_node_put(endpoint);
- return NULL;
- }
-
- of_node_put(endpoint);
-
- return panel_node;
-}
static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
int intf_type)
* bail out early if there is no panel node (no need to
* initialize LCDC encoder and LVDS connector)
*/
- panel_node = mdp4_detect_lcdc_panel(dev);
+ panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
if (!panel_node)
return 0;
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
.nb_stages = 5,
},
.dspp = {
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
.lm = {
.count = 6,
.base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
.count = 1,
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
MDP5_SUB_BLOCK_DEFINITION;
};
+struct mdp5_lm_instance {
+ int id;
+ int pp;
+ int dspp;
+ uint32_t caps;
+};
+
struct mdp5_lm_block {
MDP5_SUB_BLOCK_DEFINITION;
+ struct mdp5_lm_instance instances[MAX_BASES];
uint32_t nb_stages; /* number of stages per blender */
uint32_t max_width; /* Maximum output resolution */
uint32_t max_height;
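
Consumers pick a mixer by matching required capabilities against this
instances[] table; mdp5_mixer_assign() used below is the real,
atomic-state-aware version. A simplified sketch of just the matching idea,
assuming count comes from the common sub-block definition:

static const struct mdp5_lm_instance *
example_find_lm(const struct mdp5_lm_block *lm, uint32_t caps)
{
        int i;

        for (i = 0; i < lm->count; i++)
                if ((lm->instances[i].caps & caps) == caps)
                        return &lm->instances[i];

        return NULL;
}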
struct device *dev = encoder->dev->dev;
u32 total_lines_x100, vclks_line, cfg;
long vsync_clk_speed;
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
dev_err(dev, "vsync_clk is not initialized\n");
static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
int ret;
ret = clk_set_rate(mdp5_kms->vsync_clk,
static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
clk_disable_unprepare(mdp5_kms->vsync_clk);
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
-
mode = adjusted_mode;
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
pingpong_tearcheck_setup(encoder, mode);
- mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
- mdp5_cmd_enc->ctl);
+ mdp5_crtc_set_pipeline(encoder->crtc);
}
void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
- struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(!mdp5_cmd_enc->enabled))
return;
pingpong_tearcheck_disable(encoder);
- mdp5_ctl_set_encoder_state(ctl, false);
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
bs_set(mdp5_cmd_enc, 0);
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
- struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(mdp5_cmd_enc->enabled))
return;
if (pingpong_tearcheck_enable(encoder))
return;
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
- mdp5_ctl_set_encoder_state(ctl, true);
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_cmd_enc->enabled = true;
}
return -EINVAL;
mdp5_kms = get_kms(encoder);
- intf_num = mdp5_cmd_enc->intf.num;
+ intf_num = mdp5_cmd_enc->intf->num;
/* Switch slave encoder's trigger MUX, to use the master's
* start signal for the slave encoder
int id;
bool enabled;
- /* layer mixer used for this CRTC (+ its lock): */
-#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
- int lm;
- spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
-
- /* CTL used for this CRTC: */
- struct mdp5_ctl *ctl;
+ spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
struct completion pp_completion;
- bool cmd_mode;
-
struct {
/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
spinlock_t lock;
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
DBG("%s: flush=%08x", crtc->name, flush_mask);
- return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
+ return mdp5_ctl_commit(ctl, pipeline, flush_mask);
}
/*
*/
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_hw_mixer *mixer, *r_mixer;
struct drm_plane *plane;
uint32_t flush_mask = 0;
/* this should not happen: */
- if (WARN_ON(!mdp5_crtc->ctl))
+ if (WARN_ON(!mdp5_cstate->ctl))
return 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
flush_mask |= mdp5_plane_get_flush(plane);
}
- flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
+ mixer = mdp5_cstate->pipeline.mixer;
+ flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
+
+ r_mixer = mdp5_cstate->pipeline.r_mixer;
+ if (r_mixer)
+ flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
return crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp5_crtc->event;
if (event) {
- /* if regular vblank case (!file) or if cancel-flip from
- * preclose on file that requested flip, then send the
- * event:
- */
- if (!file || (event->base.file_priv == file)) {
- mdp5_crtc->event = NULL;
- DBG("%s: send event: %p", crtc->name, event);
- drm_crtc_send_vblank_event(crtc, event);
- }
+ mdp5_crtc->event = NULL;
+ DBG("%s: send event: %p", crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- if (mdp5_crtc->ctl && !crtc->state->enable) {
+ if (ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
- mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
- mdp5_crtc->ctl = NULL;
+ mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
+ /* XXX: What to do here? */
+ /* mdp5_crtc->ctl = NULL; */
}
}
}
}
+/*
+ * left/right pipe offsets for the stage array used in blend_setup()
+ */
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+
/*
* blend_setup() - blend all the planes of a CRTC
*
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
const struct mdp_format *format;
- uint32_t lm = mdp5_crtc->lm;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ uint32_t lm = mixer->lm;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+ uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
- enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
+ u32 val;
#define blender(stage) ((stage) - STAGE0)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
/* ctl could be released already when we are shutting down: */
- if (!mdp5_crtc->ctl)
+ /* XXX: Can this happen now? */
+ if (!ctl)
goto out;
/* Collect all plane information */
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp5_pipe right_pipe;
+
pstate = to_mdp5_plane_state(plane->state);
pstates[pstate->stage] = pstate;
- stage[pstate->stage] = mdp5_plane_pipe(plane);
+ stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
+ /*
+ * if we have a right mixer, stage the same pipe as we
+ * have on the left mixer
+ */
+ if (r_mixer)
+ r_stage[pstate->stage][PIPE_LEFT] =
+ mdp5_plane_pipe(plane);
+ /*
+ * if we have a right pipe (i.e., the plane comprises two
+ * hwpipes), then stage the right pipe on the right side of
+ * both the layer mixers
+ */
+ right_pipe = mdp5_plane_right_pipe(plane);
+ if (right_pipe) {
+ stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ }
+
plane_cnt++;
}
blender(i)), fg_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
blender(i)), bg_alpha);
+ if (r_mixer) {
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
+ blender(i)), blend_op);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
+ blender(i)), fg_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
+ blender(i)), bg_alpha);
+ }
}
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);
-
- mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
+ val | mixer_op_mode);
+ if (r_mixer) {
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
+ val | mixer_op_mode);
+ }
+ mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
+ ctl_blend_flags);
out:
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
+ struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
+ uint32_t lm = mixer->lm;
+ u32 mixer_width, val;
unsigned long flags;
struct drm_display_mode *mode;
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ mixer_width = mode->hdisplay;
+ if (r_mixer)
+ mixer_width /= 2;
+
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
- MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to LEFT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
+
+ if (r_mixer) {
+ u32 r_lm = r_mixer->lm;
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to RIGHT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
+ }
+
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
DBG("%s", crtc->name);
if (WARN_ON(!mdp5_crtc->enabled))
return;
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
DBG("%s", crtc->name);
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp5_crtc->enabled = true;
}
+int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ bool need_right_mixer)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_interface *intf;
+ bool new_mixer = false;
+
+ new_mixer = !pipeline->mixer;
+
+ if ((need_right_mixer && !pipeline->r_mixer) ||
+ (!need_right_mixer && pipeline->r_mixer))
+ new_mixer = true;
+
+ if (new_mixer) {
+ struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
+ u32 caps;
+ int ret;
+
+ caps = MDP_LM_CAP_DISPLAY;
+ if (need_right_mixer)
+ caps |= MDP_LM_CAP_PAIR;
+
+ ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
+ &pipeline->mixer, need_right_mixer ?
+ &pipeline->r_mixer : NULL);
+ if (ret)
+ return ret;
+
+ mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (old_r_mixer) {
+ mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (!need_right_mixer)
+ pipeline->r_mixer = NULL;
+ }
+ }
+
+ /*
+ * these should have been already set up in the encoder's atomic
+ * check (called by drm_atomic_helper_check_modeset)
+ */
+ intf = pipeline->intf;
+
+ mdp5_cstate->err_irqmask = intf2err(intf->num);
+ mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+ mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
+ mdp5_cstate->cmd_mode = true;
+ } else {
+ mdp5_cstate->pp_done_irqmask = 0;
+ mdp5_cstate->cmd_mode = false;
+ }
+
+ return 0;
+}
+
struct plane_state {
struct drm_plane *plane;
struct mdp5_plane_state *state;
((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
+enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ struct drm_plane_state *bpstate)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+
+ /*
+ * if we're in source split mode, it's mandatory to have
+ * border out on the base stage
+ */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return STAGE0;
+
+ /*
+ * if the bottom-most layer is not fullscreen, start at STAGE0 so
+ * the base stage can still provide the solid background color
+ */
+ if (!is_fullscreen(new_crtc_state, bpstate))
+ return STAGE0;
+
+ return STAGE_BASE;
+}
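
With cnt planes, the top-most one lands on stage (start + cnt - 1), so
source-split mode (start = STAGE0) leaves one less stage for planes than the
fullscreen-bottom case (start = STAGE_BASE). A trivial restatement of the
budget rule that atomic_check enforces below:

/* Sketch: the stage-budget rule applied in mdp5_crtc_atomic_check(). */
static bool example_stages_fit(int cnt, enum mdp_mixer_stage_id start,
                               uint32_t nb_stages)
{
        return (cnt + start - 1) < nb_stages;
}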
+
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
+ const struct drm_display_mode *mode = &state->adjusted_mode;
bool cursor_plane = false;
- int cnt = 0, base = 0, i;
+ bool need_right_mixer = false;
+ int cnt = 0, i;
+ int ret;
+ enum mdp_mixer_stage_id start;
DBG("%s: check", crtc->name);
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
+ /*
+ * if any plane on this crtc uses 2 hwpipes, then we need
+ * the crtc to have a right hwmixer.
+ */
+ if (pstates[cnt].state->r_hwpipe)
+ need_right_mixer = true;
cnt++;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
cursor_plane = true;
}
- /* assign a stage based on sorted zpos property */
- sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ /* bail out early if there aren't any planes */
+ if (!cnt)
+ return 0;
- /* if the bottom-most layer is not fullscreen, we need to use
- * it for solid-color:
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /*
+ * we need a right hwmixer if the mode's width is greater than a single
+ * LM's max width
*/
- if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
- base++;
+ if (mode->hdisplay > hw_cfg->lm.max_width)
+ need_right_mixer = true;
+
+ ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
+ if (ret) {
+ dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+ return ret;
+ }
+
+ /* assign a stage based on sorted zpos property */
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
/* trigger a warning if cursor isn't the highest zorder */
WARN_ON(cursor_plane &&
(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
+ start = get_start_stage(crtc, state, &pstates[0].state->base);
+
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
- if ((cnt + base) >= hw_cfg->lm.nb_stages) {
- dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
+ if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
+ dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ cnt, start);
return -EINVAL;
}
if (cursor_plane && (i == (cnt - 1)))
pstates[i].state->stage = hw_cfg->lm.nb_stages;
else
- pstates[i].state->stage = STAGE_BASE + i + base;
+ pstates[i].state->stage = start + i;
DBG("%s: assign pipe %s on stage=%d", crtc->name,
pstates[i].plane->name,
pstates[i].state->stage);
struct drm_crtc_state *old_crtc_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
unsigned long flags;
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!mdp5_crtc->ctl))
+ /* XXX: Can this happen now? */
+ if (unlikely(!mdp5_cstate->ctl))
return;
blend_setup(crtc);
* This is safe because no pp_done will happen before SW trigger
* in command mode.
*/
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
request_pp_done_pending(crtc);
mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
+ /* XXX are we leaking out state here? */
+ mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
+ mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
+ mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
+
request_pending(crtc, PENDING_FLIP);
}
uint32_t width, uint32_t height)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, stride;
uint64_t cursor_addr;
+ struct mdp5_ctl *ctl;
int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
return -EINVAL;
}
- if (NULL == mdp5_crtc->ctl)
+ ctl = mdp5_cstate->ctl;
+ if (!ctl)
+ return -EINVAL;
+
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
if (!handle) {
if (ret)
return -EINVAL;
- lm = mdp5_crtc->lm;
+ lm = mdp5_cstate->pipeline.mixer->lm;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
set_cursor:
- ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+ ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
dev_err(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
{
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return -EINVAL;
+
/* In case the CRTC is disabled, just drop the cursor update */
if (unlikely(!crtc->state->enable))
return 0;
get_roi(crtc, &roi_w, &roi_h);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
return 0;
}
+static void
+mdp5_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
+
+ if (WARN_ON(!pipeline))
+ return;
+
+ drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
+ pipeline->mixer->name : "(null)");
+
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
+ pipeline->r_mixer->name : "(null)");
+}
+
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (crtc->state) {
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ kfree(to_mdp5_crtc_state(crtc->state));
+ }
+
+ mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+ if (mdp5_cstate) {
+ mdp5_cstate->base.crtc = crtc;
+ crtc->state = &mdp5_cstate->base;
+ }
+}
+
+static struct drm_crtc_state *
+mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
+ sizeof(*mdp5_cstate), GFP_KERNEL);
+ if (!mdp5_cstate)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
+
+ return &mdp5_cstate->base;
+}
+
+static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(mdp5_cstate);
+}
+
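The reset/duplicate/destroy trio above is the standard way to subclass DRM atomic state: embed the base state as the first member and downcast with container_of(). A minimal userspace sketch of the same pattern, with illustrative types rather than the driver's, is:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct base_state { int active; };

struct sub_state {
	struct base_state base;	/* embedded base, recovered via container_of() */
	int mixer_id;		/* driver-private extension */
};

#define to_sub_state(x) \
	((struct sub_state *)((char *)(x) - offsetof(struct sub_state, base)))

/* kmemdup() analogue backing the duplicate_state hook */
static struct base_state *sub_duplicate_state(struct base_state *old)
{
	struct sub_state *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	memcpy(s, to_sub_state(old), sizeof(*s));
	return &s->base;
}

int main(void)
{
	struct sub_state first = { .base = { .active = 1 }, .mixer_id = 5 };
	struct base_state *copy = sub_duplicate_state(&first.base);
	int ok = copy && to_sub_state(copy)->mixer_id == 5;

	if (copy)
		free(to_sub_state(copy));
	return ok ? 0 : 1;
}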
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.set_property = drm_atomic_helper_crtc_set_property,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
.cursor_set = mdp5_crtc_cursor_set,
.cursor_move = mdp5_crtc_cursor_move,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.set_property = drm_atomic_helper_crtc_set_property,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
{
struct drm_device *dev = crtc->dev;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
int ret;
ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
msecs_to_jiffies(50));
if (ret == 0)
- dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+ dev_warn(dev->dev, "pp done time out, lm=%d\n",
+ mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
int ret;
/* Should not call this function if crtc is disabled. */
- if (!mdp5_crtc->ctl)
+ if (!ctl)
return;
ret = drm_crtc_vblank_get(crtc);
return;
ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
- ((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
+ ((mdp5_ctl_get_commit_status(ctl) &
mdp5_crtc->flushed_mask) == 0),
msecs_to_jiffies(50));
if (ret <= 0)
return mdp5_crtc->vblank.irqmask;
}
-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int lm = mdp5_crtc_get_lm(crtc);
-
- /* now that we know what irq's we want: */
- mdp5_crtc->err.irqmask = intf2err(intf->num);
- mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
-
- if ((intf->type == INTF_DSI) &&
- (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
- mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
- mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
- mdp5_crtc->cmd_mode = true;
- } else {
- mdp5_crtc->pp_done.irqmask = 0;
- mdp5_crtc->pp_done.irq = NULL;
- mdp5_crtc->cmd_mode = false;
- }
+	/* should this be done elsewhere? */
mdp_irq_update(&mdp5_kms->base);
- mdp5_crtc->ctl = ctl;
- mdp5_ctl_set_pipeline(ctl, intf, lm);
+ mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
- return mdp5_crtc->ctl;
+ return mdp5_cstate->ctl;
}
-int mdp5_crtc_get_lm(struct drm_crtc *crtc)
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
+ ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
+}
+
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return &mdp5_cstate->pipeline;
}
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
mdp5_crtc_wait_for_pp_done(crtc);
else
mdp5_crtc_wait_for_flush_done(crtc);
crtc = &mdp5_crtc->base;
mdp5_crtc->id = id;
- mdp5_crtc->lm = GET_LM_ID(id);
spin_lock_init(&mdp5_crtc->lm_lock);
spin_lock_init(&mdp5_crtc->cursor.lock);
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+ mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
if (cursor_plane)
drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
#define CTL_STAT_BUSY 0x1
#define CTL_STAT_BOOKED 0x2
-struct op_mode {
- struct mdp5_interface intf;
-
- bool encoder_enabled;
- uint32_t start_mask;
-};
-
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
- int lm;
/* CTL status bitmask */
u32 status;
- /* Operation Mode Configuration for the Pipeline */
- struct op_mode pipeline;
+ bool encoder_enabled;
+ uint32_t start_mask;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
-static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
unsigned long flags;
+ struct mdp5_interface *intf = pipeline->intf;
u32 ctl_op = 0;
if (!mdp5_cfg_intf_is_virtual(intf->type))
break;
}
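+	/*
+	 * Source split (a right mixer in the pipeline) additionally
+	 * requires 3D packing to be enabled in the CTL op mode.
+	 */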
+ if (pipeline->r_mixer)
+ ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
+ MDP5_CTL_OP_PACK_3D(1);
+
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
- struct mdp5_interface *intf, int lm)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ struct mdp5_interface *intf = pipeline->intf;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
- if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
- dev_err(mdp5_kms->dev->dev,
- "CTL %d is allocated by INTF %d, but used by INTF %d\n",
- ctl->id, ctl->pipeline.intf.num, intf->num);
- return -EINVAL;
- }
-
- ctl->lm = lm;
-
- memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
-
- ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
- mdp_ctl_flush_mask_encoder(intf);
+ ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
+ mdp_ctl_flush_mask_encoder(intf);
+ if (r_mixer)
+ ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
if (!mdp5_cfg_intf_is_virtual(intf->type))
set_display_intf(mdp5_kms, intf);
- set_ctl_op(ctl, intf);
+ set_ctl_op(ctl, pipeline);
return 0;
}
-static bool start_signal_needed(struct mdp5_ctl *ctl)
+static bool start_signal_needed(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline)
{
- struct op_mode *pipeline = &ctl->pipeline;
+ struct mdp5_interface *intf = pipeline->intf;
- if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+ if (!ctl->encoder_enabled || ctl->start_mask != 0)
return false;
- switch (pipeline->intf.type) {
+ switch (intf->type) {
case INTF_WB:
return true;
case INTF_DSI:
- return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+ return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
default:
return false;
}
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-static void refill_start_mask(struct mdp5_ctl *ctl)
+static void refill_start_mask(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline)
{
- struct op_mode *pipeline = &ctl->pipeline;
- struct mdp5_interface *intf = &ctl->pipeline.intf;
+ struct mdp5_interface *intf = pipeline->intf;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
- pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+ ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
+ if (r_mixer)
+ ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
/*
* Writeback encoder needs to program & flush
* address registers for each page flip..
*/
if (intf->type == INTF_WB)
- pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+ ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}
/**
* Note:
* This encoder state is needed to trigger START signal (data path kickoff).
*/
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ bool enabled)
{
+ struct mdp5_interface *intf = pipeline->intf;
+
if (WARN_ON(!ctl))
return -EINVAL;
- ctl->pipeline.encoder_enabled = enabled;
- DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+ ctl->encoder_enabled = enabled;
+ DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
- if (start_signal_needed(ctl)) {
+ if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl);
+ refill_start_mask(ctl, pipeline);
}
return 0;
* CTL registers need to be flushed after calling this function
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
- int lm = ctl->lm;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+
+ if (unlikely(WARN_ON(!mixer))) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+ ctl->id);
+ return -EINVAL;
+ }
- if (unlikely(WARN_ON(lm < 0))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
- ctl->id, lm);
+ if (pipeline->r_mixer) {
+ dev_err(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
spin_lock_irqsave(&ctl->hw_lock, flags);
- blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+ blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
if (enable)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
ctl->cursor_on = enable;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
}
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
- u32 ctl_blend_op_flags)
+static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ int i;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ for (i = 0; i < ctl_mgr->nlm; i++) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
+ }
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags)
{
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
unsigned long flags;
u32 blend_cfg = 0, blend_ext_cfg = 0;
+ u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
int i, start_stage;
+ mdp5_ctl_reset_blend_regs(ctl);
+
if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
start_stage = STAGE0;
blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ if (r_mixer)
+ r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
} else {
start_stage = STAGE_BASE;
}
for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
- blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
- blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+ blend_cfg |=
+ mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
+ blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
+ if (r_mixer) {
+ r_blend_cfg |=
+ mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
+ r_blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
+ }
}
spin_lock_irqsave(&ctl->hw_lock, flags);
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
- ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
+ blend_ext_cfg);
+ if (r_mixer) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
+ r_blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
+ r_blend_ext_cfg);
+ }
spin_unlock_irqrestore(&ctl->hw_lock, flags);
- ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
+ if (r_mixer)
+ ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
- DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
blend_cfg, blend_ext_cfg);
+ if (r_mixer)
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
+ r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
return 0;
}
}
}
-static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
u32 sw_mask = 0;
/* for some targets, cursor bit is the same as LM bit */
if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
- sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+ sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
return sw_mask;
}
*
* Return H/W flushed bit mask.
*/
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- struct op_mode *pipeline = &ctl->pipeline;
unsigned long flags;
u32 flush_id = ctl->id;
u32 curr_ctl_flush_mask;
- pipeline->start_mask &= ~flush_mask;
+ ctl->start_mask &= ~flush_mask;
VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
- pipeline->start_mask, ctl->pending_ctl_trigger);
+ ctl->start_mask, ctl->pending_ctl_trigger);
if (ctl->pending_ctl_trigger & flush_mask) {
flush_mask |= MDP5_CTL_FLUSH_CTL;
ctl->pending_ctl_trigger = 0;
}
- flush_mask |= fix_sw_flush(ctl, flush_mask);
+ flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
flush_mask &= ctl_mgr->flush_hw_mask;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
- if (start_signal_needed(ctl)) {
+ if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl);
+ refill_start_mask(ctl, pipeline);
}
return curr_ctl_flush_mask;
found:
ctl = &ctl_mgr->ctls[c];
- ctl->pipeline.intf.num = intf_num;
- ctl->lm = -1;
ctl->status |= CTL_STAT_BUSY;
ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
struct mdp5_interface;
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
- int lm);
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
+struct mdp5_pipeline;
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
+ bool enabled);
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable);
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
+#define MAX_PIPE_STAGE 2
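+/* a blend stage can host a left and a right hwpipe when source split is on */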
+
/*
* mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
*
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
- u32 ctl_blend_op_flags);
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags);
/**
* mdp_ctl_flush_mask...() - Register FLUSH masks
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
/* @flush_mask: see CTL flush masks definitions below */
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- int intf = mdp5_encoder->intf.num;
+ int intf = mdp5_encoder->intf->num;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
ctrl_pol = 0;
/* DSI controller cannot handle active-low sync signals. */
- if (mdp5_encoder->intf.type != INTF_DSI) {
+ if (mdp5_encoder->intf->type != INTF_DSI) {
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
- if (mdp5_encoder->intf.type == INTF_eDP) {
+ if (mdp5_encoder->intf->type == INTF_eDP) {
display_v_start += mode->htotal - mode->hsync_start;
display_v_end -= mode->hsync_start - mode->hdisplay;
}
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
- mdp5_encoder->ctl);
+ mdp5_crtc_set_pipeline(encoder->crtc);
}
static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
- int lm = mdp5_crtc_get_lm(encoder->crtc);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
- int intfn = mdp5_encoder->intf.num;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf->num;
unsigned long flags;
if (WARN_ON(!mdp5_encoder->enabled))
return;
- mdp5_ctl_set_encoder_state(ctl, false);
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
bs_set(mdp5_encoder, 0);
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
- struct mdp5_interface *intf = &mdp5_encoder->intf;
- int intfn = mdp5_encoder->intf.num;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ int intfn = intf->num;
unsigned long flags;
if (WARN_ON(mdp5_encoder->enabled))
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
- mdp5_ctl_set_encoder_state(ctl, true);
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_encoder->enabled = true;
}
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
mdp5_vid_encoder_enable(encoder);
}
+static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+
+ mdp5_cstate->ctl = ctl;
+ mdp5_cstate->pipeline.intf = intf;
+
+ return 0;
+}
+
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
+ .atomic_check = mdp5_encoder_atomic_check,
};
int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf.num;
+ int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
}
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf.num;
+ int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
return -EINVAL;
mdp5_kms = get_kms(encoder);
- intf_num = mdp5_encoder->intf.num;
+ intf_num = mdp5_encoder->intf->num;
/* Switch slave encoder's TimingGen Sync mode,
* to use the master's enable signal for the slave encoder.
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
/* TODO: Expand this to set writeback modes too */
if (cmd_mode) {
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
goto fail;
}
- memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
mdp5_encoder->ctl = ctl;
+ mdp5_encoder->intf = intf;
spin_lock_init(&mdp5_encoder->intf_lock);
/* Copy state: */
new_state->hwpipe = mdp5_kms->state->hwpipe;
+ new_state->hwmixer = mdp5_kms->state->hwmixer;
if (mdp5_kms->smp)
new_state->smp = mdp5_kms->state->smp;
struct msm_gem_address_space *aspace = mdp5_kms->aspace;
int i;
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++)
+ mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
+
for (i = 0; i < mdp5_kms->num_hwpipes; i++)
mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
+ msm_gem_address_space_put(aspace);
}
}
}
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
- enum mdp5_intf_type intf_type, int intf_num,
- struct mdp5_ctl *ctl)
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
- struct mdp5_interface intf = {
- .num = intf_num,
- .type = intf_type,
- .mode = MDP5_INTF_MODE_NONE,
- };
- encoder = mdp5_encoder_init(dev, &intf, ctl);
+ encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
return encoder;
return -EINVAL;
}
-static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct mdp5_cfg_hw *hw_cfg =
- mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
struct mdp5_ctl *ctl;
struct drm_encoder *encoder;
int ret = 0;
- switch (intf_type) {
- case INTF_DISABLED:
- break;
+ switch (intf->type) {
case INTF_eDP:
if (!priv->edp)
break;
- ctl = mdp5_ctlm_request(ctlm, intf_num);
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
if (!priv->hdmi)
break;
- ctl = mdp5_ctlm_request(ctlm, intf_num);
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
break;
case INTF_DSI:
{
- int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
+ const struct mdp5_cfg_hw *hw_cfg =
+ mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
dev_err(dev->dev, "failed to find dsi from intf %d\n",
- intf_num);
+ intf->num);
ret = -EINVAL;
break;
}
if (!priv->dsi[dsi_id])
break;
- ctl = mdp5_ctlm_request(ctlm, intf_num);
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
break;
}
default:
- dev_err(dev->dev, "unknown intf: %d\n", intf_type);
+ dev_err(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
- for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
- ret = modeset_init_intf(mdp5_kms, i);
+ for (i = 0; i < mdp5_kms->num_intfs; i++) {
+ ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
if (ret)
goto fail;
}
* the MDP5 interfaces) than the number of layer mixers present in HW,
* but let's be safe here anyway
*/
- num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
+ num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
static void mdp5_destroy(struct platform_device *pdev)
{
struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+ int i;
if (mdp5_kms->ctlm)
mdp5_ctlm_destroy(mdp5_kms->ctlm);
if (mdp5_kms->cfg)
mdp5_cfg_destroy(mdp5_kms->cfg);
+ for (i = 0; i < mdp5_kms->num_intfs; i++)
+ kfree(mdp5_kms->intfs[i]);
+
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
return 0;
}
+static int hwmixer_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ int i, ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ for (i = 0; i < hw_cfg->lm.count; i++) {
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
+ if (IS_ERR(mixer)) {
+ ret = PTR_ERR(mixer);
+ dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+ i, ret);
+ return ret;
+ }
+
+ mixer->idx = mdp5_kms->num_hwmixers;
+ mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
+ }
+
+ return 0;
+}
+
+static int interface_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ const enum mdp5_intf_type *intf_types;
+ int i;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ intf_types = hw_cfg->intf.connect;
+
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
+ struct mdp5_interface *intf;
+
+ if (intf_types[i] == INTF_DISABLED)
+ continue;
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf) {
+ dev_err(dev->dev, "failed to construct INTF%d\n", i);
+ return -ENOMEM;
+ }
+
+ intf->num = i;
+ intf->type = intf_types[i];
+ intf->mode = MDP5_INTF_MODE_NONE;
+ intf->idx = mdp5_kms->num_intfs;
+ mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
+ }
+
+ return 0;
+}
+
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
if (ret)
goto fail;
+ ret = hwmixer_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ ret = interface_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
#include "mdp/mdp_kms.h"
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
-#include "mdp5_ctl.h"
#include "mdp5_pipe.h"
+#include "mdp5_mixer.h"
+#include "mdp5_ctl.h"
#include "mdp5_smp.h"
struct mdp5_state;
unsigned num_hwpipes;
struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
+ unsigned num_hwmixers;
+ struct mdp5_hw_mixer *hwmixers[8];
+
+ unsigned num_intfs;
+ struct mdp5_interface *intfs[5];
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
*/
struct mdp5_state {
struct mdp5_hw_pipe_state hwpipe;
+ struct mdp5_hw_mixer_state hwmixer;
struct mdp5_smp_state smp;
};
struct drm_plane_state base;
struct mdp5_hw_pipe *hwpipe;
+ struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
/* aligned with property */
uint8_t premultiplied;
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
+struct mdp5_pipeline {
+ struct mdp5_interface *intf;
+ struct mdp5_hw_mixer *mixer;
+ struct mdp5_hw_mixer *r_mixer; /* right mixer */
+};
+
+struct mdp5_crtc_state {
+ struct drm_crtc_state base;
+
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline pipeline;
+
+ /* these are derivatives of intf/mixer state in mdp5_pipeline */
+ u32 vblank_irqmask;
+ u32 err_irqmask;
+ u32 pp_done_irqmask;
+
+ bool cmd_mode;
+};
+#define to_mdp5_crtc_state(x) \
+ container_of(x, struct mdp5_crtc_state, base)
+
enum mdp5_intf_mode {
MDP5_INTF_MODE_NONE = 0,
};
struct mdp5_interface {
+ int idx;
int num; /* display interface number */
enum mdp5_intf_type type;
enum mdp5_intf_mode mode;
struct mdp5_encoder {
struct drm_encoder base;
- struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
+ struct mdp5_interface *intf;
struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
}
}
-#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer)
-static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
+static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
+ struct mdp5_interface *intf)
{
/*
* In case of DSI Command Mode, the Ping Pong's read pointer IRQ
if ((intf->type == INTF_DSI) &&
(intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
- return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
+ return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;
if (intf->type == INTF_WB)
return MDP5_IRQ_WB_2_DONE;
}
}
-static inline uint32_t lm2ppdone(int lm)
+static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
{
- return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
+ return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}
int mdp5_disable(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum drm_plane_type type);
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
-int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl);
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane,
--- /dev/null
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+/*
+ * As of now, there are only 2 combinations possible for source split:
+ *
+ * Left | Right
+ * -----|------
+ * LM0 | LM1
+ * LM2 | LM5
+ *
+ */
+static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
+
+static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
+{
+ int i;
+ int pair_lm;
+
+ pair_lm = lm_right_pair[lm];
+ if (pair_lm < 0)
+ return -EINVAL;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
+
+ if (mixer->lm == pair_lm)
+ return mixer->idx;
+ }
+
+ return -1;
+}
+
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_hw_mixer_state *new_state;
+ int i;
+
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwmixer;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
+
+ /*
+ * skip if already in-use by a different CRTC. If there is a
+ * mixer already assigned to this CRTC, it means this call is
+ * a request to get an additional right mixer. Assume that the
+ * existing mixer is the 'left' one, and try to see if we can
+ * get its corresponding 'right' pair.
+ */
+ if (new_state->hwmixer_to_crtc[cur->idx] &&
+ new_state->hwmixer_to_crtc[cur->idx] != crtc)
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ if (r_mixer) {
+ int pair_idx;
+
+ pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
+ if (pair_idx < 0)
+ return -EINVAL;
+
+ if (new_state->hwmixer_to_crtc[pair_idx])
+ continue;
+
+ *r_mixer = mdp5_kms->hwmixers[pair_idx];
+ }
+
+ /*
+ * prefer a pair-able LM over an unpairable one. We can
+ * switch the CRTC from Normal mode to Source Split mode
+ * without requiring a full modeset if we had already
+ * assigned this CRTC a pair-able LM.
+ *
+ * TODO: There will be assignment sequences which would
+ * result in the CRTC requiring a full modeset, even
+ * if we have the LM resources to prevent it. For a platform
+ * with a few displays, we don't run out of pair-able LMs
+ * so easily. For now, ignore the possibility of requiring
+ * a full modeset.
+ */
+ if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
+ *mixer = cur;
+ }
+
+ if (!(*mixer))
+ return -ENOMEM;
+
+ if (r_mixer && !(*r_mixer))
+ return -ENOMEM;
+
+ DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
+
+ new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
+ if (r_mixer) {
+ DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
+ crtc->name);
+ new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
+ }
+
+ return 0;
+}
+
+void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+{
+ struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
+
+ if (!mixer)
+ return;
+
+ if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
+ return;
+
+ DBG("%s: release from crtc %s", mixer->name,
+ new_state->hwmixer_to_crtc[mixer->idx]->name);
+
+ new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+}
+
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
+{
+ kfree(mixer);
+}
+
+static const char * const mixer_names[] = {
+ "LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
+{
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return ERR_PTR(-ENOMEM);
+
+ mixer->name = mixer_names[lm->id];
+ mixer->lm = lm->id;
+ mixer->caps = lm->caps;
+ mixer->pp = lm->pp;
+ mixer->dspp = lm->dspp;
+ mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
+
+ return mixer;
+}
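Since the pairing table is the heart of right-mixer assignment, here is a standalone sketch (same table, simplified surroundings) that prints each LM's right-hand partner:

#include <stdio.h>

/* LM0 pairs with LM1, LM2 with LM5; -1 marks an unpairable mixer */
static const int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };

int main(void)
{
	int lm;

	for (lm = 0; lm < 6; lm++)
		printf("LM%d -> right pair %d\n", lm, lm_right_pair[lm]);
	return 0;
}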
--- /dev/null
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_LM_H__
+#define __MDP5_LM_H__
+
+/* represents a hw Layer Mixer, one (or more) of which is dynamically assigned to a crtc */
+struct mdp5_hw_mixer {
+ int idx;
+
+ const char *name;
+
+ int lm; /* the LM instance # */
+ uint32_t caps;
+ int pp;
+ int dspp;
+
+ uint32_t flush_mask; /* used to commit LM registers */
+};
+
+/* global atomic state of assignment between CRTCs and Layer Mixers: */
+struct mdp5_hw_mixer_state {
+ struct drm_crtc *hwmixer_to_crtc[8];
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer);
+void mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
+
+#endif /* __MDP5_LM_H__ */
hwpipe->caps = caps;
hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
- spin_lock_init(&hwpipe->pipe_lock);
-
return hwpipe;
}
#ifndef __MDP5_PIPE_H__
#define __MDP5_PIPE_H__
-#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX (SSPP_CURSOR1 + 1)
/* represents a hw pipe, which is dynamically assigned to a plane */
struct mdp5_hw_pipe {
const char *name;
enum mdp5_pipe pipe;
- spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
uint32_t reg_offset;
uint32_t caps;
struct mdp5_plane {
struct drm_plane base;
+ spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
+
uint32_t nformats;
uint32_t formats[32];
};
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx);
-static void set_scanout_locked(struct drm_plane *plane,
- struct drm_framebuffer *fb);
-
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
const struct drm_plane_state *state)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ struct mdp5_kms *mdp5_kms = get_kms(state->plane);
drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
pstate->hwpipe->name : "(null)");
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright-hwpipe=%s\n",
+ pstate->r_hwpipe ? pstate->r_hwpipe->name :
+ "(null)");
drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
drm_printf(p, "\tzpos=%u\n", pstate->zpos);
drm_printf(p, "\talpha=%u\n", pstate->alpha);
struct drm_plane_state *old_state = plane->state;
struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
bool new_hwpipe = false;
+ bool need_right_hwpipe = false;
uint32_t max_width, max_height;
+ bool out_of_bounds = false;
uint32_t caps = 0;
struct drm_rect clip;
int min_scale, max_scale;
max_height = config->hw->lm.max_height << 16;
/* Make sure source dimensions are within bounds. */
- if ((state->src_w > max_width) || (state->src_h > max_height)) {
+ if (state->src_h > max_height)
+ out_of_bounds = true;
+
+ if (state->src_w > max_width) {
+ /* If source split is supported, we can go up to 2x
+ * the max LM width, but we'd need to stage another
+ * hwpipe to the right LM. So, the drm_plane would
+ * consist of 2 hwpipes.
+ */
+ if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
+ (state->src_w <= 2 * max_width))
+ need_right_hwpipe = true;
+ else
+ out_of_bounds = true;
+ }
+
+ if (out_of_bounds) {
struct drm_rect src = drm_plane_state_src(state);
DBG("Invalid source size "DRM_RECT_FP_FMT,
DRM_RECT_FP_ARG(&src));
if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
new_hwpipe = true;
+ /*
+	 * (re)allocate a hw pipe if we're either requesting 2 hw pipes
+ * or we're switching from 2 hw pipes to 1 hw pipe because the
+ * new src_w can be supported by 1 hw pipe itself.
+ */
+ if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
+ (!need_right_hwpipe && mdp5_state->r_hwpipe))
+ new_hwpipe = true;
+
if (mdp5_kms->smp) {
const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(state->fb));
* it available for other planes?
*/
struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ struct mdp5_hw_pipe *old_right_hwpipe =
+ mdp5_state->r_hwpipe;
+
mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
plane, caps, blkcfg);
if (IS_ERR(mdp5_state->hwpipe)) {
DBG("%s: failed to assign hwpipe!", plane->name);
return PTR_ERR(mdp5_state->hwpipe);
}
+
+ if (need_right_hwpipe) {
+ mdp5_state->r_hwpipe =
+ mdp5_pipe_assign(state->state, plane,
+ caps, blkcfg);
+ if (IS_ERR(mdp5_state->r_hwpipe)) {
+ DBG("%s: failed to assign right hwpipe",
+ plane->name);
+ return PTR_ERR(mdp5_state->r_hwpipe);
+ }
+ } else {
+ /*
+ * set it to NULL so that the driver knows we
+ * don't have a right hwpipe when committing a
+ * new state
+ */
+ mdp5_state->r_hwpipe = NULL;
+ }
+
mdp5_pipe_release(state->state, old_hwpipe);
+ mdp5_pipe_release(state->state, old_right_hwpipe);
}
}
.atomic_update = mdp5_plane_atomic_update,
};
-static void set_scanout_locked(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
+ enum mdp5_pipe pipe,
+ struct drm_framebuffer *fb)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
- enum mdp5_pipe pipe = hwpipe->pipe;
-
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
msm_framebuffer_iova(fb, mdp5_kms->id, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 3));
-
- plane->fb = fb;
}
/* Note: mdp5_plane->pipe_lock must be locked */
}
}
+struct pixel_ext {
+ int left[COMP_MAX];
+ int right[COMP_MAX];
+ int top[COMP_MAX];
+ int bottom[COMP_MAX];
+};
+
+struct phase_step {
+ u32 x[COMP_MAX];
+ u32 y[COMP_MAX];
+};
+
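+/*
+ * Program one hwpipe. For a source-split plane this runs twice, with
+ * identical parameters except src_x/crtc_x (see mdp5_plane_mode_set).
+ */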
+static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
+ struct mdp5_hw_pipe *hwpipe,
+ struct drm_framebuffer *fb,
+ struct phase_step *step,
+ struct pixel_ext *pe,
+ u32 scale_config, u32 hdecm, u32 vdecm,
+ bool hflip, bool vflip,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_img_w, u32 src_img_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+ (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
+ COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ mdp5_write_pixel_ext(mdp5_kms, pipe, format,
+ src_w, pe->left, pe->right,
+ src_h, pe->top, pe->bottom);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ step->x[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ step->y[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ step->x[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ step->y[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ scale_config);
+ }
+
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
+ }
+
+ set_scanout_locked(mdp5_kms, pipe, fb);
+}
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest)
{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *pstate = plane->state;
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = hwpipe->pipe;
+ struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
- uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,};
- bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
- int pe_left[COMP_MAX], pe_right[COMP_MAX];
- int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
+ struct phase_step step = { 0 };
+ struct pixel_ext pe = { 0 };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
+ uint32_t src_img_w, src_img_h;
+ uint32_t src_x_r;
+ int crtc_x_r;
unsigned long flags;
int ret;
src_w = src_w >> 16;
src_h = src_h >> 16;
+ src_img_w = min(fb->width, src_w);
+ src_img_h = min(fb->height, src_h);
+
DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
+ right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
+ if (right_hwpipe) {
+ /*
+		 * if the plane comprises 2 hw pipes, assume that the width
+		 * is split equally across them. The only parameters that vary
+ * between the 2 pipes are src_x and crtc_x
+ */
+ crtc_w /= 2;
+ src_w /= 2;
+ src_img_w /= 2;
+
+ crtc_x_r = crtc_x + crtc_w;
+ src_x_r = src_x + src_w;
+ }
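+
+	/*
+	 * Illustrative numbers: src_x = 0 and src_w = 5120 on hardware
+	 * whose max LM width is 2560 becomes two 2560-wide fetches, the
+	 * right one at src_x_r = 2560 and crtc_x_r = crtc_x + 2560.
+	 */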
+
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
if (ret)
return ret;
- ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
if (ret)
return ret;
if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
- calc_pixel_ext(format, src_w, crtc_w, phasex_step,
- pe_left, pe_right, true);
- calc_pixel_ext(format, src_h, crtc_h, phasey_step,
- pe_top, pe_bottom, false);
+ calc_pixel_ext(format, src_w, crtc_w, step.x,
+ pe.left, pe.right, true);
+ calc_pixel_ext(format, src_h, crtc_h, step.y,
+ pe.top, pe.bottom, false);
}
/* TODO calc hdecm, vdecm */
hflip = !!(rotation & DRM_REFLECT_X);
vflip = !!(rotation & DRM_REFLECT_Y);
- spin_lock_irqsave(&hwpipe->pipe_lock, flags);
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
- MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
- MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
- MDP5_PIPE_SRC_XY_X(src_x) |
- MDP5_PIPE_SRC_XY_Y(src_y));
+ mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x, src_y, src_w, src_h);
+ if (right_hwpipe)
+ mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x_r, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x_r, src_y, src_w, src_h);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
- MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
- MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+ spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
- MDP5_PIPE_OUT_XY_X(crtc_x) |
- MDP5_PIPE_OUT_XY_Y(crtc_y));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
- MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
- MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
- MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
- MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
- COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
- MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
- MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
- COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
- MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
- MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
- MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
- MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
- MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
- MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
- (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
- (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
- COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
- MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
-
- /* not using secure mode: */
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
-
- if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
- mdp5_write_pixel_ext(mdp5_kms, pipe, format,
- src_w, pe_left, pe_right,
- src_h, pe_top, pe_bottom);
-
- if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
- phasex_step[COMP_0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
- phasey_step[COMP_0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
- phasex_step[COMP_1_2]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
- phasey_step[COMP_1_2]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
- MDP5_PIPE_DECIMATION_VERT(vdecm) |
- MDP5_PIPE_DECIMATION_HORZ(hdecm));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
- }
-
- if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
- if (MDP_FORMAT_IS_YUV(format))
- csc_enable(mdp5_kms, pipe,
- mdp_get_default_csc_cfg(CSC_YUV2RGB));
- else
- csc_disable(mdp5_kms, pipe);
- }
-
- set_scanout_locked(plane, fb);
-
- spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
+ plane->fb = fb;
return ret;
}
if (new_plane_state->visible) {
struct mdp5_ctl *ctl;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
ret = mdp5_plane_mode_set(plane, crtc, fb,
&new_plane_state->src,
ctl = mdp5_crtc_get_ctl(crtc);
- mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane));
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
}
*to_mdp5_plane_state(plane_state) =
src_x, src_y, src_w, src_h, ctx);
}
+/*
+ * Use this func and the one below only after the atomic state has been
+ * successfully swapped
+ */
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
return pstate->hwpipe->pipe;
}
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (!pstate->r_hwpipe)
+ return SSPP_NONE;
+
+ return pstate->r_hwpipe->pipe;
+}
+
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+ u32 mask;
if (WARN_ON(!pstate->hwpipe))
return 0;
- return pstate->hwpipe->flush_mask;
+ mask = pstate->hwpipe->flush_mask;
+
+ if (pstate->r_hwpipe)
+ mask |= pstate->r_hwpipe->flush_mask;
+
+ return mask;
}
/* initialize plane */
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
+ spin_lock_init(&mdp5_plane->pipe_lock);
+
if (type == DRM_PLANE_TYPE_CURSOR)
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_cursor_plane_funcs,
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
+#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */
/* MDP pipe capabilities */
#define MDP_PIPE_CAP_HFLIP BIT(0)
#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
#define MDP_PIPE_CAP_CURSOR BIT(6)
+/* MDP layer mixer caps */
+#define MDP_LM_CAP_DISPLAY BIT(0)
+#define MDP_LM_CAP_WB BIT(1)
+#define MDP_LM_CAP_PAIR BIT(2)
+
static inline bool pipe_supports_yuv(uint32_t pipe_caps)
{
return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
if (gpu) {
seq_printf(m, "%s Status:\n", gpu->name);
+ pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->show(gpu, m);
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
return 0;
struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_aspaces++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
+ if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;
- priv->aspace[idx] = aspace;
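+	/* only bump num_aspaces once the slot is actually claimed */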
+ priv->aspace[priv->num_aspaces] = aspace;
- return idx;
+ return priv->num_aspaces++;
}
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
if (gpu) {
mutex_lock(&ddev->struct_mutex);
+ // XXX what do we do here?
+ //pm_runtime_enable(&pdev->dev);
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
return 0;
}
-static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
DRIVER_ATOMIC |
DRIVER_MODESET,
.open = msm_open,
- .preclose = msm_preclose,
+ .postclose = msm_postclose,
.lastclose = msm_lastclose,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
-void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
size = PAGE_ALIGN(size);
+ /* Disallow zero sized objects as they make the underlying
+ * infrastructure grumpy
+ */
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
if (ret)
goto fail;
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"
*/
struct drm_mm mm;
struct msm_mmu *mmu;
+ struct kref kref;
};
struct msm_gem_vma {
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
+ if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ in_fence = sync_file_get_fence(args->fence_fd);
+
+ if (!in_fence)
+ return -EINVAL;
+
+ /* TODO if we get an array-fence due to userspace merging multiple
+ * fences, we need a way to determine if all the backing fences
+ * are from our own context..
+ */
+
+ if (in_fence->context != gpu->fctx->context) {
+ ret = dma_fence_wait(in_fence, true);
+ if (ret)
+ return ret;
+ }
+ }
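+ /* Note: the in-fence is resolved, and waited on if foreign, before
+ * struct_mutex is taken, so a slow fence cannot stall other submitters.
+ */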
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
if (ret)
goto out;
- if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
- in_fence = sync_file_get_fence(args->fence_fd);
-
- if (!in_fence) {
- ret = -EINVAL;
- goto out;
- }
-
- /* TODO if we get an array-fence due to userspace merging multiple
- * fences, we need a way to determine if all the backing fences
- * are from our own context..
- */
-
- if (in_fence->context != gpu->fctx->context) {
- ret = dma_fence_wait(in_fence, true);
- if (ret)
- goto out;
- }
-
- }
-
if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
#include "msm_gem.h"
#include "msm_mmu.h"
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+ struct msm_gem_address_space *aspace = container_of(kref,
+ struct msm_gem_address_space, kref);
+
+ drm_mm_takedown(&aspace->mm);
+ if (aspace->mmu)
+ aspace->mmu->funcs->destroy(aspace->mmu);
+ kfree(aspace);
+}
+
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+ if (aspace)
+ kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
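+/*
+ * Every mapped VMA holds a reference on its address space (taken in
+ * msm_gem_map_vma() and dropped in msm_gem_unmap_vma()), so the aspace
+ * is only freed once the last mapping is gone.
+ */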
+
void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
drm_mm_remove_node(&vma->node);
vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
}
int
size, IOMMU_READ | IOMMU_WRITE);
}
- return ret;
-}
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
-void
-msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
-{
- drm_mm_takedown(&aspace->mm);
- if (aspace->mmu)
- aspace->mmu->funcs->destroy(aspace->mmu);
- kfree(aspace);
+ return ret;
}
struct msm_gem_address_space *
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
(domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+ kref_init(&aspace->kref);
+
return aspace;
}
{
int i;
- if (gpu->grp_clks[0] && gpu->fast_rate)
- clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
+ if (gpu->core_clk && gpu->fast_rate)
+ clk_set_rate(gpu->core_clk, gpu->fast_rate);
/* Set the RBBM timer rate to 19.2Mhz */
- if (gpu->grp_clks[2])
- clk_set_rate(gpu->grp_clks[2], 19200000);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 19200000);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_prepare(gpu->grp_clks[i]);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
{
int i;
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
- if (gpu->grp_clks[0] && gpu->slow_rate)
- clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
+ /*
+ * Set the clock to a deliberately low rate. On older targets the clock
+ * speed had to be non zero to avoid problems. On newer targets this
+ * will be rounded down to zero anyway so it all works out.
+ */
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, 27000000);
- if (gpu->grp_clks[2])
- clk_set_rate(gpu->grp_clks[2], 0);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 0);
return 0;
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (gpu->active_cnt++ > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt <= 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
ret = enable_pwrrail(gpu);
if (ret)
if (ret)
return ret;
+ gpu->needs_hw_init = true;
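+ /* Actual hardware (re)init is deferred to msm_gpu_hw_init(), which
+ * runs with the GPU IRQ masked before the next submission.
+ */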
+
return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (--gpu->active_cnt > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt < 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
ret = disable_axi(gpu);
if (ret)
return 0;
}
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
+int msm_gpu_hw_init(struct msm_gpu *gpu)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
- struct drm_device *dev = gpu->dev;
-
- if (gpu->inactive)
- return;
-
- DBG("%s: inactive!\n", gpu->name);
- mutex_lock(&dev->struct_mutex);
- if (!(msm_gpu_active(gpu) || gpu->inactive)) {
- disable_axi(gpu);
- disable_clk(gpu);
- gpu->inactive = true;
- }
- mutex_unlock(&dev->struct_mutex);
-}
+ int ret;
-static void inactive_handler(unsigned long data)
-{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
- struct msm_drm_private *priv = gpu->dev->dev_private;
+ if (!gpu->needs_hw_init)
+ return 0;
- queue_work(priv->wq, &gpu->inactive_work);
-}
+ disable_irq(gpu->irq);
+ ret = gpu->funcs->hw_init(gpu);
+ if (!ret)
+ gpu->needs_hw_init = false;
+ enable_irq(gpu->irq);
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- del_timer(&gpu->inactive_timer);
- if (gpu->inactive) {
- enable_clk(gpu);
- enable_axi(gpu);
- gpu->inactive = false;
- }
-}
-
-static void inactive_start(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- mod_timer(&gpu->inactive_timer,
- round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+ return ret;
}
/*
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
- inactive_cancel(gpu);
+ pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
/* replay the remaining submits after the one that hung: */
list_for_each_entry(submit, &gpu->submit_list, node) {
{
unsigned long flags;
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
drm_gem_object_unreference(&msm_obj->base);
}
+ pm_runtime_mark_last_busy(&gpu->pdev->dev);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
msm_gem_submit_free(submit);
}
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);
-
- if (!msm_gpu_active(gpu))
- inactive_start(gpu);
}
/* call from irq handler to schedule work to retire bo's */
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- inactive_cancel(gpu);
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ msm_gpu_hw_init(gpu);
list_add_tail(&submit->node, &gpu->submit_list);
return gpu->funcs->irq(gpu);
}
-static const char *clk_names[] = {
- "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
-};
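+/*
+ * Clocks are now discovered from the "clock-names" DT property rather
+ * than a fixed table; a clock that fails to resolve becomes NULL and is
+ * simply skipped by the enable/disable paths.
+ */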
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+ struct clk *clk = devm_clk_get(dev, name);
+
+ return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct device *dev = &pdev->dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+ if (gpu->nr_clocks < 1) {
+ gpu->nr_clocks = 0;
+ return 0;
+ }
+
+ gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!gpu->grp_clks)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ gpu->grp_clks[i] = get_clock(dev, name);
+
+ /* Remember the key clocks that we need to control later */
+ if (!strcmp(name, "core"))
+ gpu->core_clk = gpu->grp_clks[i];
+ else if (!strcmp(name, "rbbmtimer"))
+ gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+ ++i;
+ }
+
+ return 0;
+}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, const char *ioname, const char *irqname, int ringsz)
{
struct iommu_domain *iommu;
- int i, ret;
+ int ret;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- gpu->inactive = true;
gpu->fctx = msm_fence_context_alloc(drm, name);
if (IS_ERR(gpu->fctx)) {
ret = PTR_ERR(gpu->fctx);
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
- INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
INIT_LIST_HEAD(&gpu->submit_list);
- setup_timer(&gpu->inactive_timer, inactive_handler,
- (unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
spin_lock_init(&gpu->perf_lock);
- BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
gpu->mmio = msm_ioremap(pdev, ioname, name);
goto fail;
}
- /* Acquire clocks: */
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
- DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
- if (IS_ERR(gpu->grp_clks[i]))
- gpu->grp_clks[i] = NULL;
- }
+ ret = get_clocks(pdev, gpu);
+ if (ret)
+ goto fail;
gpu->ebi1_clk = msm_clk_get(pdev, "bus");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
goto fail;
}
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
+
bs_init(gpu);
return 0;
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->aspace)
- msm_gem_address_space_destroy(gpu->aspace);
-
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
}
struct msm_gpu {
const char *name;
struct drm_device *dev;
+ struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
/* performance counters (hw & sw): */
/* fencing: */
struct msm_fence_context *fctx;
- /* is gpu powered/active? */
- int active_cnt;
- bool inactive;
+ /* does gpu need hw_init? */
+ bool needs_hw_init;
/* worker for handling active-list retiring: */
struct work_struct retire_work;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk *ebi1_clk, *grp_clks[6];
- uint32_t fast_rate, slow_rate, bus_freq;
+ struct clk **grp_clks;
+ int nr_clocks;
+ struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+ uint32_t fast_rate, bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
- struct timer_list inactive_timer;
- struct work_struct inactive_work;
+
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int ret;
+
+ pm_runtime_get_sync(mmu->dev);
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
+ pm_runtime_put_sync(mmu->dev);
+
+ return ret;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
+ pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- unsigned long da = iova;
- unsigned int i, j;
- int ret;
-
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- dma_addr_t pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
-
- VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+ size_t ret;
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
+// pm_runtime_get_sync(mmu->dev);
+ ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+// pm_runtime_put_sync(mmu->dev);
+ WARN_ON(ret < 0);
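+ /* iommu_map_sg() returns the number of bytes actually mapped;
+ * anything short of the requested length counts as a failure.
+ */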
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
+ return (ret == len) ? 0 : -EINVAL;
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- unsigned long da = iova;
- int i;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
-
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
-
- VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
-
- BUG_ON(!PAGE_ALIGNED(bytes));
- da += bytes;
- }
+ pm_runtime_get_sync(mmu->dev);
+ iommu_unmap(iommu->domain, iova, len);
+ pm_runtime_put_sync(mmu->dev);
return 0;
}
}
for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t iova = submit->cmd[i].iova;
+ uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
- (uint32_t[2]){ iova, szd }, 8);
+ (uint32_t[3]){ iova, szd, iova >> 32 }, 12);
break;
}
}
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_simple_kms_helper.h>
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int mxsfb_attach_endpoint(struct drm_device *drm,
- const struct of_endpoint *ep)
+int mxsfb_create_output(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- struct device_node *np;
struct drm_panel *panel;
- int ret = -EPROBE_DEFER;
-
- np = of_graph_get_remote_port_parent(ep->local_node);
- panel = of_drm_find_panel(np);
- of_node_put(np);
+ int ret;
- if (!panel)
- return -EPROBE_DEFER;
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL);
+ if (ret)
+ return ret;
mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
mxsfb->connector.polled = 0;
return ret;
}
-
-int mxsfb_create_output(struct drm_device *drm)
-{
- struct mxsfb_drm_private *mxsfb = drm->dev_private;
- struct device_node *ep_np = NULL;
- struct of_endpoint ep;
- int ret;
-
- for_each_endpoint_of_node(drm->dev->of_node, ep_np) {
- ret = of_graph_parse_endpoint(ep_np, &ep);
- if (!ret)
- ret = mxsfb_attach_endpoint(drm, &ep);
-
- if (ret) {
- of_node_put(ep_np);
- return ret;
- }
- }
-
- if (!mxsfb->panel)
- return -EPROBE_DEFER;
-
- return 0;
-}
static int
nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int i;
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
struct nvkm_vma *
}
}
-static int
-nv50_head_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- WARN_ON(1);
- return 0;
-}
-
static const struct drm_crtc_helper_funcs
nv50_head_help = {
- .mode_set_base_atomic = nv50_head_mode_set_base_atomic,
.load_lut = nv50_head_lut_load,
.atomic_check = nv50_head_atomic_check,
};
static int
nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 i;
if (!dpi)
return -ENOMEM;
- ep = omapdss_of_get_next_endpoint(port, NULL);
+ ep = of_get_next_child(port, NULL);
if (!ep)
return 0;
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/component.h>
struct device_node *ep;
struct omap_dsi_pin_config pin_cfg;
- ep = omapdss_of_get_first_endpoint(node);
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/seq_file.h>
#include "omapdss.h"
-struct device_node *
-omapdss_of_get_next_port(const struct device_node *parent,
- struct device_node *prev)
-{
- struct device_node *port = NULL;
-
- if (!parent)
- return NULL;
-
- if (!prev) {
- struct device_node *ports;
- /*
- * It's the first call, we have to find a port subnode
- * within this node or within an optional 'ports' node.
- */
- ports = of_get_child_by_name(parent, "ports");
- if (ports)
- parent = ports;
-
- port = of_get_child_by_name(parent, "port");
-
- /* release the 'ports' node */
- of_node_put(ports);
- } else {
- struct device_node *ports;
-
- ports = of_get_parent(prev);
- if (!ports)
- return NULL;
-
- do {
- port = of_get_next_child(ports, prev);
- if (!port) {
- of_node_put(ports);
- return NULL;
- }
- prev = port;
- } while (of_node_cmp(port->name, "port") != 0);
-
- of_node_put(ports);
- }
-
- return port;
-}
-EXPORT_SYMBOL_GPL(omapdss_of_get_next_port);
-
-struct device_node *
-omapdss_of_get_next_endpoint(const struct device_node *parent,
- struct device_node *prev)
-{
- struct device_node *ep = NULL;
-
- if (!parent)
- return NULL;
-
- do {
- ep = of_get_next_child(parent, prev);
- if (!ep)
- return NULL;
- prev = ep;
- } while (of_node_cmp(ep->name, "endpoint") != 0);
-
- return ep;
-}
-EXPORT_SYMBOL_GPL(omapdss_of_get_next_endpoint);
-
struct device_node *dss_of_port_get_parent_device(struct device_node *port)
{
struct device_node *np;
}
EXPORT_SYMBOL_GPL(dss_of_port_get_port_number);
-static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
-{
- struct device_node *np;
-
- np = of_parse_phandle(node, "remote-endpoint", 0);
- if (!np)
- return NULL;
-
- np = of_get_next_parent(np);
-
- return np;
-}
-
-struct device_node *
-omapdss_of_get_first_endpoint(const struct device_node *parent)
-{
- struct device_node *port, *ep;
-
- port = omapdss_of_get_next_port(parent, NULL);
-
- if (!port)
- return NULL;
-
- ep = omapdss_of_get_next_endpoint(port, NULL);
-
- of_node_put(port);
-
- return ep;
-}
-EXPORT_SYMBOL_GPL(omapdss_of_get_first_endpoint);
-
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node)
{
struct device_node *src_port;
struct omap_dss_device *src;
- ep = omapdss_of_get_first_endpoint(node);
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return ERR_PTR(-EINVAL);
- src_port = omapdss_of_get_remote_port(ep);
+ src_port = of_graph_get_remote_port(ep);
if (!src_port) {
of_node_put(ep);
return ERR_PTR(-EINVAL);
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
#include <linux/component.h>
{
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int r;
-
- if (parent == NULL)
- return 0;
+ int i;
- port = omapdss_of_get_next_port(parent, NULL);
- if (!port)
- return 0;
-
- if (dss.feat->num_ports == 0)
- return 0;
-
- do {
- enum omap_display_type port_type;
- u32 reg;
-
- r = of_property_read_u32(port, "reg", &reg);
- if (r)
- reg = 0;
-
- if (reg >= dss.feat->num_ports)
+ for (i = 0; i < dss.feat->num_ports; i++) {
+ port = of_graph_get_port_by_id(parent, i);
+ if (!port)
continue;
- port_type = dss.feat->ports[reg];
-
- switch (port_type) {
+ switch (dss.feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
dpi_init_port(pdev, port);
break;
default:
break;
}
- } while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
+ }
return 0;
}
{
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
+ int i;
- if (parent == NULL)
- return;
-
- port = omapdss_of_get_next_port(parent, NULL);
- if (!port)
- return;
-
- if (dss.feat->num_ports == 0)
- return;
-
- do {
- enum omap_display_type port_type;
- u32 reg;
- int r;
-
- r = of_property_read_u32(port, "reg", &reg);
- if (r)
- reg = 0;
-
- if (reg >= dss.feat->num_ports)
+ for (i = 0; i < dss.feat->num_ports; i++) {
+ port = of_graph_get_port_by_id(parent, i);
+ if (!port)
continue;
- port_type = dss.feat->ports[reg];
-
- switch (port_type) {
+ switch (dss.feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
dpi_uninit_port(port);
break;
default:
break;
}
- } while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
+ }
}
static int dss_video_pll_probe(struct platform_device *pdev)
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
#include "omapdss.h"
struct device_node *ep;
int r;
- ep = omapdss_of_get_first_endpoint(node);
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
#include "omapdss.h"
struct device_node *ep;
int r;
- ep = omapdss_of_get_first_endpoint(node);
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
}
-struct device_node *
-omapdss_of_get_next_port(const struct device_node *parent,
- struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_next_endpoint(const struct device_node *parent,
- struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_first_endpoint(const struct device_node *parent);
-
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node);
u32 datapairs;
int r;
- ep = omapdss_of_get_next_endpoint(port, NULL);
+ ep = of_get_next_child(port, NULL);
if (!ep)
return 0;
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/component.h>
#include "omapdss.h"
u32 channels;
int r;
- ep = omapdss_of_get_first_endpoint(node);
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
return 0;
.release = omap_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
- .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
- .kmap = omap_gem_dmabuf_kmap,
- .kunmap = omap_gem_dmabuf_kunmap,
+ .map_atomic = omap_gem_dmabuf_kmap_atomic,
+ .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+ .map = omap_gem_dmabuf_kmap,
+ .unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
};
WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
Xperia Z2 tablets
+config DRM_PANEL_SAMSUNG_S6E3HA2
+ tristate "Samsung S6E3HA2 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
(540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard
+config DRM_PANEL_SITRONIX_ST7789V
+ tristate "Sitronix ST7789V panel"
+ depends on OF && SPI
+ help
+ Say Y here if you want to enable support for the Sitronix
+ ST7789V controller for 240x320 LCD panels
+
endmenu
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
+obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
--- /dev/null
+/*
+ * MIPI-DSI based s6e3ha2 AMOLED 5.7 inch panel driver.
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Donghwa Lee <dh09.lee@samsung.com>
+ * Hyungwon Hwang <human.hwang@samsung.com>
+ * Hoegeun Kwon <hoegeun.kwon@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#define S6E3HA2_MIN_BRIGHTNESS 0
+#define S6E3HA2_MAX_BRIGHTNESS 100
+#define S6E3HA2_DEFAULT_BRIGHTNESS 80
+
+#define S6E3HA2_NUM_GAMMA_STEPS 46
+#define S6E3HA2_GAMMA_CMD_CNT 35
+#define S6E3HA2_VINT_STATUS_MAX 10
+
+static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = {
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x82, 0x83,
+ 0x85, 0x88, 0x8b, 0x8b, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8c,
+ 0x94, 0x84, 0xb1, 0xaf, 0x8e, 0xcf, 0xad, 0xc9, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x84, 0x84,
+ 0x85, 0x87, 0x8b, 0x8a, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8a,
+ 0x93, 0x84, 0xb0, 0xae, 0x8e, 0xc9, 0xa8, 0xc5, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x8a, 0x8a, 0x84, 0x88, 0x81, 0x84, 0x8a, 0x88, 0x8a,
+ 0x91, 0x84, 0xb1, 0xae, 0x8b, 0xd5, 0xb2, 0xcc, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x8a, 0x8a, 0x84, 0x87, 0x81, 0x84, 0x8a, 0x87, 0x8a,
+ 0x91, 0x85, 0xae, 0xac, 0x8a, 0xc3, 0xa3, 0xc0, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x85, 0x85,
+ 0x86, 0x85, 0x88, 0x89, 0x84, 0x89, 0x82, 0x84, 0x87, 0x85, 0x8b,
+ 0x91, 0x88, 0xad, 0xab, 0x8a, 0xb7, 0x9b, 0xb6, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x89, 0x8a, 0x84, 0x89, 0x83, 0x83, 0x86, 0x84, 0x8b,
+ 0x90, 0x84, 0xb0, 0xae, 0x8b, 0xce, 0xad, 0xc8, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x89,
+ 0x8f, 0x84, 0xac, 0xaa, 0x89, 0xb1, 0x98, 0xaf, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x88, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8c,
+ 0x91, 0x86, 0xac, 0xaa, 0x89, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x88,
+ 0x8b, 0x82, 0xad, 0xaa, 0x8a, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8a,
+ 0x8e, 0x84, 0xae, 0xac, 0x89, 0xda, 0xb7, 0xd0, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x80, 0x83, 0x82, 0x8b,
+ 0x8e, 0x85, 0xac, 0xaa, 0x89, 0xc8, 0xaa, 0xc1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x81, 0x85, 0x81, 0x84, 0x86, 0x84, 0x8c,
+ 0x8c, 0x84, 0xa9, 0xa8, 0x87, 0xa3, 0x92, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x84, 0x86, 0x83, 0x80, 0x83, 0x81, 0x8c,
+ 0x8d, 0x84, 0xaa, 0xaa, 0x89, 0xce, 0xaf, 0xc5, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
+ 0x8c, 0x84, 0xa8, 0xa8, 0x88, 0xb5, 0x9f, 0xb0, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
+ 0x8b, 0x84, 0xab, 0xa8, 0x86, 0xd4, 0xb4, 0xc9, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x84, 0x84, 0x85, 0x8b,
+ 0x8a, 0x83, 0xa6, 0xa5, 0x84, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x86, 0x85, 0x86, 0x86, 0x82, 0x85, 0x81, 0x82, 0x83, 0x84, 0x8e,
+ 0x8b, 0x83, 0xa4, 0xa3, 0x8a, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8e,
+ 0x8b, 0x83, 0xa4, 0xa2, 0x86, 0xc1, 0xa9, 0xb7, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8d,
+ 0x89, 0x82, 0xa2, 0xa1, 0x84, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x83, 0x83, 0x85, 0x8c,
+ 0x87, 0x7f, 0xa2, 0x9d, 0x88, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xbb, 0x00, 0xc5, 0x00, 0xb4, 0x87, 0x86, 0x86, 0x84, 0x83,
+ 0x86, 0x87, 0x87, 0x87, 0x80, 0x82, 0x7f, 0x86, 0x86, 0x88, 0x8a,
+ 0x84, 0x7e, 0x9d, 0x9c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xbd, 0x00, 0xc7, 0x00, 0xb7, 0x87, 0x85, 0x85, 0x84, 0x83,
+ 0x86, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x83, 0x84, 0x85, 0x8a,
+ 0x85, 0x7e, 0x9c, 0x9b, 0x85, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xc0, 0x00, 0xca, 0x00, 0xbb, 0x87, 0x86, 0x85, 0x83, 0x83,
+ 0x85, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x84, 0x85, 0x86, 0x89,
+ 0x83, 0x7d, 0x9c, 0x99, 0x87, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xc4, 0x00, 0xcd, 0x00, 0xbe, 0x87, 0x86, 0x85, 0x83, 0x83,
+ 0x86, 0x85, 0x85, 0x87, 0x81, 0x82, 0x80, 0x82, 0x82, 0x83, 0x8a,
+ 0x85, 0x7f, 0x9f, 0x9b, 0x86, 0xb4, 0xa1, 0xac, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xc7, 0x00, 0xd0, 0x00, 0xc2, 0x87, 0x85, 0x85, 0x83, 0x82,
+ 0x85, 0x85, 0x85, 0x86, 0x82, 0x83, 0x80, 0x82, 0x82, 0x84, 0x87,
+ 0x86, 0x80, 0x9e, 0x9a, 0x87, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xca, 0x00, 0xd2, 0x00, 0xc5, 0x87, 0x85, 0x84, 0x82, 0x82,
+ 0x84, 0x85, 0x85, 0x86, 0x81, 0x82, 0x7f, 0x82, 0x82, 0x84, 0x88,
+ 0x86, 0x81, 0x9d, 0x98, 0x86, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xce, 0x00, 0xd6, 0x00, 0xca, 0x86, 0x85, 0x84, 0x83, 0x83,
+ 0x85, 0x84, 0x84, 0x85, 0x81, 0x82, 0x80, 0x81, 0x81, 0x82, 0x89,
+ 0x86, 0x81, 0x9c, 0x97, 0x86, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xd1, 0x00, 0xd9, 0x00, 0xce, 0x86, 0x84, 0x83, 0x83, 0x82,
+ 0x85, 0x85, 0x85, 0x86, 0x81, 0x83, 0x81, 0x82, 0x82, 0x83, 0x86,
+ 0x83, 0x7f, 0x99, 0x95, 0x86, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xd4, 0x00, 0xdb, 0x00, 0xd1, 0x86, 0x85, 0x83, 0x83, 0x82,
+ 0x85, 0x84, 0x84, 0x85, 0x80, 0x83, 0x82, 0x80, 0x80, 0x81, 0x87,
+ 0x84, 0x81, 0x98, 0x93, 0x85, 0xae, 0x9c, 0xa8, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xd8, 0x00, 0xde, 0x00, 0xd6, 0x86, 0x84, 0x83, 0x81, 0x81,
+ 0x83, 0x85, 0x85, 0x85, 0x82, 0x83, 0x81, 0x81, 0x81, 0x83, 0x86,
+ 0x84, 0x80, 0x98, 0x91, 0x85, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xdc, 0x00, 0xe2, 0x00, 0xda, 0x85, 0x84, 0x83, 0x82, 0x82,
+ 0x84, 0x84, 0x84, 0x85, 0x81, 0x82, 0x82, 0x80, 0x80, 0x81, 0x83,
+ 0x82, 0x7f, 0x99, 0x93, 0x86, 0x94, 0x8b, 0x92, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xdf, 0x00, 0xe5, 0x00, 0xde, 0x85, 0x84, 0x82, 0x82, 0x82,
+ 0x84, 0x83, 0x83, 0x84, 0x81, 0x81, 0x80, 0x83, 0x82, 0x84, 0x82,
+ 0x81, 0x7f, 0x99, 0x92, 0x86, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x83, 0x83, 0x84, 0x80,
+ 0x81, 0x7c, 0x99, 0x92, 0x87, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x85, 0x84, 0x83, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x83, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
+ 0x82, 0x80, 0x91, 0x8d, 0x83, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
+ 0x81, 0x7f, 0x91, 0x8c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x82,
+ 0x82, 0x7f, 0x94, 0x89, 0x84, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x83,
+ 0x82, 0x7f, 0x91, 0x85, 0x81, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x83, 0x82, 0x84, 0x83,
+ 0x82, 0x7f, 0x90, 0x84, 0x81, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x80, 0x80,
+ 0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x80, 0x80, 0x81, 0x81,
+ 0x82, 0x83, 0x7e, 0x80, 0x7c, 0xa4, 0x97, 0x9f, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe9, 0x00, 0xec, 0x00, 0xe8, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x83, 0x7f, 0x7f, 0x7f, 0x81, 0x80, 0x82, 0x83,
+ 0x83, 0x84, 0x79, 0x7c, 0x79, 0xb1, 0xa0, 0xaa, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xed, 0x00, 0xf0, 0x00, 0xec, 0x83, 0x83, 0x82, 0x80, 0x80,
+ 0x81, 0x82, 0x82, 0x82, 0x7f, 0x7f, 0x7e, 0x81, 0x81, 0x82, 0x80,
+ 0x81, 0x81, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xf1, 0x00, 0xf4, 0x00, 0xf1, 0x83, 0x82, 0x82, 0x80, 0x80,
+ 0x81, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x7d,
+ 0x7e, 0x7f, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xf6, 0x00, 0xf7, 0x00, 0xf5, 0x82, 0x82, 0x81, 0x80, 0x80,
+ 0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x82,
+ 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xfa, 0x00, 0xfb, 0x00, 0xfa, 0x81, 0x81, 0x81, 0x80, 0x80,
+ 0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 }
+};
+
+static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c,
+ 0x1d, 0x1e, 0x1f, 0x20, 0x21
+};
+
+struct s6e3ha2 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+};
+
+static int s6e3ha2_dcs_write(struct s6e3ha2 *ctx, const void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ return mipi_dsi_dcs_write_buffer(dsi, data, len);
+}
+
+#define s6e3ha2_dcs_write_seq_static(ctx, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = s6e3ha2_dcs_write(ctx, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+} while (0)
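+/* Note: on a write error this macro returns from the *calling* function. */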
+
+#define s6e3ha2_call_write_func(ret, func) do { \
+ ret = (func); \
+ if (ret < 0) \
+ return ret; \
+} while (0)
+
+static int s6e3ha2_test_key_on_f0(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
+ return 0;
+}
+
+static int s6e3ha2_test_key_off_f0(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0xa5, 0xa5);
+ return 0;
+}
+
+static int s6e3ha2_test_key_on_fc(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
+ return 0;
+}
+
+static int s6e3ha2_test_key_off_fc(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0xa5, 0xa5);
+ return 0;
+}
+
+static int s6e3ha2_single_dsi_set(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf2, 0x67);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf9, 0x09);
+ return 0;
+}
+
+static int s6e3ha2_freq_calibration(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfd, 0x1c);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20, 0x39);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0xa0);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x12, 0x62, 0x40,
+ 0x80, 0xc0, 0x28, 0x28, 0x28, 0x28, 0x39, 0xc5);
+ return 0;
+}
+
+static int s6e3ha2_aor_control(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb2, 0x03, 0x10);
+ return 0;
+}
+
+static int s6e3ha2_caps_elvss_set(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb6, 0x9c, 0x0a);
+ return 0;
+}
+
+static int s6e3ha2_acl_off(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0x55, 0x00);
+ return 0;
+}
+
+static int s6e3ha2_acl_off_opr(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb5, 0x40);
+ return 0;
+}
+
+static int s6e3ha2_test_global(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x07);
+ return 0;
+}
+
+static int s6e3ha2_test(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb8, 0x19);
+ return 0;
+}
+
+static int s6e3ha2_touch_hsync_on1(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xbd, 0x33, 0x11, 0x02,
+ 0x16, 0x02, 0x16);
+ return 0;
+}
+
+static int s6e3ha2_pentile_control(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xc0, 0x00, 0x00, 0xd8, 0xd8);
+ return 0;
+}
+
+static int s6e3ha2_poc_global(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x20);
+ return 0;
+}
+
+static int s6e3ha2_poc_setting(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x08);
+ return 0;
+}
+
+static int s6e3ha2_pcd_set_off(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xcc, 0x40, 0x51);
+ return 0;
+}
+
+static int s6e3ha2_err_fg_set(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xed, 0x44);
+ return 0;
+}
+
+static int s6e3ha2_hbm_off(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0x53, 0x00);
+ return 0;
+}
+
+static int s6e3ha2_te_start_setting(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb9, 0x10, 0x09, 0xff, 0x00, 0x09);
+ return 0;
+}
+
+static int s6e3ha2_gamma_update(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x03);
+ ndelay(100); /* a 100ns delay is needed between the update writes */
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x00);
+ return 0;
+}
+
+static int s6e3ha2_get_brightness(struct backlight_device *bl_dev)
+{
+ return bl_dev->props.brightness;
+}
+
+static int s6e3ha2_set_vint(struct s6e3ha2 *ctx)
+{
+ struct backlight_device *bl_dev = ctx->bl_dev;
+ unsigned int brightness = bl_dev->props.brightness;
+ unsigned char data[] = { 0xf4, 0x8b,
+ vint_table[brightness * (S6E3HA2_VINT_STATUS_MAX - 1) /
+ S6E3HA2_MAX_BRIGHTNESS] };
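+ /* brightness 0..100 selects one of the ten VINT levels; e.g. the
+ * default brightness of 80 maps to vint_table[7].
+ */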
+
+ return s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data));
+}
+
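+/*
+ * Map backlight brightness (0..100) onto the 46 gamma steps; e.g. the
+ * default brightness of 80 lands on step (80 * 45) / 100 = 36.
+ */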
+static unsigned int s6e3ha2_get_brightness_index(unsigned int brightness)
+{
+ return (brightness * (S6E3HA2_NUM_GAMMA_STEPS - 1)) /
+ S6E3HA2_MAX_BRIGHTNESS;
+}
+
+static int s6e3ha2_update_gamma(struct s6e3ha2 *ctx, unsigned int brightness)
+{
+ struct backlight_device *bl_dev = ctx->bl_dev;
+ unsigned int index = s6e3ha2_get_brightness_index(brightness);
+ u8 data[S6E3HA2_GAMMA_CMD_CNT + 1] = { 0xca, };
+ int ret;
+
+ memcpy(data + 1, gamma_tbl + index, S6E3HA2_GAMMA_CMD_CNT);
+ s6e3ha2_call_write_func(ret,
+ s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data)));
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
+ bl_dev->props.brightness = brightness;
+
+ return 0;
+}
+
+static int s6e3ha2_set_brightness(struct backlight_device *bl_dev)
+{
+ struct s6e3ha2 *ctx = bl_get_data(bl_dev);
+ unsigned int brightness = bl_dev->props.brightness;
+ int ret;
+
+ if (brightness < S6E3HA2_MIN_BRIGHTNESS ||
+ brightness > bl_dev->props.max_brightness) {
+ dev_err(ctx->dev, "Invalid brightness: %u\n", brightness);
+ return -EINVAL;
+ }
+
+ if (bl_dev->props.power > FB_BLANK_NORMAL)
+ return -EPERM;
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_update_gamma(ctx, brightness));
+ s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_set_vint(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
+
+ return 0;
+}
+
+static const struct backlight_ops s6e3ha2_bl_ops = {
+ .get_brightness = s6e3ha2_get_brightness,
+ .update_status = s6e3ha2_set_brightness,
+};
+
+static int s6e3ha2_panel_init(struct s6e3ha2 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_exit_sleep_mode(dsi));
+ usleep_range(5000, 6000);
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_single_dsi_set(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_freq_calibration(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
+
+ return 0;
+}
+
+static int s6e3ha2_power_off(struct s6e3ha2 *ctx)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int s6e3ha2_disable(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_enter_sleep_mode(dsi));
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_off(dsi));
+
+ msleep(40);
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+}
+
+static int s6e3ha2_unprepare(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+
+ return s6e3ha2_power_off(ctx);
+}
+
+static int s6e3ha2_power_on(struct s6e3ha2 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ gpiod_set_value(ctx->enable_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value(ctx->enable_gpio, 1);
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+
+ return 0;
+}
+static int s6e3ha2_prepare(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+ int ret;
+
+ ret = s6e3ha2_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e3ha2_panel_init(ctx);
+ if (ret < 0)
+ goto err;
+
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+
+err:
+ s6e3ha2_power_off(ctx);
+ return ret;
+}
+
+static int s6e3ha2_enable(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /* common setting */
+ s6e3ha2_call_write_func(ret,
+ mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK));
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_touch_hsync_on1(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_pentile_control(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_poc_global(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_poc_setting(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));
+
+ /* pcd setting off for TB */
+ s6e3ha2_call_write_func(ret, s6e3ha2_pcd_set_off(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_err_fg_set(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_te_start_setting(ctx));
+
+ /* brightness setting */
+ s6e3ha2_call_write_func(ret, s6e3ha2_set_brightness(ctx->bl_dev));
+ s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_caps_elvss_set(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_acl_off(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_acl_off_opr(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_hbm_off(ctx));
+
+ /* elvss temp compensation */
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_global(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
+
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_on(dsi));
+ ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 222372,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 1,
+ .hsync_end = 1440 + 1 + 1,
+ .htotal = 1440 + 1 + 1 + 1,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 1,
+ .vsync_end = 2560 + 1 + 1,
+ .vtotal = 2560 + 1 + 1 + 15,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static int s6e3ha2_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 71;
+ connector->display_info.height_mm = 125;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e3ha2_drm_funcs = {
+ .disable = s6e3ha2_disable,
+ .unprepare = s6e3ha2_unprepare,
+ .prepare = s6e3ha2_prepare,
+ .enable = s6e3ha2_enable,
+ .get_modes = s6e3ha2_get_modes,
+};
+
+static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e3ha2 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->enable_gpio)) {
+ dev_err(dev, "cannot get enable-gpios %ld\n",
+ PTR_ERR(ctx->enable_gpio));
+ return PTR_ERR(ctx->enable_gpio);
+ }
+
+ ctx->bl_dev = backlight_device_register("s6e3ha2", dev, ctx,
+ &s6e3ha2_bl_ops, NULL);
+ if (IS_ERR(ctx->bl_dev)) {
+ dev_err(dev, "failed to register backlight device\n");
+ return PTR_ERR(ctx->bl_dev);
+ }
+
+ ctx->bl_dev->props.max_brightness = S6E3HA2_MAX_BRIGHTNESS;
+ ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &s6e3ha2_drm_funcs;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ goto unregister_backlight;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ goto remove_panel;
+
+ return ret;
+
+remove_panel:
+ drm_panel_remove(&ctx->panel);
+
+unregister_backlight:
+ backlight_device_unregister(ctx->bl_dev);
+
+ return ret;
+}
+
+static int s6e3ha2_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e3ha2 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+ backlight_device_unregister(ctx->bl_dev);
+
+ return 0;
+}
+
+static const struct of_device_id s6e3ha2_of_match[] = {
+ { .compatible = "samsung,s6e3ha2" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e3ha2_of_match);
+
+static struct mipi_dsi_driver s6e3ha2_driver = {
+ .probe = s6e3ha2_probe,
+ .remove = s6e3ha2_remove,
+ .driver = {
+ .name = "panel-samsung-s6e3ha2",
+ .of_match_table = s6e3ha2_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e3ha2_driver);
+
+MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
+MODULE_AUTHOR("Hyungwon Hwang <human.hwang@samsung.com>");
+MODULE_AUTHOR("Hoegeun Kwon <hoegeun.kwon@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based s6e3ha2 AMOLED Panel Driver");
+MODULE_LICENSE("GPL v2");
panel_simple_disable(&panel->base);
}
+static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ampire_am_480272h3tmqw_t01h = {
+ .modes = &ampire_am_480272h3tmqw_t01h_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 105,
+ .height = 67,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
.clock = 33333,
.hdisplay = 800,
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode winstar_wf35ltiacd_mode = {
+ .clock = 6410,
+ .hdisplay = 320,
+ .hsync_start = 320 + 20,
+ .hsync_end = 320 + 20 + 30,
+ .htotal = 320 + 20 + 30 + 38,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 3,
+ .vtotal = 240 + 4 + 3 + 15,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc winstar_wf35ltiacd = {
+ .modes = &winstar_wf35ltiacd_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 70,
+ .height = 53,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct of_device_id platform_of_match[] = {
{
+ .compatible = "ampire,am-480272h3tmqw-t01h",
+ .data = &ampire_am_480272h3tmqw_t01h,
+ }, {
.compatible = "ampire,am800480r3tmqwa1h",
.data = &ampire_am800480r3tmqwa1h,
}, {
}, {
.compatible = "urt,umsh-8596md-20t",
.data = &urt_umsh_8596md_parallel,
+ }, {
+ .compatible = "winstar,wf35ltiacd",
+ .data = &winstar_wf35ltiacd,
}, {
/* sentinel */
}
--- /dev/null
+/*
+ * Copyright (C) 2017 Free Electrons
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define ST7789V_COLMOD_RGB_FMT_18BITS (6 << 4)
+#define ST7789V_COLMOD_CTRL_FMT_18BITS (6 << 0)
+
+#define ST7789V_RAMCTRL_CMD 0xb0
+#define ST7789V_RAMCTRL_RM_RGB BIT(4)
+#define ST7789V_RAMCTRL_DM_RGB BIT(0)
+#define ST7789V_RAMCTRL_MAGIC (3 << 6)
+#define ST7789V_RAMCTRL_EPF(n) (((n) & 3) << 4)
+
+#define ST7789V_RGBCTRL_CMD 0xb1
+#define ST7789V_RGBCTRL_WO BIT(7)
+#define ST7789V_RGBCTRL_RCM(n) (((n) & 3) << 5)
+#define ST7789V_RGBCTRL_VSYNC_HIGH BIT(3)
+#define ST7789V_RGBCTRL_HSYNC_HIGH BIT(2)
+#define ST7789V_RGBCTRL_PCLK_HIGH BIT(1)
+#define ST7789V_RGBCTRL_VBP(n) ((n) & 0x7f)
+#define ST7789V_RGBCTRL_HBP(n) ((n) & 0x1f)
+
+#define ST7789V_PORCTRL_CMD 0xb2
+#define ST7789V_PORCTRL_IDLE_BP(n) (((n) & 0xf) << 4)
+#define ST7789V_PORCTRL_IDLE_FP(n) ((n) & 0xf)
+#define ST7789V_PORCTRL_PARTIAL_BP(n) (((n) & 0xf) << 4)
+#define ST7789V_PORCTRL_PARTIAL_FP(n) ((n) & 0xf)
+
+#define ST7789V_GCTRL_CMD 0xb7
+#define ST7789V_GCTRL_VGHS(n) (((n) & 7) << 4)
+#define ST7789V_GCTRL_VGLS(n) ((n) & 7)
+
+#define ST7789V_VCOMS_CMD 0xbb
+
+#define ST7789V_LCMCTRL_CMD 0xc0
+#define ST7789V_LCMCTRL_XBGR BIT(5)
+#define ST7789V_LCMCTRL_XMX BIT(3)
+#define ST7789V_LCMCTRL_XMH BIT(2)
+
+#define ST7789V_VDVVRHEN_CMD 0xc2
+#define ST7789V_VDVVRHEN_CMDEN BIT(0)
+
+#define ST7789V_VRHS_CMD 0xc3
+
+#define ST7789V_VDVS_CMD 0xc4
+
+#define ST7789V_FRCTRL2_CMD 0xc6
+
+#define ST7789V_PWCTRL1_CMD 0xd0
+#define ST7789V_PWCTRL1_MAGIC 0xa4
+#define ST7789V_PWCTRL1_AVDD(n) (((n) & 3) << 6)
+#define ST7789V_PWCTRL1_AVCL(n) (((n) & 3) << 4)
+#define ST7789V_PWCTRL1_VDS(n) ((n) & 3)
+
+#define ST7789V_PVGAMCTRL_CMD 0xe0
+#define ST7789V_PVGAMCTRL_JP0(n) (((n) & 3) << 4)
+#define ST7789V_PVGAMCTRL_JP1(n) (((n) & 3) << 4)
+#define ST7789V_PVGAMCTRL_VP0(n) ((n) & 0xf)
+#define ST7789V_PVGAMCTRL_VP1(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP2(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP4(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP6(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP13(n) ((n) & 0xf)
+#define ST7789V_PVGAMCTRL_VP20(n) ((n) & 0x7f)
+#define ST7789V_PVGAMCTRL_VP27(n) ((n) & 7)
+#define ST7789V_PVGAMCTRL_VP36(n) (((n) & 7) << 4)
+#define ST7789V_PVGAMCTRL_VP43(n) ((n) & 0x7f)
+#define ST7789V_PVGAMCTRL_VP50(n) ((n) & 0xf)
+#define ST7789V_PVGAMCTRL_VP57(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP59(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP61(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP62(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP63(n) (((n) & 0xf) << 4)
+
+#define ST7789V_NVGAMCTRL_CMD 0xe1
+#define ST7789V_NVGAMCTRL_JN0(n) (((n) & 3) << 4)
+#define ST7789V_NVGAMCTRL_JN1(n) (((n) & 3) << 4)
+#define ST7789V_NVGAMCTRL_VN0(n) ((n) & 0xf)
+#define ST7789V_NVGAMCTRL_VN1(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN2(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN4(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN6(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN13(n) ((n) & 0xf)
+#define ST7789V_NVGAMCTRL_VN20(n) ((n) & 0x7f)
+#define ST7789V_NVGAMCTRL_VN27(n) ((n) & 7)
+#define ST7789V_NVGAMCTRL_VN36(n) (((n) & 7) << 4)
+#define ST7789V_NVGAMCTRL_VN43(n) ((n) & 0x7f)
+#define ST7789V_NVGAMCTRL_VN50(n) ((n) & 0xf)
+#define ST7789V_NVGAMCTRL_VN57(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN59(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN61(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN62(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN63(n) (((n) & 0xf) << 4)
+
+#define ST7789V_TEST(val, func) \
+ do { \
+ if ((val = (func))) \
+ return val; \
+ } while (0)
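+/* Evaluates func exactly once; a non-zero result is stored in val and
+ * immediately returned from the calling function.
+ */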
+
+struct st7789v {
+ struct drm_panel panel;
+ struct spi_device *spi;
+ struct gpio_desc *reset;
+ struct backlight_device *backlight;
+ struct regulator *power;
+};
+
+enum st7789v_prefix {
+ ST7789V_COMMAND = 0,
+ ST7789V_DATA = 1,
+};
+
+static inline struct st7789v *panel_to_st7789v(struct drm_panel *panel)
+{
+ return container_of(panel, struct st7789v, panel);
+}
+
+static int st7789v_spi_write(struct st7789v *ctx, enum st7789v_prefix prefix,
+ u8 data)
+{
+ struct spi_transfer xfer = { };
+ struct spi_message msg;
+ u16 txbuf = ((prefix & 1) << 8) | data;
+
+ spi_message_init(&msg);
+
+ xfer.tx_buf = &txbuf;
+ xfer.bits_per_word = 9;
+ xfer.len = sizeof(txbuf);
+
+ spi_message_add_tail(&xfer, &msg);
+ return spi_sync(ctx->spi, &msg);
+}
+
+static int st7789v_write_command(struct st7789v *ctx, u8 cmd)
+{
+ return st7789v_spi_write(ctx, ST7789V_COMMAND, cmd);
+}
+
+static int st7789v_write_data(struct st7789v *ctx, u8 cmd)
+{
+ return st7789v_spi_write(ctx, ST7789V_DATA, cmd);
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 7000,
+ .hdisplay = 240,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
+ .vdisplay = 320,
+ .vsync_start = 320 + 8,
+ .vsync_end = 320 + 8 + 4,
+ .vtotal = 320 + 8 + 4 + 4,
+ .vrefresh = 60,
+};
+
+static int st7789v_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.width_mm = 61;
+ panel->connector->display_info.height_mm = 103;
+
+ return 1;
+}
+
+static int st7789v_prepare(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+ int ret;
+
+ ret = regulator_enable(ctx->power);
+ if (ret)
+ return ret;
+
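+	/* Hardware reset: assert for 30ms, then let the panel restart for 120ms */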
+ gpiod_set_value(ctx->reset, 1);
+ msleep(30);
+ gpiod_set_value(ctx->reset, 0);
+ msleep(120);
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_EXIT_SLEEP_MODE));
+
+ /* We need to wait 120ms after a sleep out command */
+ msleep(120);
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx,
+ MIPI_DCS_SET_ADDRESS_MODE));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0));
+
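+	/* Request 18bpp in both the DPI (high) and DBI (low) format nibbles */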
+ ST7789V_TEST(ret, st7789v_write_command(ctx,
+ MIPI_DCS_SET_PIXEL_FORMAT));
+ ST7789V_TEST(ret, st7789v_write_data(ctx,
+ (MIPI_DCS_PIXEL_FMT_18BIT << 4) |
+ (MIPI_DCS_PIXEL_FMT_18BIT)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PORCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PORCTRL_IDLE_BP(3) |
+ ST7789V_PORCTRL_IDLE_FP(3)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx,
+ ST7789V_PORCTRL_PARTIAL_BP(3) |
+ ST7789V_PORCTRL_PARTIAL_FP(3)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_GCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_GCTRL_VGLS(5) |
+ ST7789V_GCTRL_VGHS(3)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VCOMS_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0x2b));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_LCMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_LCMCTRL_XMH |
+ ST7789V_LCMCTRL_XMX |
+ ST7789V_LCMCTRL_XBGR));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVVRHEN_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_VDVVRHEN_CMDEN));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VRHS_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVS_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0x20));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_FRCTRL2_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PWCTRL1_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_MAGIC));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_AVDD(2) |
+ ST7789V_PWCTRL1_AVCL(2) |
+ ST7789V_PWCTRL1_VDS(1)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PVGAMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP63(0xd)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP1(0xca)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP2(0xe)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP4(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP6(9)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP13(7)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP20(0x2d)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP27(0xb) |
+ ST7789V_PVGAMCTRL_VP36(3)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP43(0x3d)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_JP1(3) |
+ ST7789V_PVGAMCTRL_VP50(4)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP57(0xa)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP59(0xa)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP61(0x1b)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP62(0x28)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_NVGAMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN63(0xd)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN1(0xca)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN2(0xf)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN4(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN6(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN13(7)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN20(0x2e)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN27(0xc) |
+ ST7789V_NVGAMCTRL_VN36(5)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN43(0x40)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_JN1(3) |
+ ST7789V_NVGAMCTRL_VN50(4)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN57(9)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN59(0xb)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN61(0x1b)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN62(0x28)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_INVERT_MODE));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
+ ST7789V_RAMCTRL_RM_RGB));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_EPF(3) |
+ ST7789V_RAMCTRL_MAGIC));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO |
+ ST7789V_RGBCTRL_RCM(2) |
+ ST7789V_RGBCTRL_VSYNC_HIGH |
+ ST7789V_RGBCTRL_HSYNC_HIGH |
+ ST7789V_RGBCTRL_PCLK_HIGH));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_HBP(20)));
+
+ return 0;
+}
+
+static int st7789v_enable(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+
+ if (ctx->backlight) {
+ ctx->backlight->props.state &= ~BL_CORE_FBBLANK;
+ ctx->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(ctx->backlight);
+ }
+
+ return st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int st7789v_disable(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+ int ret;
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_OFF));
+
+ if (ctx->backlight) {
+ ctx->backlight->props.power = FB_BLANK_POWERDOWN;
+ ctx->backlight->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(ctx->backlight);
+ }
+
+ return 0;
+}
+
+static int st7789v_unprepare(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+ int ret;
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_SLEEP_MODE));
+
+ regulator_disable(ctx->power);
+
+ return 0;
+}
+
+static const struct drm_panel_funcs st7789v_drm_funcs = {
+ .disable = st7789v_disable,
+ .enable = st7789v_enable,
+ .get_modes = st7789v_get_modes,
+ .prepare = st7789v_prepare,
+ .unprepare = st7789v_unprepare,
+};
+
+static int st7789v_probe(struct spi_device *spi)
+{
+ struct device_node *backlight;
+ struct st7789v *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ctx);
+ ctx->spi = spi;
+
+ ctx->panel.dev = &spi->dev;
+ ctx->panel.funcs = &st7789v_drm_funcs;
+
+ ctx->power = devm_regulator_get(&spi->dev, "power");
+ if (IS_ERR(ctx->power))
+ return PTR_ERR(ctx->power);
+
+ ctx->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ dev_err(&spi->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ backlight = of_parse_phandle(spi->dev.of_node, "backlight", 0);
+ if (backlight) {
+ ctx->backlight = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!ctx->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ goto err_free_backlight;
+
+ return 0;
+
+err_free_backlight:
+ if (ctx->backlight)
+ put_device(&ctx->backlight->dev);
+
+ return ret;
+}
+
+static int st7789v_remove(struct spi_device *spi)
+{
+ struct st7789v *ctx = spi_get_drvdata(spi);
+
+ drm_panel_detach(&ctx->panel);
+ drm_panel_remove(&ctx->panel);
+
+ if (ctx->backlight)
+ put_device(&ctx->backlight->dev);
+
+ return 0;
+}
+
+static const struct of_device_id st7789v_of_match[] = {
+ { .compatible = "sitronix,st7789v" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, st7789v_of_match);
+
+static struct spi_driver st7789v_driver = {
+ .probe = st7789v_probe,
+ .remove = st7789v_remove,
+ .driver = {
+ .name = "st7789v",
+ .of_match_table = st7789v_of_match,
+ },
+};
+module_spi_driver(st7789v_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Sitronix st7789v LCD Driver");
+MODULE_LICENSE("GPL v2");
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &qxl_bo_move_notify,
};
mem_trp = ((temp >> 8) & 0x7) + 1;
mem_tras = ((temp >> 11) & 0xf) + 4;
} else if (rdev->family == CHIP_RV350 ||
- rdev->family <= CHIP_RV380) {
+ rdev->family == CHIP_RV380) {
/* rv3x0 */
mem_trcd = (temp & 0x7) + 3;
mem_trp = ((temp >> 8) & 0x7) + 3;
u32 tiling_flags;
u32 pitch;
int surface_reg;
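+	/* non-zero while this BO is shared through dma-buf */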
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
p->relocs[i].allowed_domains = domain;
}
+ /* Objects shared as dma-bufs cannot be moved to VRAM */
+ if (p->relocs[i].robj->prime_shared_count) {
+ p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
+ if (!p->relocs[i].allowed_domains) {
+ DRM_ERROR("BO associated with dma-buf cannot "
+ "be moved to VRAM\n");
+ return -EINVAL;
+ }
+ }
+
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
}
static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
int i;
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
return r;
}
}
+ if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
+ /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
return 0;
}
return 0;
}
+ if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
+ /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
+
radeon_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
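+	/* count the export so the BO stays out of VRAM while it is shared */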
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
radeon_bo_unreserve(bo);
return ret;
}
return;
radeon_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
radeon_bo_unreserve(bo);
}
rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
- if (rbo->placements[0].fpfn < fpfn)
- rbo->placements[0].fpfn = fpfn;
+ if (rbo->placements[i].fpfn < fpfn)
+ rbo->placements[i].fpfn = fpfn;
} else {
rbo->placement.busy_placement =
&rbo->placements[i];
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int radeon_ttm_init(struct radeon_device *rdev)
rockchip_drm_psr_unregister(&dp->encoder);
- return analogix_dp_unbind(dev, master, data);
+ analogix_dp_unbind(dev, master, data);
+ clk_disable_unprepare(dp->pclk);
}
static const struct component_ops rockchip_dp_component_ops = {
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *panel_node, *port, *endpoint;
struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
+ int ret;
- port = of_graph_get_port_by_id(dev->of_node, 1);
- if (port) {
- endpoint = of_get_child_by_name(port, "endpoint");
- of_node_put(port);
- if (!endpoint) {
- dev_err(dev, "no output endpoint found\n");
- return -EINVAL;
- }
-
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_err(dev, "no output node found\n");
- return -EINVAL;
- }
-
- panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
- if (!panel)
- return -EPROBE_DEFER;
- }
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+ if (ret)
+ return ret;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
dp->connected = false;
dp->active = false;
dp->active_port = -1;
+ dp->fw_loaded = false;
INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
goto err_free_connector;
}
- cdn_dp_audio_codec_init(dp, dev);
-
for (i = 0; i < dp->ports; i++) {
port = dp->port[i];
struct drm_connector *connector = &dp->connector;
cancel_work_sync(&dp->event_work);
- platform_device_unregister(dp->audio_pdev);
cdn_dp_encoder_disable(encoder);
encoder->funcs->destroy(encoder);
connector->funcs->destroy(connector);
pm_runtime_disable(dev);
- release_firmware(dp->fw);
+ if (dp->fw_loaded)
+ release_firmware(dp->fw);
kfree(dp->edid);
dp->edid = NULL;
}
mutex_init(&dp->lock);
dev_set_drvdata(dev, dp);
+ cdn_dp_audio_codec_init(dp, dev);
+
return component_add(dev, &cdn_dp_component_ops);
}
{
struct cdn_dp_device *dp = platform_get_drvdata(pdev);
+ platform_device_unregister(dp->audio_pdev);
cdn_dp_suspend(dp->dev);
component_del(&pdev->dev, &cdn_dp_component_ops);
INIT_LIST_HEAD(&private->psr_list);
spin_lock_init(&private->psr_list_lock);
+ ret = rockchip_drm_init_iommu(drm_dev);
+ if (ret)
+ goto err_free;
+
drm_mode_config_init(drm_dev);
rockchip_drm_mode_config_init(drm_dev);
- ret = rockchip_drm_init_iommu(drm_dev);
- if (ret)
- goto err_config_cleanup;
-
/* Try to bind all sub drivers. */
ret = component_bind_all(dev, drm_dev);
if (ret)
- goto err_iommu_cleanup;
+ goto err_mode_config_cleanup;
- /* init kms poll for handling hpd */
- drm_kms_helper_poll_init(drm_dev);
+ ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
+ if (ret)
+ goto err_unbind_all;
+
+ drm_mode_config_reset(drm_dev);
/*
* enable drm irq mode.
*/
drm_dev->irq_enabled = true;
- ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
- if (ret)
- goto err_kms_helper_poll_fini;
-
- drm_mode_config_reset(drm_dev);
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(drm_dev);
ret = rockchip_drm_fbdev_init(drm_dev);
if (ret)
- goto err_vblank_cleanup;
+ goto err_kms_helper_poll_fini;
ret = drm_dev_register(drm_dev, 0);
if (ret)
return 0;
err_fbdev_fini:
rockchip_drm_fbdev_fini(drm_dev);
-err_vblank_cleanup:
- drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
+ drm_vblank_cleanup(drm_dev);
+err_unbind_all:
component_unbind_all(dev, drm_dev);
-err_iommu_cleanup:
- rockchip_iommu_cleanup(drm_dev);
-err_config_cleanup:
+err_mode_config_cleanup:
drm_mode_config_cleanup(drm_dev);
- drm_dev->dev_private = NULL;
+ rockchip_iommu_cleanup(drm_dev);
err_free:
+ drm_dev->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
drm_dev_unref(drm_dev);
return ret;
}
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ drm_dev_unregister(drm_dev);
+
rockchip_drm_fbdev_fini(drm_dev);
- drm_vblank_cleanup(drm_dev);
drm_kms_helper_poll_fini(drm_dev);
+
+ drm_atomic_helper_shutdown(drm_dev);
+ drm_vblank_cleanup(drm_dev);
component_unbind_all(dev, drm_dev);
- rockchip_iommu_cleanup(drm_dev);
drm_mode_config_cleanup(drm_dev);
+ rockchip_iommu_cleanup(drm_dev);
+
drm_dev->dev_private = NULL;
- drm_dev_unregister(drm_dev);
- drm_dev_unref(drm_dev);
dev_set_drvdata(dev, NULL);
+ drm_dev_unref(drm_dev);
}
static void rockchip_drm_lastclose(struct drm_device *dev)
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
- goto err_put_pm_runtime;
+ return ret;
}
ret = clk_enable(vop->hclk);
return PTR_ERR(vop->dclk);
}
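+	/* power the VOP up before touching its clocks and registers */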
+ ret = pm_runtime_get_sync(vop->dev);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ return ret;
+ }
+
ret = clk_prepare(vop->dclk);
if (ret < 0) {
dev_err(vop->dev, "failed to prepare dclk\n");
- return ret;
+ goto err_put_pm_runtime;
}
/* Enable both the hclk and aclk to setup the vop */
vop->is_enabled = false;
+ pm_runtime_put_sync(vop->dev);
+
return 0;
err_disable_aclk:
clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
clk_unprepare(vop->dclk);
+err_put_pm_runtime:
+ pm_runtime_put_sync(vop->dev);
return ret;
}
if (!vop->regsbak)
return -ENOMEM;
- ret = vop_initial(vop);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
- return ret;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "cannot find irq for vop\n");
pm_runtime_enable(&pdev->dev);
+ ret = vop_initial(vop);
+ if (ret < 0) {
+		dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
+ goto err_disable_pm_runtime;
+ }
+
return 0;
+err_disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+ vop_destroy_crtc(vop);
err_enable_irq:
enable_irq(vop->irq); /* To balance out the disable_irq above */
return ret;
pm_runtime_disable(dev);
vop_destroy_crtc(vop);
+
+ clk_unprepare(vop->aclk);
+ clk_unprepare(vop->hclk);
+ clk_unprepare(vop->dclk);
}
const struct component_ops vop_component_ops = {
#define GAM_GDP_ALPHARANGE_255 BIT(5)
#define GAM_GDP_AGC_FULL_RANGE 0x00808080
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
-#define GAM_GDP_SIZE_MAX 0x7FF
+
+#define GAM_GDP_SIZE_MAX_WIDTH 3840
+#define GAM_GDP_SIZE_MAX_HEIGHT 2160
#define GDP_NODE_NB_BANK 2
#define GDP_NODE_PER_FIELD 2
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
- src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
- src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
format = sti_gdp_fourcc2format(fb->format->format);
if (format == -1) {
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
- src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
- src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
-sun4i-drm-y += sun4i_crtc.o
sun4i-drm-y += sun4i_drv.o
sun4i-drm-y += sun4i_framebuffer.o
-sun4i-drm-y += sun4i_layer.o
sun4i-tcon-y += sun4i_tcon.o
sun4i-tcon-y += sun4i_rgb.o
sun4i-tcon-y += sun4i_dotclock.o
+sun4i-tcon-y += sun4i_crtc.o
+sun4i-tcon-y += sun4i_layer.o
obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_backend.o
#include "sun4i_backend.h"
#include "sun4i_drv.h"
-static u32 sunxi_rgb2yuv_coef[12] = {
+static const u32 sunxi_rgb2yuv_coef[12] = {
0x00000107, 0x00000204, 0x00000064, 0x00000108,
0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
+#include "sun4i_layer.h"
#include "sun4i_tcon.h"
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
struct drm_pending_vblank_event *event = crtc->state->event;
DRM_DEBUG_DRIVER("Committing plane changes\n");
- sun4i_backend_commit(drv->backend);
+ sun4i_backend_commit(scrtc->backend);
if (event) {
crtc->state->event = NULL;
static void sun4i_crtc_disable(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
- sun4i_tcon_disable(drv->tcon);
+ sun4i_tcon_disable(scrtc->tcon);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
static void sun4i_crtc_enable(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
- sun4i_tcon_enable(drv->tcon);
+ sun4i_tcon_enable(scrtc->tcon);
}
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Enabling VBLANK on crtc %p\n", crtc);
- sun4i_tcon_enable_vblank(drv->tcon, true);
+ sun4i_tcon_enable_vblank(scrtc->tcon, true);
return 0;
}
static void sun4i_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Disabling VBLANK on crtc %p\n", crtc);
- sun4i_tcon_enable_vblank(drv->tcon, false);
+ sun4i_tcon_enable_vblank(scrtc->tcon, false);
}
static const struct drm_crtc_funcs sun4i_crtc_funcs = {
.disable_vblank = sun4i_crtc_disable_vblank,
};
-struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm)
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
+ struct sun4i_backend *backend,
+ struct sun4i_tcon *tcon)
{
- struct sun4i_drv *drv = drm->dev_private;
struct sun4i_crtc *scrtc;
- int ret;
+ struct drm_plane *primary = NULL, *cursor = NULL;
+ int ret, i;
scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL);
if (!scrtc)
+ return ERR_PTR(-ENOMEM);
+ scrtc->backend = backend;
+ scrtc->tcon = tcon;
+
+ /* Create our layers */
+ scrtc->layers = sun4i_layers_init(drm, scrtc->backend);
+ if (IS_ERR(scrtc->layers)) {
+ dev_err(drm->dev, "Couldn't create the planes\n");
return NULL;
- scrtc->drv = drv;
+ }
+
+ /* find primary and cursor planes for drm_crtc_init_with_planes */
+ for (i = 0; scrtc->layers[i]; i++) {
+ struct sun4i_layer *layer = scrtc->layers[i];
+
+ switch (layer->plane.type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ primary = &layer->plane;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ cursor = &layer->plane;
+ break;
+ default:
+ break;
+ }
+ }
ret = drm_crtc_init_with_planes(drm, &scrtc->crtc,
- drv->primary,
- NULL,
+ primary,
+ cursor,
&sun4i_crtc_funcs,
NULL);
if (ret) {
dev_err(drm->dev, "Couldn't init DRM CRTC\n");
- return NULL;
+ return ERR_PTR(ret);
}
drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs);
+ /* Set crtc.port to output port node of the tcon */
+ scrtc->crtc.port = of_graph_get_port_by_id(scrtc->tcon->dev->of_node,
+ 1);
+
+ /* Set possible_crtcs to this crtc for overlay planes */
+ for (i = 0; scrtc->layers[i]; i++) {
+ uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc));
+ struct sun4i_layer *layer = scrtc->layers[i];
+
+ if (layer->plane.type == DRM_PLANE_TYPE_OVERLAY)
+ layer->plane.possible_crtcs = possible_crtcs;
+ }
+
return scrtc;
}
struct drm_crtc crtc;
struct drm_pending_vblank_event *event;
- struct sun4i_drv *drv;
+ struct sun4i_backend *backend;
+ struct sun4i_tcon *tcon;
+ struct sun4i_layer **layers;
};
static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc)
return container_of(crtc, struct sun4i_crtc, crtc);
}
-struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm);
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
+ struct sun4i_backend *backend,
+ struct sun4i_tcon *tcon);
#endif /* _SUN4I_CRTC_H_ */
#include <linux/component.h>
#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
-#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_framebuffer.h"
-#include "sun4i_layer.h"
+#include "sun4i_tcon.h"
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
}
drm->dev_private = drv;
- drm_vblank_init(drm, 1);
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(drm->dev, "Couldn't claim our memory region\n");
+ goto free_drm;
+ }
+
+ /* drm_vblank_init calls kcalloc, which can fail */
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+ goto free_mem_region;
+
drm_mode_config_init(drm);
ret = component_bind_all(drm->dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all pipelines components\n");
- goto free_drm;
- }
-
- /* Create our layers */
- drv->layers = sun4i_layers_init(drm);
- if (IS_ERR(drv->layers)) {
- dev_err(drm->dev, "Couldn't create the planes\n");
- ret = PTR_ERR(drv->layers);
- goto free_drm;
+ goto cleanup_mode_config;
}
- /* Create our CRTC */
- drv->crtc = sun4i_crtc_init(drm);
- if (!drv->crtc) {
- dev_err(drm->dev, "Couldn't create the CRTC\n");
- ret = -EINVAL;
- goto free_drm;
- }
drm->irq_enabled = true;
/* Remove early framebuffers (ie. simplefb) */
if (IS_ERR(drv->fbdev)) {
dev_err(drm->dev, "Couldn't create our framebuffer\n");
ret = PTR_ERR(drv->fbdev);
- goto free_drm;
+ goto cleanup_mode_config;
}
/* Enable connectors polling */
ret = drm_dev_register(drm, 0);
if (ret)
- goto free_drm;
+ goto finish_poll;
return 0;
+finish_poll:
+ drm_kms_helper_poll_fini(drm);
+ sun4i_framebuffer_free(drm);
+cleanup_mode_config:
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+free_mem_region:
+ of_reserved_mem_device_release(dev);
free_drm:
drm_dev_unref(drm);
return ret;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
sun4i_framebuffer_free(drm);
+ drm_mode_config_cleanup(drm);
drm_vblank_cleanup(drm);
+ of_reserved_mem_device_release(dev);
drm_dev_unref(drm);
}
struct sun4i_drv {
struct sun4i_backend *backend;
- struct sun4i_crtc *crtc;
struct sun4i_tcon *tcon;
- struct drm_plane *primary;
struct drm_fbdev_cma *fbdev;
-
- struct sun4i_layer **layers;
};
#endif /* _SUN4I_DRV_H_ */
struct sun4i_drv *drv = drm->dev_private;
drm_fbdev_cma_fini(drv->fbdev);
- drm_mode_config_cleanup(drm);
}
#include <drm/drmP.h>
#include "sun4i_backend.h"
-#include "sun4i_drv.h"
#include "sun4i_layer.h"
struct sun4i_plane_desc {
struct drm_plane_state *old_state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
- struct sun4i_drv *drv = layer->drv;
- struct sun4i_backend *backend = drv->backend;
+ struct sun4i_backend *backend = layer->backend;
sun4i_backend_layer_enable(backend, layer->id, false);
}
struct drm_plane_state *old_state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
- struct sun4i_drv *drv = layer->drv;
- struct sun4i_backend *backend = drv->backend;
+ struct sun4i_backend *backend = layer->backend;
sun4i_backend_update_layer_coord(backend, layer->id, plane);
sun4i_backend_update_layer_formats(backend, layer->id, plane);
};
static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
+ struct sun4i_backend *backend,
const struct sun4i_plane_desc *plane)
{
- struct sun4i_drv *drv = drm->dev_private;
struct sun4i_layer *layer;
int ret;
if (!layer)
return ERR_PTR(-ENOMEM);
- ret = drm_universal_plane_init(drm, &layer->plane, BIT(0),
+ /* possible crtcs are set later */
+ ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
plane->formats, plane->nformats,
plane->type, NULL);
drm_plane_helper_add(&layer->plane,
&sun4i_backend_layer_helper_funcs);
- layer->drv = drv;
-
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- drv->primary = &layer->plane;
+ layer->backend = backend;
return layer;
}
-struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm,
+ struct sun4i_backend *backend)
{
- struct sun4i_drv *drv = drm->dev_private;
struct sun4i_layer **layers;
int i;
- layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes),
- sizeof(**layers), GFP_KERNEL);
+ layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes) + 1,
+ sizeof(*layers), GFP_KERNEL);
if (!layers)
return ERR_PTR(-ENOMEM);
*/
for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) {
const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i];
- struct sun4i_layer *layer = layers[i];
+ struct sun4i_layer *layer;
- layer = sun4i_layer_init_one(drm, plane);
+ layer = sun4i_layer_init_one(drm, backend, plane);
if (IS_ERR(layer)) {
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
i ? "overlay" : "primary", plane->pipe);
- regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
+ regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe));
layer->id = i;
+ layers[i] = layer;
};
return layers;
struct sun4i_layer {
struct drm_plane plane;
struct sun4i_drv *drv;
+ struct sun4i_backend *backend;
int id;
};
return container_of(plane, struct sun4i_layer, plane);
}
-struct sun4i_layer **sun4i_layers_init(struct drm_device *drm);
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm,
+ struct sun4i_backend *backend);
#endif /* _SUN4I_LAYER_H_ */
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
-#include "sun4i_drv.h"
+#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
#include "sun4i_rgb.h"
struct drm_connector connector;
struct drm_encoder encoder;
- struct sun4i_drv *drv;
+ struct sun4i_tcon *tcon;
};
static inline struct sun4i_rgb *
{
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
return drm_panel_get_modes(tcon->panel);
}
struct drm_display_mode *mode)
{
struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
unsigned long rate = mode->clock * 1000;
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
drm_panel_detach(tcon->panel);
drm_connector_cleanup(connector);
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Enabling RGB output\n");
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Disabling RGB output\n");
struct drm_display_mode *adjusted_mode)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
sun4i_tcon0_mode_set(tcon, mode);
.destroy = sun4i_rgb_enc_destroy,
};
-int sun4i_rgb_init(struct drm_device *drm)
+int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
- struct sun4i_drv *drv = drm->dev_private;
- struct sun4i_tcon *tcon = drv->tcon;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct sun4i_rgb *rgb;
rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
return -ENOMEM;
- rgb->drv = drv;
+ rgb->tcon = tcon;
encoder = &rgb->encoder;
- tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node);
- bridge = sun4i_tcon_find_bridge(tcon->dev->of_node);
- if (IS_ERR(tcon->panel) && IS_ERR(bridge)) {
+ ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
+ &tcon->panel, &bridge);
+ if (ret) {
dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
return 0;
}
}
/* The RGB encoder can only work with the TCON channel 0 */
- rgb->encoder.possible_crtcs = BIT(0);
+ rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_connector_helper_add(&rgb->connector,
&sun4i_rgb_con_helper_funcs);
ret = drm_connector_init(drm, &rgb->connector,
}
}
- if (!IS_ERR(bridge)) {
+ if (bridge) {
ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
#ifndef _SUN4I_RGB_H_
#define _SUN4I_RGB_H_
-int sun4i_rgb_init(struct drm_device *drm);
+int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon);
#endif /* _SUN4I_RGB_H_ */
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modes.h>
-#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/reset.h>
/*
* This is called a backporch in the register documentation,
- * but it really is the front porch + hsync
+ * but it really is the back porch + hsync
*/
bp = mode->crtc_htotal - mode->crtc_hsync_start;
DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
/*
* This is called a backporch in the register documentation,
- * but it really is the front porch + hsync
+ * but it really is the back porch + hsync
*/
bp = mode->crtc_vtotal - mode->crtc_vsync_start;
DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
{
struct sun4i_tcon *tcon = private;
struct drm_device *drm = tcon->drm;
- struct sun4i_drv *drv = drm->dev_private;
- struct sun4i_crtc *scrtc = drv->crtc;
+ struct sun4i_crtc *scrtc = tcon->crtc;
unsigned int status;
regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
}
}
- return sun4i_dclk_create(dev, tcon);
+ return 0;
}
static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
{
- sun4i_dclk_free(tcon);
clk_disable_unprepare(tcon->clk);
}
return 0;
}
-struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
-{
- struct device_node *port, *remote, *child;
- struct device_node *end_node = NULL;
-
- /* Inputs are listed first, then outputs */
- port = of_graph_get_port_by_id(node, 1);
-
- /*
- * Our first output is the RGB interface where the panel will
- * be connected.
- */
- for_each_child_of_node(port, child) {
- u32 reg;
-
-		of_property_read_u32(child, "reg", &reg);
- if (reg == 0)
- end_node = child;
- }
-
- if (!end_node) {
- DRM_DEBUG_DRIVER("Missing panel endpoint\n");
- return ERR_PTR(-ENODEV);
- }
-
- remote = of_graph_get_remote_port_parent(end_node);
- if (!remote) {
- DRM_DEBUG_DRIVER("Unable to parse remote node\n");
- return ERR_PTR(-EINVAL);
- }
-
- return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
-}
-
-struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node)
-{
- struct device_node *port, *remote, *child;
- struct device_node *end_node = NULL;
-
- /* Inputs are listed first, then outputs */
- port = of_graph_get_port_by_id(node, 1);
-
- /*
- * Our first output is the RGB interface where the panel will
- * be connected.
- */
- for_each_child_of_node(port, child) {
- u32 reg;
-
-		of_property_read_u32(child, "reg", &reg);
- if (reg == 0)
- end_node = child;
- }
-
- if (!end_node) {
- DRM_DEBUG_DRIVER("Missing bridge endpoint\n");
- return ERR_PTR(-ENODEV);
- }
-
- remote = of_graph_get_remote_port_parent(end_node);
- if (!remote) {
- DRM_DEBUG_DRIVER("Enable to parse remote node\n");
- return ERR_PTR(-EINVAL);
- }
-
- return of_drm_find_bridge(remote) ?: ERR_PTR(-EPROBE_DEFER);
-}
-
static int sun4i_tcon_bind(struct device *dev, struct device *master,
void *data)
{
return ret;
}
+ ret = sun4i_tcon_init_clocks(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't init our TCON clocks\n");
+ goto err_assert_reset;
+ }
+
ret = sun4i_tcon_init_regmap(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON regmap\n");
- goto err_assert_reset;
+ goto err_free_clocks;
}
- ret = sun4i_tcon_init_clocks(dev, tcon);
+ ret = sun4i_dclk_create(dev, tcon);
if (ret) {
- dev_err(dev, "Couldn't init our TCON clocks\n");
- goto err_assert_reset;
+ dev_err(dev, "Couldn't create our TCON dot clock\n");
+ goto err_free_clocks;
}
ret = sun4i_tcon_init_irq(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON interrupts\n");
+ goto err_free_dotclock;
+ }
+
+ tcon->crtc = sun4i_crtc_init(drm, drv->backend, tcon);
+ if (IS_ERR(tcon->crtc)) {
+ dev_err(dev, "Couldn't create our CRTC\n");
+ ret = PTR_ERR(tcon->crtc);
goto err_free_clocks;
}
- ret = sun4i_rgb_init(drm);
+ ret = sun4i_rgb_init(drm, tcon);
if (ret < 0)
goto err_free_clocks;
return 0;
+err_free_dotclock:
+ sun4i_dclk_free(tcon);
err_free_clocks:
sun4i_tcon_free_clocks(tcon);
err_assert_reset:
{
struct sun4i_tcon *tcon = dev_get_drvdata(dev);
+ sun4i_dclk_free(tcon);
sun4i_tcon_free_clocks(tcon);
}
struct device_node *node = pdev->dev.of_node;
struct drm_bridge *bridge;
struct drm_panel *panel;
+ int ret;
- /*
- * Neither the bridge or the panel is ready.
- * Defer the probe.
- */
- panel = sun4i_tcon_find_panel(node);
- bridge = sun4i_tcon_find_bridge(node);
-
- /*
- * If we don't have a panel endpoint, just go on
- */
- if ((PTR_ERR(panel) == -EPROBE_DEFER) &&
- (PTR_ERR(bridge) == -EPROBE_DEFER)) {
- DRM_DEBUG_DRIVER("Still waiting for our panel/bridge. Deferring...\n");
- return -EPROBE_DEFER;
- }
+ ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
+ if (ret == -EPROBE_DEFER)
+ return ret;
return component_add(&pdev->dev, &sun4i_tcon_ops);
}
/* Platform adjustments */
const struct sun4i_tcon_quirks *quirks;
+
+ /* Associated crtc */
+ struct sun4i_crtc *crtc;
};
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include "sun4i_backend.h"
+#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_tcon.h"
static void sun4i_tv_disable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_drv *drv = tv->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct sun4i_tcon *tcon = crtc->tcon;
+ struct sun4i_backend *backend = crtc->backend;
DRM_DEBUG_DRIVER("Disabling the TV Output\n");
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
0);
- sun4i_backend_disable_color_correction(drv->backend);
+ sun4i_backend_disable_color_correction(backend);
}
static void sun4i_tv_enable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_drv *drv = tv->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct sun4i_tcon *tcon = crtc->tcon;
+ struct sun4i_backend *backend = crtc->backend;
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
- sun4i_backend_apply_color_correction(drv->backend);
+ sun4i_backend_apply_color_correction(backend);
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_drv *drv = tv->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct sun4i_tcon *tcon = crtc->tcon;
const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
sun4i_tcon1_mode_set(tcon, mode);
goto err_disable_clk;
}
- tv->encoder.possible_crtcs = BIT(0);
+ tv->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
+ dev->of_node);
+ if (!tv->encoder.possible_crtcs) {
+ ret = -EPROBE_DEFER;
+ goto err_disable_clk;
+ }
drm_connector_helper_add(&tv->connector,
&sun4i_tv_comp_connector_helper_funcs);
.map_dma_buf = tegra_gem_prime_map_dma_buf,
.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
.release = tegra_gem_prime_release,
- .kmap_atomic = tegra_gem_prime_kmap_atomic,
- .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
- .kmap = tegra_gem_prime_kmap,
- .kunmap = tegra_gem_prime_kunmap,
+ .map_atomic = tegra_gem_prime_kmap_atomic,
+ .unmap_atomic = tegra_gem_prime_kunmap_atomic,
+ .map = tegra_gem_prime_kmap,
+ .unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
.vmap = tegra_gem_prime_vmap,
.vunmap = tegra_gem_prime_vunmap,
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
+#include <linux/of_graph.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
if (priv->is_componentized) {
- struct device_node *ports =
- of_get_child_by_name(dev->dev->of_node, "ports");
-
- if (ports) {
- crtc->port = of_get_child_by_name(ports, "port");
- of_node_put(ports);
- } else {
- crtc->port =
- of_get_child_by_name(dev->dev->of_node, "port");
- }
+ crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
if (!crtc->port) { /* This should never happen */
dev_err(dev->dev, "Port node not found in %s\n",
dev->dev->of_node->full_name);
return ret;
}
-static int tilcdc_node_has_port(struct device_node *dev_node)
-{
- struct device_node *node;
-
- node = of_get_child_by_name(dev_node, "ports");
- if (!node)
- node = of_get_child_by_name(dev_node, "port");
- if (!node)
- return 0;
- of_node_put(node);
-
- return 1;
-}
-
-static
-struct device_node *tilcdc_get_remote_node(struct device_node *node)
-{
- struct device_node *ep;
- struct device_node *parent;
-
- if (!tilcdc_node_has_port(node))
- return NULL;
-
- ep = of_graph_get_next_endpoint(node, NULL);
- if (!ep)
- return NULL;
-
- parent = of_graph_get_remote_port_parent(ep);
- of_node_put(ep);
-
- return parent;
-}
-
int tilcdc_attach_external_device(struct drm_device *ddev)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
struct drm_bridge *bridge;
int ret;
- remote_node = tilcdc_get_remote_node(ddev->dev->of_node);
+ remote_node = of_graph_get_remote_node(ddev->dev->of_node, 0, 0);
if (!remote_node)
return 0;
struct component_match **match)
{
struct device_node *node;
- struct device_node *ep = NULL;
- int count = 0;
- int ret = 0;
-
- if (!tilcdc_node_has_port(dev->of_node))
- return 0;
- while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
- node = of_graph_get_remote_port_parent(ep);
- if (!node || !of_device_is_available(node)) {
- of_node_put(node);
- continue;
- }
-
- dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
-
- if (of_device_is_compatible(node, "nxp,tda998x")) {
- if (match)
- drm_of_component_match_add(dev, match,
- dev_match_of, node);
- ret = 1;
- }
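+	/* the external encoder, if present, sits at port 0, endpoint 0 */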
+ node = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!of_device_is_compatible(node, "nxp,tda998x")) {
of_node_put(node);
- if (count++ > 1) {
- dev_err(dev, "Only one port is supported\n");
- return -EINVAL;
- }
+ return 0;
}
- return ret;
+ if (match)
+ drm_of_component_match_add(dev, match, dev_match_of, node);
+ of_node_put(node);
+ return 1;
}
return ret;
}
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
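+/* Check whether @mem satisfies any of the @num_placement entries in @places */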
+static bool ttm_bo_places_compat(const struct ttm_place *places,
+ unsigned num_placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
- int i;
-
- for (i = 0; i < placement->num_placement; i++) {
- const struct ttm_place *heap = &placement->placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
+ unsigned i;
- *new_flags = heap->flags;
- if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (*new_flags & mem->placement & TTM_PL_MASK_MEM))
- return true;
- }
+ for (i = 0; i < num_placement; i++) {
+ const struct ttm_place *heap = &places[i];
- for (i = 0; i < placement->num_busy_placement; i++) {
- const struct ttm_place *heap = &placement->busy_placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
+ if (mem->mm_node && (mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
+ (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
+ return false;
+}
+
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
+{
+ if (ttm_bo_places_compat(placement->placement, placement->num_placement,
+ mem, new_flags))
+ return true;
+
+ if ((placement->busy_placement != placement->placement ||
+ placement->num_busy_placement > placement->num_placement) &&
+ ttm_bo_places_compat(placement->busy_placement,
+ placement->num_busy_placement,
+ mem, new_flags))
+ return true;
return false;
}
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
- pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
+ pfn = bdev->driver->io_mem_pfn(bo, page_offset);
else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
return bo;
}
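+/*
+ * Default io_mem_pfn() implementation: derive the page frame number from
+ * the bus base address, the bus offset and the page offset within the BO.
+ */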
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
+ + page_offset;
+}
+EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
if (unlikely(ret != 0))
goto out_err0;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
}
rcu_read_unlock();
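+	/* no existing reference found; only create one if the caller allows it */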
+ if (require_existed)
+ return -EPERM;
+
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
ttm_ref_object_release(&ref->kref);
}
+ spin_unlock(&tfile->lock);
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
- spin_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
*p_tdev = NULL;
- spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
- spin_unlock(&tdev->object_lock);
kfree(tdev);
}
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
.detach = udl_detach_dma_buf,
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
- .kmap = udl_dmabuf_kmap,
- .kmap_atomic = udl_dmabuf_kmap_atomic,
- .kunmap = udl_dmabuf_kunmap,
- .kunmap_atomic = udl_dmabuf_kunmap_atomic,
+ .map = udl_dmabuf_kmap,
+ .map_atomic = udl_dmabuf_kmap_atomic,
+ .unmap = udl_dmabuf_kunmap,
+ .unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/prefetch.h>
+#include <asm/unaligned.h>
#include <drm/drmP.h>
#include "udl_drv.h"
const u8 *const start = pixel;
const uint16_t repeating_pixel_val16 = pixel_val16;
- *(uint16_t *)cmd = cpu_to_be16(pixel_val16);
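+	/* cmd is a byte pointer with no alignment guarantee */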
+ put_unaligned_be16(pixel_val16, cmd);
cmd += 2;
pixel += bpp;
static int
vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 i;
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
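+/*
+ * Allocate the driver-subclassed CRTC state on reset, so the atomic
+ * helpers never hand a bare drm_crtc_state back to vc4.
+ */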
+static void
+vc4_crtc_reset(struct drm_crtc *crtc)
+{
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+ if (crtc->state)
+ crtc->state->crtc = crtc;
+}
+
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = vc4_crtc_destroy,
.set_property = NULL,
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
.gamma_set = vc4_crtc_gamma_set,
*/
static struct drm_panel *vc4_dpi_get_panel(struct device *dev)
{
- struct device_node *endpoint, *panel_node;
+ struct device_node *panel_node;
struct device_node *np = dev->of_node;
struct drm_panel *panel;
- endpoint = of_graph_get_next_endpoint(np, NULL);
- if (!endpoint) {
- dev_err(dev, "no endpoint to fetch DPI panel\n");
- return NULL;
- }
-
/* don't proceed if we have an endpoint but no panel_node tied to it */
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_err(dev, "no valid panel node\n");
+ panel_node = of_graph_get_remote_node(np, 0, 0);
+ if (!panel_node)
return NULL;
- }
panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
/* virtio_gpu_plane.c */
+uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
mode_cmd.pitches[0] = mode_cmd.width * 4;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);
- switch (mode_cmd.pixel_format) {
-#ifdef __BIG_ENDIAN
- case DRM_FORMAT_XRGB8888:
- format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
- break;
- case DRM_FORMAT_ARGB8888:
- format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
- break;
- case DRM_FORMAT_BGRX8888:
- format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
- break;
- case DRM_FORMAT_BGRA8888:
- format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
- break;
- case DRM_FORMAT_RGBX8888:
- format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
- break;
- case DRM_FORMAT_RGBA8888:
- format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
- break;
- case DRM_FORMAT_XBGR8888:
- format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
- break;
- case DRM_FORMAT_ABGR8888:
- format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
- break;
-#else
- case DRM_FORMAT_XRGB8888:
- format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
- break;
- case DRM_FORMAT_ARGB8888:
- format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
- break;
- case DRM_FORMAT_BGRX8888:
- format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
- break;
- case DRM_FORMAT_BGRA8888:
- format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
- break;
- case DRM_FORMAT_RGBX8888:
- format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
- break;
- case DRM_FORMAT_RGBA8888:
- format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
- break;
- case DRM_FORMAT_XBGR8888:
- format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
- break;
- case DRM_FORMAT_ABGR8888:
- format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
- break;
-#endif
- default:
- DRM_ERROR("failed to find virtio gpu format for %d\n",
- mode_cmd.pixel_format);
+ format = virtio_gpu_translate_format(mode_cmd.pixel_format);
+ if (format == 0)
return -EINVAL;
- }
size = mode_cmd.pitches[0] * mode_cmd.height;
obj = virtio_gpu_alloc_object(dev, size, false, true);
int ret;
uint32_t pitch;
uint32_t resid;
+ uint32_t format;
pitch = args->width * ((args->bpp + 1) / 8);
args->size = pitch * args->height;
if (ret)
goto fail;
+ format = virtio_gpu_translate_format(DRM_FORMAT_XRGB8888);
virtio_gpu_resource_id_get(vgdev, &resid);
- virtio_gpu_cmd_create_resource(vgdev, resid,
- 2, args->width, args->height);
+ virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ args->width, args->height);
/* attach the object to the resource */
obj = gem_to_virtio_gpu_obj(gobj);
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
- if (ret != 0)
+ if (ret != 0) {
+ kfree(bo);
return ret;
+ }
bo->dumb = false;
virtio_gpu_init_ttm_placement(bo, pinned);
};
static const uint32_t virtio_gpu_cursor_formats[] = {
+#ifdef __BIG_ENDIAN
+ DRM_FORMAT_BGRA8888,
+#else
DRM_FORMAT_ARGB8888,
+#endif
};
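+/*
+ * Translate a DRM fourcc into the matching virtio-gpu format; the mapping
+ * depends on the endianness the kernel was built for.
+ */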
+uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
+{
+ uint32_t format;
+
+ switch (drm_fourcc) {
+#ifdef __BIG_ENDIAN
+ case DRM_FORMAT_XRGB8888:
+ format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+ break;
+#else
+ case DRM_FORMAT_XRGB8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+ break;
+#endif
+ default:
+ /*
+ * This should not happen, we handle everything listed
+		 * This should not happen; we handle everything listed
+ */
+ format = 0;
+ break;
+ }
+ WARN_ON(format == 0);
+ return format;
+}
+
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
return 0;
}
+static int vmwgfx_set_config_internal(struct drm_mode_set *set)
+{
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *tmp;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_device *dev = set->crtc->dev;
+ int ret;
+
+ ctx = dev->mode_config.acquire_ctx;
+
+restart:
+ /*
+ * NOTE: ->set_config can also disable other crtcs (if we steal all
+ * connectors from it), hence we need to refcount the fbs across all
+ * crtcs. Atomic modeset will have saner semantics ...
+ */
+ drm_for_each_crtc(tmp, dev)
+ tmp->primary->old_fb = tmp->primary->fb;
+
+ fb = set->fb;
+
+ ret = crtc->funcs->set_config(set, ctx);
+ if (ret == 0) {
+ crtc->primary->crtc = crtc;
+ crtc->primary->fb = fb;
+ }
+
+ drm_for_each_crtc(tmp, dev) {
+ if (tmp->primary->fb)
+ drm_framebuffer_get(tmp->primary->fb);
+ if (tmp->primary->old_fb)
+ drm_framebuffer_put(tmp->primary->old_fb);
+ tmp->primary->old_fb = NULL;
+ }
+
+ if (ret == -EDEADLK) {
+ dev->mode_config.acquire_ctx = NULL;
+
+retry_locking:
+ drm_modeset_backoff(ctx);
+
+ ret = drm_modeset_lock_all_ctx(dev, ctx);
+ if (ret)
+ goto retry_locking;
+
+ dev->mode_config.acquire_ctx = ctx;
+
+ goto restart;
+ }
+
+ return ret;
+}
+
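
The helper above follows the usual drm_modeset_acquire_ctx deadlock-avoidance dance: on -EDEADLK it drops the context's locks, blocks on the contended one, reacquires everything, and restarts. A minimal user-space sketch of that retry shape, with lock_all() and backoff() as hypothetical stand-ins for drm_modeset_lock_all_ctx() and drm_modeset_backoff():

#include <errno.h>
#include <stdio.h>

static int attempts;

/* hypothetical stand-in for drm_modeset_lock_all_ctx() */
static int lock_all(void)
{
	return ++attempts < 3 ? -EDEADLK : 0;	/* fail twice, then win */
}

/* hypothetical stand-in for drm_modeset_backoff() */
static void backoff(void)
{
	printf("attempt %d hit -EDEADLK, dropping locks\n", attempts);
}

int main(void)
{
	int ret;

restart:
	ret = lock_all();
	if (ret == -EDEADLK) {
		backoff();	/* release everything, wait on the contended lock */
		goto restart;
	}
	printf("all locks held after %d attempts\n", attempts);
	return ret;
}
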
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
bool detach_bo,
bool unref_bo)
set.fb = NULL;
set.num_connectors = 0;
set.connectors = &par->con;
- ret = drm_mode_set_config_internal(&set);
+ ret = vmwgfx_set_config_internal(&set);
if (ret) {
DRM_ERROR("Could not unset a mode.\n");
return ret;
set.num_connectors = 1;
set.connectors = &par->con;
- ret = drm_mode_set_config_internal(&set);
+ ret = vmwgfx_set_config_internal(&set);
if (ret)
goto out_unlock;
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- pr_err("Wait invalid fence object handle 0x%08lx\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- pr_err("Fence signaled invalid fence object handle 0x%08lx\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fman_from_fence(fence);
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup_for_ref(dev_priv->tdev,
- arg->handle);
-
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ vmw_fence_obj_lookup(tfile, arg->handle);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
- bool existed;
-
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
+ TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
param->value = dev_priv->has_dx;
break;
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
int i;
void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
- uint32_t size);
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx);
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
.map_dma_buf = vmw_prime_map_dma_buf,
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
- .kmap = vmw_prime_dmabuf_kmap,
- .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
- .kunmap = vmw_prime_dmabuf_kunmap,
- .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+ .map = vmw_prime_dmabuf_kmap,
+ .map_atomic = vmw_prime_dmabuf_kmap_atomic,
+ .unmap = vmw_prime_dmabuf_kunmap,
+ .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed);
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL);
+ TTM_REF_USAGE, NULL, false);
}
/**
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
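
The reordered checks above are an overflow-safe accumulate: each per-face mip count is bounded before it is summed, and empty or oversized totals are rejected. A self-contained sketch of the same idiom; MAX_FACES and MAX_LEVELS are illustrative stand-ins for the DRM_VMW_* limits:

#include <errno.h>
#include <stdint.h>

#define MAX_FACES	6	/* stand-in for DRM_VMW_MAX_SURFACE_FACES */
#define MAX_LEVELS	24	/* stand-in for DRM_VMW_MAX_MIP_LEVELS */

static int count_sizes(const uint32_t levels[MAX_FACES], uint32_t *out)
{
	uint32_t num = 0;
	int i;

	for (i = 0; i < MAX_FACES; i++) {
		if (levels[i] > MAX_LEVELS)
			return -EINVAL;	/* bound each term first ... */
		num += levels[i];	/* ... so the sum cannot wrap */
	}
	if (num == 0 || num > MAX_FACES * MAX_LEVELS)
		return -EINVAL;		/* reject empty and oversized */
	*out = num;
	return 0;
}

int main(void)
{
	uint32_t levels[MAX_FACES] = { 10, 10, 10, 10, 10, 10 };
	uint32_t num;

	return count_sizes(levels, &num);	/* returns 0, num == 60 */
}
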
uint32_t handle;
struct ttm_base_object *base;
int ret;
+ bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv))) {
- DRM_ERROR("Render client refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
/*
* Make sure the surface creator has the same
- * authenticating master.
+ * authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
- user_srf->master != file_priv->master) {
- DRM_ERROR("Trying to reference surface outside of"
- " master domain.\n");
- ret = -EACCES;
- goto out_bad_resource;
- }
+ user_srf->master != file_priv->master)
+ require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+ require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
- ipu-pre.o ipu-prg.o ipu-smfc.o ipu-vdi.o
+ ipu-smfc.o ipu-vdi.o
+
+ifdef CONFIG_DRM
+ imx-ipu-v3-objs += ipu-pre.o ipu-prg.o
+endif
ipu->id = of_alias_get_id(np, "ipu");
- if (of_device_is_compatible(np, "fsl,imx6qp-ipu")) {
+ if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
+ IS_ENABLED(CONFIG_DRM)) {
ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
"fsl,prg", ipu->id);
if (!ipu->prg_priv)
};
static struct platform_driver * const drivers[] = {
+#if IS_ENABLED(CONFIG_DRM)
&ipu_pre_drv,
&ipu_prg_drv,
+#endif
&imx_ipu_driver,
};
hid->group = HID_GROUP_WACOM;
break;
case USB_VENDOR_ID_SYNAPTICS:
- if (hid->group == HID_GROUP_GENERIC ||
- hid->group == HID_GROUP_MULTITOUCH_WIN_8)
+ if (hid->group == HID_GROUP_GENERIC)
if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
&& (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
/*
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045
#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
+#define USB_VENDOR_ID_UGEE 0x28bd
+#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
+
#define USB_VENDOR_ID_UNITEC 0x227d
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
#define USB_VENDOR_ID_XIN_MO 0x16c0
#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
+#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
#define USB_VENDOR_ID_XIROKU 0x1477
#define USB_DEVICE_ID_XIROKU_SPX 0x1006
}
break;
case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
+ case USB_DEVICE_ID_UGEE_TABLET_EX07S:
/* If this is the pen interface */
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
rc = uclogic_tablet_enable(hdev);
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);
static const struct hid_device_id xinmo_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
{ }
};
wacom_update_name(wacom, wireless ? " (WL)" : "");
+ /* a pen-only Bamboo supports neither touch nor pad */
+ if ((features->type == BAMBOO_PEN) &&
+ ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
+ (features->device_type & WACOM_DEVICETYPE_PAD))) {
+ error = -ENODEV;
+ goto fail;
+ }
+
error = wacom_add_shared_data(hdev);
if (error)
goto fail;
/* touch only Bamboo doesn't support pen */
if ((features->type == BAMBOO_TOUCH) &&
(features->device_type & WACOM_DEVICETYPE_PEN)) {
- error = -ENODEV;
- goto fail_quirks;
- }
-
- /* pen only Bamboo neither support touch nor pad */
- if ((features->type == BAMBOO_PEN) &&
- ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
- (features->device_type & WACOM_DEVICETYPE_PAD))) {
+ cancel_delayed_work_sync(&wacom->init_work);
+ _wacom_query_tablet_data(wacom);
error = -ENODEV;
goto fail_quirks;
}
* warranty of any kind, whether express or implied.
*/
-#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
.has_irq = 1,
.muxtype = pca954x_isswi,
},
+ [pca_9546] = {
+ .nchans = 4,
+ .muxtype = pca954x_isswi,
+ },
[pca_9547] = {
.nchans = 8,
.enable = 0x8,
{ "pca9543", pca_9543 },
{ "pca9544", pca_9544 },
{ "pca9545", pca_9545 },
- { "pca9546", pca_9545 },
+ { "pca9546", pca_9546 },
{ "pca9547", pca_9547 },
{ "pca9548", pca_9548 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca954x_id);
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id pca954x_acpi_ids[] = {
- { .id = "PCA9540", .driver_data = pca_9540 },
- { .id = "PCA9542", .driver_data = pca_9542 },
- { .id = "PCA9543", .driver_data = pca_9543 },
- { .id = "PCA9544", .driver_data = pca_9544 },
- { .id = "PCA9545", .driver_data = pca_9545 },
- { .id = "PCA9546", .driver_data = pca_9545 },
- { .id = "PCA9547", .driver_data = pca_9547 },
- { .id = "PCA9548", .driver_data = pca_9548 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
-#endif
-
#ifdef CONFIG_OF
static const struct of_device_id pca954x_of_match[] = {
{ .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
if (match)
data->chip = of_device_get_match_data(&client->dev);
- else if (id)
+ else
data->chip = &chips[id->driver_data];
- else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
- &client->dev);
- if (!acpi_id)
- return -ENODEV;
- data->chip = &chips[acpi_id->driver_data];
- }
data->last_chan = 0; /* force the first selection */
.name = "pca954x",
.pm = &pca954x_pm,
.of_match_table = of_match_ptr(pca954x_of_match),
- .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
},
.probe = pca954x_probe,
.remove = pca954x_remove,
name = "accel_3d";
channel_spec = accel_3d_channels;
channel_size = sizeof(accel_3d_channels);
+ indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
} else {
name = "gravity";
channel_spec = gravity_channels;
channel_size = sizeof(gravity_channels);
+ indio_dev->num_channels = ARRAY_SIZE(gravity_channels);
}
ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
&accel_state->common_attributes);
goto error_free_dev_mem;
}
- indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
indio_dev->dev.parent = &pdev->dev;
indio_dev->info = &accel_3d_info;
indio_dev->name = name;
ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
if (ret < 0)
break;
-
+ ret = IIO_VAL_INT;
*val = data;
break;
case IIO_CHAN_INFO_CALIBBIAS:
for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
st->core.calib[i] =
st->core.resp->sensor_offset.offset[i];
-
+ ret = IIO_VAL_INT;
*val = st->core.calib[idx];
break;
case IIO_CHAN_INFO_SCALE:
{
struct hid_sensor_hub_attribute_info timestamp;
+ s32 value;
+ int ret;
hid_sensor_get_reporting_interval(hsdev, usage_id, st);
st->sensitivity.index, st->sensitivity.report_id,
timestamp.index, timestamp.report_id);
+ ret = sensor_hub_get_feature(hsdev,
+ st->power_state.report_id,
+ st->power_state.index, sizeof(value), &value);
+ if (ret < 0)
+ return ret;
+ if (value < 0)
+ return -EINVAL;
+
return 0;
}
EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
#define BMG160_DEF_BW 100
#define BMG160_REG_PMU_BW_RES BIT(7)
+#define BMG160_GYRO_REG_RESET 0x14
+#define BMG160_GYRO_RESET_VAL 0xb6
+
#define BMG160_REG_INT_MAP_0 0x17
#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
int ret;
unsigned int val;
+ /*
+ * Reset chip to get it in a known good state. A delay of 30ms after
+ * reset is required according to the datasheet.
+ */
+ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
+ BMG160_GYRO_RESET_VAL);
+ usleep_range(30000, 30700);
+
ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
if (ret < 0) {
dev_err(dev, "Error reading reg_chip_id\n");
tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
- tmp = (s64)vals[0] * 1000000000LL >> vals[1];
- tmp1 = do_div(tmp, 1000000000LL);
- tmp0 = tmp;
- return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
+ tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
+ tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
+ return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_INT_MULTIPLE:
{
int i;
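
The IIO_VAL_FRACTIONAL_LOG2 fix above prints vals[0] / 2^vals[1]: scale by 10^9 first so the fraction survives the shift, shift with sign preserved, then split with signed division so negative values render correctly. A user-space rendition of the arithmetic, with a stand-in for the kernel's shift_right() helper:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Sign-preserving stand-in for the kernel's shift_right(): right
 * shift of negative values is implementation-defined in ISO C, which
 * is why the kernel wraps it in a helper.
 */
static int64_t shift_right(int64_t val, unsigned int shift)
{
	return val >> shift;
}

int main(void)
{
	int vals[2] = { -3, 1 };		/* -3 / 2^1 = -1.5 */
	int64_t tmp, tmp0;
	int32_t tmp1;

	tmp  = shift_right((int64_t)vals[0] * 1000000000LL, vals[1]);
	tmp0 = tmp / 1000000000LL;		/* integer part: -1 */
	tmp1 = (int32_t)(tmp % 1000000000LL);	/* remainder: -500000000 */

	printf("%lld.%09u\n", (long long)tmp0, (unsigned)abs(tmp1));
	return 0;				/* prints -1.500000000 */
}
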
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = true,
+ .bootime = 2,
},
};
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
+ rx_desc->in_use = false;
}
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
struct ib_recv_wr *rx_wr_failed, rx_wr;
int ret;
+ if (!rx_desc->in_use) {
+ /*
+ * if the descriptor is not in-use we already reposted it
+ * for recv, so just silently return
+ */
+ return 0;
+ }
+
+ rx_desc->in_use = false;
rx_wr.wr_cqe = &rx_desc->rx_cqe;
rx_wr.sg_list = &rx_desc->rx_sg;
rx_wr.num_sge = 1;
return;
}
+ rx_desc->in_use = true;
+
ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
- if (ret)
- transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
- else
- isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+ if (ret) {
+ /*
+ * transport_generic_request_failure() expects to have
+ * plus two references to handle queue-full, so re-add
+ * one here as target-core will have already dropped
+ * it after the first isert_put_datain() callback.
+ */
+ kref_get(&cmd->cmd_kref);
+ transport_generic_request_failure(cmd, cmd->pi_err);
+ } else {
+ /*
+ * XXX: isert_put_response() failure is not retried.
+ */
+ ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+ if (ret)
+ pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+ }
}
static void
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
- if (ret) {
- target_put_sess_cmd(se_cmd);
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->pi_err, 0);
- } else {
+ /*
+ * transport_generic_request_failure() will drop the extra
+ * se_cmd->cmd_kref reference after T10-PI error, and handle
+ * any non-zero ->queue_status() callback error retries.
+ */
+ if (ret)
+ transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+ else
target_execute_cmd(se_cmd);
- }
}
static void
chain_wr = &isert_cmd->tx_desc.send_wr;
}
- isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
- isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
- return 1;
+ rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+ isert_cmd, rc);
+ return rc;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ int ret;
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
- isert_rdma_rw_ctx_post(isert_cmd, conn->context,
- &isert_cmd->tx_desc.tx_cqe, NULL);
+ ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+ &isert_cmd->tx_desc.tx_cqe, NULL);
- isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
- isert_cmd);
- return 0;
+ isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+ isert_cmd, ret);
+ return ret;
}
static int
#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
- sizeof(struct ib_cqe)))
+ sizeof(struct ib_cqe) + sizeof(bool)))
#define ISCSI_ISER_SG_TABLESIZE 256
u64 dma_addr;
struct ib_sge rx_sg;
struct ib_cqe rx_cqe;
+ bool in_use;
char pad[ISER_RX_PAD_SIZE];
} __packed;
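
The ISER_RX_PAD_SIZE change above keeps the rx descriptor at a fixed overall size, so the new in_use flag must also be subtracted from the pad. A compile-time sketch of that invariant with made-up sizes (C11 static_assert standing in for the kernel's BUILD_BUG_ON):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DESC_TOTAL_SIZE	128	/* illustrative; iser derives its own total */

struct rx_desc {
	uint64_t dma_addr;
	bool in_use;		/* newly added member ... */
	char pad[DESC_TOTAL_SIZE - sizeof(uint64_t) - sizeof(bool)];
} __attribute__((packed));	/* ... subtracted from the pad */

static_assert(sizeof(struct rx_desc) == DESC_TOTAL_SIZE,
	      "pad must absorb every added member");

int main(void) { return 0; }
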
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
+ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
config MVEBU_ODMI
bool
+ select GENERIC_MSI_IRQ_DOMAIN
config MVEBU_PIC
bool
return -ENOMEM;
}
+ raw_spin_lock_init(&cd->rlock);
+
cd->gpc_base = of_iomap(node, 0);
if (!cd->gpc_base) {
pr_err("fsl-gpcv2: unable to map gpc registers\n");
static void __init gic_map_interrupts(struct device_node *node)
{
+ gic_map_single_int(node, GIC_LOCAL_INT_WD);
+ gic_map_single_int(node, GIC_LOCAL_INT_COMPARE);
gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
+ gic_map_single_int(node, GIC_LOCAL_INT_SWINT0);
+ gic_map_single_int(node, GIC_LOCAL_INT_SWINT1);
gic_map_single_int(node, GIC_LOCAL_INT_FDC);
}
sizeof(avmb1_carddef))))
return -EFAULT;
cdef.cardtype = AVM_CARDTYPE_B1;
+ cdef.cardnr = 0;
} else {
if ((retval = copy_from_user(&cdef, data,
sizeof(avmb1_extcarddef))))
*result = true;
r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
- from_cblock(begin), &cmd->dirty_cursor);
+ from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
if (r) {
DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
return r;
return 0;
}
+ begin = to_cblock(from_cblock(begin) + 1);
+ if (begin == end)
+ break;
+
r = dm_bitset_cursor_next(&cmd->dirty_cursor);
if (r) {
DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
dm_bitset_cursor_end(&cmd->dirty_cursor);
return r;
}
-
- begin = to_cblock(from_cblock(begin) + 1);
}
dm_bitset_cursor_end(&cmd->dirty_cursor);
return r;
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
to_bytes(rs->requested_bitmap_chunk_sectors), 0);
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
+ blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
return BLK_MQ_RQ_QUEUE_BUSY;
}
block = fec_buffer_rs_block(v, fio, n, i);
res = fec_decode_rs8(v, fio, block, &par[offset], neras);
if (res < 0) {
- dm_bufio_release(buf);
-
r = res;
goto error;
}
done:
r = corrected;
error:
+ dm_bufio_release(buf);
+
if (r < 0 && neras)
DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
v->data_dev->name, (unsigned long long)rsb, r);
&is_zero) == 0) {
/* skip known zero blocks entirely */
if (is_zero)
- continue;
+ goto done;
/*
* skip if we have already found the theoretical
if (!verity_fec_is_enabled(v))
return -EOPNOTSUPP;
+ if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+ DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+ return -EIO;
+ }
+
+ fio->level++;
+
if (type == DM_VERITY_BLOCK_TYPE_METADATA)
block += v->data_blocks;
if (r < 0) {
r = fec_decode_rsb(v, io, fio, rsb, offset, true);
if (r < 0)
- return r;
+ goto done;
}
if (dest)
r = verity_for_bv_block(v, io, iter, fec_bv_copy);
}
+done:
+ fio->level--;
return r;
}
memset(fio->bufs, 0, sizeof(fio->bufs));
fio->nbufs = 0;
fio->output = NULL;
+ fio->level = 0;
}
/*
#define DM_VERITY_FEC_BUF_MAX \
(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION 4
+
#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
#define DM_VERITY_OPT_FEC_START "fec_start"
unsigned nbufs; /* number of buffers allocated */
u8 *output; /* buffer for corrected output */
size_t output_pos;
+ unsigned level; /* recursion level */
};
#ifdef CONFIG_DM_VERITY_FEC
.detach = vb2_dc_dmabuf_ops_detach,
.map_dma_buf = vb2_dc_dmabuf_ops_map,
.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
- .kmap = vb2_dc_dmabuf_ops_kmap,
- .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+ .map = vb2_dc_dmabuf_ops_kmap,
+ .map_atomic = vb2_dc_dmabuf_ops_kmap,
.vmap = vb2_dc_dmabuf_ops_vmap,
.mmap = vb2_dc_dmabuf_ops_mmap,
.release = vb2_dc_dmabuf_ops_release,
.detach = vb2_dma_sg_dmabuf_ops_detach,
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
- .kmap = vb2_dma_sg_dmabuf_ops_kmap,
- .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+ .map = vb2_dma_sg_dmabuf_ops_kmap,
+ .map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
.release = vb2_dma_sg_dmabuf_ops_release,
.detach = vb2_vmalloc_dmabuf_ops_detach,
.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
- .kmap = vb2_vmalloc_dmabuf_ops_kmap,
- .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+ .map = vb2_vmalloc_dmabuf_ops_kmap,
+ .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
.vmap = vb2_vmalloc_dmabuf_ops_vmap,
.mmap = vb2_vmalloc_dmabuf_ops_mmap,
.release = vb2_vmalloc_dmabuf_ops_release,
#include "sdhci-pltfm.h"
+#define SDMMC_MC1R 0x204
+#define SDMMC_MC1R_DDR BIT(3)
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
sdhci_set_power_noreg(host, mode, vdd);
}
+void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+ if (timing == MMC_TIMING_MMC_DDR52)
+ sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ sdhci_set_uhs_signaling(host, timing);
+}
+
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
.set_power = sdhci_at91_set_power,
};
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ if (enable)
+ pm_runtime_get_noresume(host->mmc->parent);
+
spin_lock_irqsave(&host->lock, flags);
if (enable)
host->flags |= SDHCI_SDIO_IRQ_ENABLED;
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!enable)
+ pm_runtime_put_noidle(host->mmc->parent);
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
int work_done = 0;
u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
- u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+ u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
/* Handle bus state changes */
devm_can_led_init(ndev);
- dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
- priv->regs, ndev->irq);
+ dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
fail_candev:
goto err_exit;
ndev->mtu = new_mtu;
- if (netif_running(ndev)) {
- aq_ndev_close(ndev);
- aq_ndev_open(ndev);
- }
-
err_exit:
return err;
}
dx_buff->mss = skb_shinfo(skb)->gso_size;
dx_buff->is_txc = 1U;
+ dx_buff->is_ipv6 =
+ (ip_hdr(skb)->version == 6) ? 1U : 0U;
+
dx = aq_ring_next_dx(ring, dx);
dx_buff = &ring->buff_ring[dx];
++ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
1U : 0U;
- dx_buff->is_tcp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
- dx_buff->is_udp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+
+ if (ip_hdr(skb)->version == 4) {
+ dx_buff->is_tcp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
+ 1U : 0U;
+ dx_buff->is_udp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
+ 1U : 0U;
+ } else if (ip_hdr(skb)->version == 6) {
+ dx_buff->is_tcp_cso =
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
+ 1U : 0U;
+ dx_buff->is_udp_cso =
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
+ 1U : 0U;
+ }
}
for (; nr_frags--; ++frag_count) {
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
+ spin_lock_init(&self->header.lock);
return 0;
}
u8 len_l2;
u8 len_l3;
u8 len_l4;
- u8 rsvd2;
+ u8 is_ipv6:1;
+ u8 rsvd2:7;
u32 len_pkt;
};
};
buff->len_l3 +
buff->len_l2);
is_gso = true;
+
+ if (buff->is_ipv6)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
} else {
buff_pa_len = buff->len;
if (unlikely(buff->is_eop)) {
txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+ is_gso = false;
}
}
buff->len_l3 +
buff->len_l2);
is_gso = true;
+
+ if (buff->is_ipv6)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
} else {
buff_pa_len = buff->len;
if (unlikely(buff->is_eop)) {
txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+ is_gso = false;
}
}
GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
-#define HW_INTERRUT_ASSERT_SET_0 \
+#define HW_INTERRUPT_ASSERT_SET_0 \
(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_1 \
+#define HW_INTERRUPT_ASSERT_SET_1 \
(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_2 \
+#define HW_INTERRUPT_ASSERT_SET_2 \
(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
bnx2x_release_phy_lock(bp);
}
- if (attn & HW_INTERRUT_ASSERT_SET_0) {
+ if (attn & HW_INTERRUPT_ASSERT_SET_0) {
val = REG_RD(bp, reg_offset);
- val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
REG_WR(bp, reg_offset, val);
BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
- (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
bnx2x_panic();
}
}
BNX2X_ERR("FATAL error from DORQ\n");
}
- if (attn & HW_INTERRUT_ASSERT_SET_1) {
+ if (attn & HW_INTERRUPT_ASSERT_SET_1) {
int port = BP_PORT(bp);
int reg_offset;
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
val = REG_RD(bp, reg_offset);
- val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
REG_WR(bp, reg_offset, val);
BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
- (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
bnx2x_panic();
}
}
}
}
- if (attn & HW_INTERRUT_ASSERT_SET_2) {
+ if (attn & HW_INTERRUPT_ASSERT_SET_2) {
int port = BP_PORT(bp);
int reg_offset;
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
val = REG_RD(bp, reg_offset);
- val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
REG_WR(bp, reg_offset, val);
BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
- (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
bnx2x_panic();
}
}
for (j = 0; j < max_idx; j++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+ dma_addr_t mapping = rx_buf->mapping;
void *data = rx_buf->data;
if (!data)
continue;
- dma_unmap_single(&pdev->dev, rx_buf->mapping,
- bp->rx_buf_use_size, bp->rx_dir);
-
rx_buf->data = NULL;
- if (BNXT_RX_PAGE_MODE(bp))
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ mapping -= bp->rx_dma_offset;
+ dma_unmap_page(&pdev->dev, mapping,
+ PAGE_SIZE, bp->rx_dir);
__free_page(data);
- else
+ } else {
+ dma_unmap_single(&pdev->dev, mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir);
kfree(data);
+ }
}
for (j = 0; j < max_agg_idx; j++) {
return 0;
}
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+}
+
static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
- rc, i);
+ i, rc);
return rc;
}
}
static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
+ bnxt_init_cp_rings(bp);
bnxt_init_rx_rings(bp);
bnxt_init_tx_rings(bp);
bnxt_init_ring_grps(bp, irq_re_init);
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
struct bfi_ioc_ctrl_req enable_req;
- struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = htons(ioc->clscode);
- do_gettimeofday(&tv);
- enable_req.tv_sec = ntohl(tv.tv_sec);
+ enable_req.rsvd = htons(0);
+ /* overflow in 2106 */
+ enable_req.tv_sec = ntohl(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
+ disable_req.clscode = htons(ioc->clscode);
+ disable_req.rsvd = htons(0);
+ /* overflow in 2106 */
+ disable_req.tv_sec = ntohl(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
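
The do_gettimeofday() removal above is y2038 hygiene: ktime_get_real_seconds() returns a 64-bit count, but the firmware wire format still truncates it to 32 bits, hence the "overflow in 2106" notes. A small demonstration of that truncation (plain C, unrelated to the bfa structs):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t secs64 = 4294967296LL + 42;	/* a moment past Feb 2106 */
	uint32_t wire  = (uint32_t)secs64;	/* what the htonl() sees */

	printf("64-bit seconds %lld become %u on the wire\n",
	       (long long)secs64, (unsigned)wire);	/* wraps to 42 */
	return 0;
}
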
__be_cmd_set_logical_link_config(struct be_adapter *adapter,
int link_state, int version, u8 domain)
{
- struct be_mcc_wrb *wrb;
struct be_cmd_req_set_ll_link *req;
+ struct be_mcc_wrb *wrb;
+ u32 link_config = 0;
int status;
mutex_lock(&adapter->mcc_lock);
if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
link_state == IFLA_VF_LINK_STATE_AUTO)
- req->link_config |= PLINK_ENABLE;
+ link_config |= PLINK_ENABLE;
if (link_state == IFLA_VF_LINK_STATE_AUTO)
- req->link_config |= PLINK_TRACK;
+ link_config |= PLINK_TRACK;
+
+ req->link_config = cpu_to_le32(link_config);
status = be_mcc_notify_wait(adapter);
err:
nps_enet_tx_handler(ndev);
work_done = nps_enet_rx_handler(ndev);
- if (work_done < budget) {
+ if ((work_done < budget) && napi_complete_done(napi, work_done)) {
u32 buf_int_enable_value = 0;
- napi_complete_done(napi, work_done);
-
/* set tx_done and rx_rdy bits */
buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <net/ip.h>
#include <net/ncsi.h>
else
*link_status = 0;
- ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
- if (!ret)
- *link_status = *link_status && sfp_prsnt;
+ if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
+ ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
+ &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
+ }
mac_cb->link = *link_status;
}
of_node_put(np);
np = of_parse_phandle(to_of_node(mac_cb->fw_port),
- "serdes-syscon", 0);
+ "serdes-syscon", 0);
syscon = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR_OR_NULL(syscon)) {
mac_key->high.bits.mac_3 = addr[3];
mac_key->low.bits.mac_4 = addr[4];
mac_key->low.bits.mac_5 = addr[5];
+ mac_key->low.bits.port_vlan = 0;
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
/* find the tcam entry index for promisc */
entry_index = dsaf_promisc_tcam_entry(port);
+ memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
+ memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
+
/* config key mask */
if (enable) {
- memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
- memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
return 0;
}
+int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_GET_SFP_STAT_FUNC, &argv4);
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return -ENODEV;
+
+ *sfp_prsnt = obj->integer.value;
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
/**
* hns_mac_config_sds_loopback - set loop back for serdes
* @mac_cb: mac control block
misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
- misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
} else {
/* Quiesce the device without resetting the hardware */
e1000e_down(adapter, false);
e1000_free_irq(adapter);
- e1000e_reset_interrupt_capability(adapter);
}
+ e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_enable(&q_vector->napi);
+ }
}
/**
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_disable(&q_vector->napi);
+ }
}
/**
struct netdev_notifier_changeupper_info *info)
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
- struct netdev_lag_upper_info *lag_upper_info;
+ struct netdev_lag_upper_info *lag_upper_info = NULL;
bool is_bonded;
int bond_status = 0;
int num_slaves = 0;
if (!netif_is_lag_master(upper))
return 0;
- lag_upper_info = info->upper_info;
+ if (info->linking)
+ lag_upper_info = info->upper_info;
/* The event may still be of interest if the slave does not belong to
* us, but is enslaved to a master which has one or more of our netdevs
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
+#include <linux/circ_buf.h>
#include "moxart_ether.h"
return rx;
}
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
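
moxart_tx_queue_space() leans on the standard circ_buf accounting: CIRC_SPACE(head, tail, size) is the number of free slots in a power-of-two ring, always keeping one slot open so a full ring is distinguishable from an empty one. A user-space illustration with the macro expanded from include/linux/circ_buf.h:

#include <stdio.h>

/* expanded from include/linux/circ_buf.h; size must be a power of two */
#define CIRC_SPACE(head, tail, size) \
	(((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
	int size = 32, head = 0, tail = 0;

	/* empty ring: one slot always stays free, hence size - 1 */
	printf("empty: %d free\n", CIRC_SPACE(head, tail, size));	/* 31 */

	head = 31;	/* producer has wrapped up behind the consumer */
	printf("full:  %d free\n", CIRC_SPACE(head, tail, size));	/*  0 */
	return 0;
}
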
static void moxart_tx_finished(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
tx_tail = TX_NEXT(tx_tail);
}
priv->tx_tail = tx_tail;
+ if (netif_queue_stopped(ndev) &&
+ moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+ netif_wake_queue(ndev);
}
static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void *desc;
unsigned int len;
- unsigned int tx_head = priv->tx_head;
+ unsigned int tx_head;
u32 txdes1;
int ret = NETDEV_TX_BUSY;
+ spin_lock_irq(&priv->txlock);
+
+ tx_head = priv->tx_head;
desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
- spin_lock_irq(&priv->txlock);
+ if (moxart_tx_queue_space(ndev) == 1)
+ netif_stop_queue(ndev);
+
if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n");
priv->stats.tx_dropped++;
#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
#define TX_BUF_SIZE 1600
#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD 16
#define RX_DESC_NUM 64
#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
{
struct nfp_net *nn = netdev_priv(netdev);
+ unregister_netdev(nn->netdev);
+
if (nn->xdp_prog)
bpf_prog_put(nn->xdp_prog);
if (nn->bpf_offload_xdp)
nfp_net_xdp_offload(nn, NULL);
- unregister_netdev(nn->netdev);
}
{
bool want[OFDPA_CTRL_MAX] = { 0, };
bool prev_ctrls[OFDPA_CTRL_MAX];
- u8 uninitialized_var(prev_state);
+ u8 prev_state;
int err;
int i;
- if (switchdev_trans_ph_prepare(trans)) {
- memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
- prev_state = ofdpa_port->stp_state;
- }
-
- if (ofdpa_port->stp_state == state)
+ prev_state = ofdpa_port->stp_state;
+ if (prev_state == state)
return 0;
+ memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
ofdpa_port->stp_state = state;
switch (state) {
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
u32 slave_port;
+ struct phy_device *phy;
struct cpsw_common *cpsw = priv->cpsw;
soft_reset_slave(slave);
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
if (slave->data->phy_node) {
- slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+ phy = of_phy_connect(priv->ndev, slave->data->phy_node,
&cpsw_adjust_link, 0, slave->data->phy_if);
- if (!slave->phy) {
+ if (!phy) {
dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
slave->data->phy_node->full_name,
slave->slave_num);
return;
}
} else {
- slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+ phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
- if (IS_ERR(slave->phy)) {
+ if (IS_ERR(phy)) {
dev_err(priv->dev,
"phy \"%s\" not found on slave %d, err %ld\n",
slave->data->phy_id, slave->slave_num,
- PTR_ERR(slave->phy));
- slave->phy = NULL;
+ PTR_ERR(phy));
return;
}
}
+ slave->phy = phy;
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
}
cpsw_intr_enable(cpsw);
+ netif_trans_update(ndev);
+ netif_tx_wake_all_queues(ndev);
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
memset(rd, 0, sizeof(*rd));
rd->hw = hwmap + i;
rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
- if (rd->buf == NULL ||
- !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+ if (rd->buf)
+ busaddr = pci_map_single(pdev, rd->buf, len, dir);
+ if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
if (rd->buf) {
net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
__func__, rd->buf);
rd = r->rd + j;
busaddr = rd_get_addr(rd);
rd_set_addr_status(rd, 0, 0);
- if (busaddr)
- pci_unmap_single(pdev, busaddr, len, dir);
+ pci_unmap_single(pdev, busaddr, len, dir);
kfree(rd->buf);
rd->buf = NULL;
}
return 0;
}
+EXPORT_SYMBOL(mdiobus_register_board_info);
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
}
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
-static void ___team_compute_features(struct team *team)
+static void __team_compute_features(struct team *team)
{
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}
-static void __team_compute_features(struct team *team)
-{
- ___team_compute_features(team);
- netdev_change_features(team->dev);
-}
-
static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
- ___team_compute_features(team);
+ __team_compute_features(team);
mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
+ netdev_change_features(dev);
}
static void team_destructor(struct net_device *dev)
mutex_lock(&team->lock);
err = team_port_add(team, port_dev);
mutex_unlock(&team->lock);
+
+ if (!err)
+ netdev_change_features(dev);
+
return err;
}
mutex_lock(&team->lock);
err = team_port_del(team, port_dev);
mutex_unlock(&team->lock);
+
+ if (!err)
+ netdev_change_features(dev);
+
return err;
}
#define LENOVO_VENDOR_ID 0x17ef
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
+#define MICROSOFT_VENDOR_ID 0x045e
static const struct usb_device_id products[] = {
/* BLACKLIST !!
.driver_info = 0,
},
+/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
- {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
+#define VENDOR_ID_MICROSOFT 0x045e
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
#define VENDOR_ID_NVIDIA 0x0955
}
} else {
if (netif_carrier_ok(tp->netdev)) {
+ netif_stop_queue(tp->netdev);
set_bit(RTL8152_LINK_CHG, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
}
napi_enable(&tp->napi);
netif_wake_queue(netdev);
netif_info(tp, link, netdev, "carrier on\n");
+ } else if (netif_queue_stopped(netdev) &&
+ skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
+ netif_wake_queue(netdev);
}
} else {
if (netif_carrier_ok(netdev)) {
tp->rtl_ops.autosuspend_en(tp, false);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
- if (netif_carrier_ok(tp->netdev))
- rtl_start_rx(tp);
+
+ if (netif_carrier_ok(tp->netdev)) {
+ if (rtl8152_get_speed(tp) & LINK_STATUS) {
+ rtl_start_rx(tp);
+ } else {
+ netif_carrier_off(tp->netdev);
+ tp->rtl_ops.disable(tp);
+ netif_info(tp, link, tp->netdev,
+ "linking down\n");
+ }
+ }
+
napi_enable(&tp->napi);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
smp_mb__after_atomic();
static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
" value=0x%04x index=0x%04x size=%d\n",
cmd, reqtype, value, index, size);
- if (data) {
+ if (size) {
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
goto out;
err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
cmd, reqtype, value, index, buf, size,
USB_CTRL_GET_TIMEOUT);
- if (err > 0 && err <= size)
- memcpy(data, buf, err);
+ if (err > 0 && err <= size) {
+ if (data)
+ memcpy(data, buf, err);
+ else
+ netdev_dbg(dev->net,
+ "Huh? Data requested but thrown away.\n");
+ }
kfree(buf);
out:
return err;
buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
goto out;
- }
+ } else {
+ if (size) {
+ WARN_ON_ONCE(1);
+ err = -EINVAL;
+ goto out;
+ }
+ }
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
cmd, reqtype, value, index, buf, size,
#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU
-static int virtnet_probe(struct virtio_device *vdev)
+static int virtnet_validate(struct virtio_device *vdev)
{
- int i, err;
- struct net_device *dev;
- struct virtnet_info *vi;
- u16 max_queue_pairs;
- int mtu;
-
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
if (!virtnet_validate_features(vdev))
return -EINVAL;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+ int mtu = virtio_cread16(vdev,
+ offsetof(struct virtio_net_config,
+ mtu));
+ if (mtu < MIN_MTU)
+ __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+ }
+
+ return 0;
+}
+
+static int virtnet_probe(struct virtio_device *vdev)
+{
+ int i, err;
+ struct net_device *dev;
+ struct virtnet_info *vi;
+ u16 max_queue_pairs;
+ int mtu;
+
/* Find if host supports multiqueue virtio_net device */
err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
struct virtio_net_config,
offsetof(struct virtio_net_config,
mtu));
if (mtu < dev->min_mtu) {
- __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
- } else {
- dev->mtu = mtu;
- dev->max_mtu = mtu;
+ /* Should never trigger: MTU was previously validated
+ * in virtnet_validate.
+ */
+ dev_err(&vdev->dev, "device MTU appears to have changed, "
+ "it is now %d < %d\n", mtu, dev->min_mtu);
+ goto free_stats;
}
+
+ dev->mtu = mtu;
+ dev->max_mtu = mtu;
+
+ /* TODO: size buffers correctly in this case. */
+ if (dev->mtu > ETH_DATA_LEN)
+ vi->big_packets = true;
}
if (vi->any_header_sg)
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
+ .validate = virtnet_validate,
.probe = virtnet_probe,
.remove = virtnet_remove,
.config_changed = virtnet_config_changed,
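
The new .validate callback registered above runs before probe, while feature negotiation is still open, so a bad device MTU can be handled by clearing VIRTIO_NET_F_MTU instead of failing a half-constructed probe. A minimal user-space sketch of that two-phase shape; everything here is a hypothetical stand-in for the virtio core:

#include <stdbool.h>
#include <stdio.h>

#define FEATURE_MTU	(1UL << 3)	/* stand-in for VIRTIO_NET_F_MTU */
#define MIN_MTU		68		/* stand-in for ETH_MIN_MTU */

struct fake_dev {
	unsigned long features;
	bool probed;
};

/* runs first, while the feature set may still be trimmed */
static int validate(struct fake_dev *d)
{
	int mtu = 42;	/* pretend the device advertised a bogus MTU */

	if ((d->features & FEATURE_MTU) && mtu < MIN_MTU)
		d->features &= ~FEATURE_MTU;	/* veto the feature, not the device */
	return 0;
}

/* runs second; the negotiated features are now final */
static int probe(struct fake_dev *d)
{
	d->probed = true;	/* no mid-probe unwinding needed */
	return 0;
}

int main(void)
{
	struct fake_dev d = { .features = FEATURE_MTU };

	if (validate(&d) == 0)
		probe(&d);
	printf("probed=%d, mtu feature left=%lu\n",
	       d.probed, d.features & FEATURE_MTU);
	return 0;
}
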
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_p2p_info *p2p = &cfg->p2p;
struct brcmf_cfg80211_vif *vif;
+ enum nl80211_iftype iftype;
bool wait_for_disable = false;
int err;
brcmf_dbg(TRACE, "delete P2P vif\n");
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+ iftype = vif->wdev.iftype;
brcmf_cfg80211_arm_vif_event(cfg, vif);
- switch (vif->wdev.iftype) {
+ switch (iftype) {
case NL80211_IFTYPE_P2P_CLIENT:
if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
wait_for_disable = true;
BRCMF_P2P_DISABLE_TIMEOUT);
err = 0;
- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
brcmf_vif_clear_mgmt_ies(vif);
err = brcmf_p2p_release_p2p_if(vif);
}
brcmf_remove_interface(vif->ifp, true);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE)
p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
return err;
if (ret)
return ret;
+ if (count == 0)
+ return 0;
iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);
qmask |= BIT(vif->hw_queue[ac]);
}
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
qmask |= BIT(vif->cab_queue);
return qmask;
return;
rcu_read_lock();
- sta = mvm->fw_id_to_mac_id[notif->sta_id];
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
if (WARN_ON(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return;
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int queue;
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
* enabled-cab_queue to the mask)
*/
if (iwl_mvm_is_dqa_supported(mvm) &&
- vif->type == NL80211_IFTYPE_AP) {
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvmvif->bcast_sta.sta_id,
lockdep_assert_held(&mvm->mutex);
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
/*
* Handle legacy hostapd as well, where station may be added
* only after assoc. Take care of the case where we send a
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
- WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
+ WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+ "fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
iwl_mvm_vif_from_mac80211(info.control.vif);
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP) {
+ info.control.vif->type == NL80211_IFTYPE_AP ||
+ info.control.vif->type == NL80211_IFTYPE_ADHOC) {
sta_id = mvmvif->bcast_sta.sta_id;
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
unsigned long flags;
struct rtl_c2hcmd *c2hcmd;
- c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
+ c2hcmd = kmalloc(sizeof(*c2hcmd),
+ in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!c2hcmd)
goto label_err;
- c2hcmd->val = kmalloc(len, GFP_KERNEL);
+ c2hcmd->val = kmalloc(len,
+ in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!c2hcmd->val)
goto label_err2;
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
if (rc < 0)
goto out_unlock;
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
+
+ vfree(buf);
+ return rc;
+
out_unlock:
nvdimm_bus_unlock(&nvdimm_bus->dev);
out:
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
- if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+ /*
+ * FIXME: nsio_rw_bytes() may be called from atomic
+ * context in the btt case and nvdimm_clear_poison()
+ * takes a sleeping lock. Until the locking can be
+ * reworked this capability requires that the namespace
+ * is not claimed by btt.
+ */
+ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+ && (!ndns->claim || !is_nd_btt(ndns->claim))) {
long cleared;
cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
int alias_dpa_busy(struct device *dev, void *data)
{
- resource_size_t map_end, blk_start, new, busy;
+ resource_size_t map_end, blk_start, new;
struct blk_alloc_info *info = data;
struct nd_mapping *nd_mapping;
struct nd_region *nd_region;
retry:
/*
* Find the free dpa from the end of the last pmem allocation to
- * the end of the interleave-set mapping that is not already
- * covered by a blk allocation.
+ * the end of the interleave-set mapping.
*/
- busy = 0;
for_each_dpa_resource(ndd, res) {
+ if (strncmp(res->name, "pmem", 4) != 0)
+ continue;
if ((res->start >= blk_start && res->start < map_end)
|| (res->end >= blk_start
&& res->end <= map_end)) {
- if (strncmp(res->name, "pmem", 4) == 0) {
- new = max(blk_start, min(map_end + 1,
- res->end + 1));
- if (new != blk_start) {
- blk_start = new;
- goto retry;
- }
- } else
- busy += min(map_end, res->end)
- - max(nd_mapping->start, res->start) + 1;
- } else if (nd_mapping->start > res->start
- && map_end < res->end) {
- /* total eclipse of the PMEM region mapping */
- busy += nd_mapping->size;
- break;
+ new = max(blk_start, min(map_end + 1, res->end + 1));
+ if (new != blk_start) {
+ blk_start = new;
+ goto retry;
+ }
}
}
return 1;
}
- info->available -= blk_start - nd_mapping->start + busy;
+ info->available -= blk_start - nd_mapping->start;
return 0;
}
-static int blk_dpa_busy(struct device *dev, void *data)
-{
- struct blk_alloc_info *info = data;
- struct nd_mapping *nd_mapping;
- struct nd_region *nd_region;
- resource_size_t map_end;
- int i;
-
- if (!is_nd_pmem(dev))
- return 0;
-
- nd_region = to_nd_region(dev);
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- nd_mapping = &nd_region->mapping[i];
- if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
- break;
- }
-
- if (i >= nd_region->ndr_mappings)
- return 0;
-
- map_end = nd_mapping->start + nd_mapping->size - 1;
- if (info->res->start >= nd_mapping->start
- && info->res->start < map_end) {
- if (info->res->end <= map_end) {
- info->busy = 0;
- return 1;
- } else {
- info->busy -= info->res->end - map_end;
- return 0;
- }
- } else if (info->res->end >= nd_mapping->start
- && info->res->end <= map_end) {
- info->busy -= nd_mapping->start - info->res->start;
- return 0;
- } else {
- info->busy -= nd_mapping->size;
- return 0;
- }
-}
-
/**
* nd_blk_available_dpa - account the unused dpa of BLK region
* @nd_mapping: container of dpa-resource-root + labels
for_each_dpa_resource(ndd, res) {
if (strncmp(res->name, "blk", 3) != 0)
continue;
-
- info.res = res;
- info.busy = resource_size(res);
- device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
- info.available -= info.busy;
+ info.available -= resource_size(res);
}
return info.available;
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.nr = segments - 1;
+ cmnd->dsm.nr = cpu_to_le32(segments - 1);
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
req->special_vec.bv_page = virt_to_page(range);
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
u16 status;
WARN_ON(req == NULL || slog == NULL);
- if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
+ if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
status = nvmet_get_smart_log_all(req, slog);
else
status = nvmet_get_smart_log_nsid(req, slog);
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
- nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) <<
+ nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
(req->ns->blksize_shift - 9)) + 1;
if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_execute_dsm;
- req->data_len = le32_to_cpu(cmd->dsm.nr + 1) *
+ req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
+ select PCI_HOST_COMMON
help
Say Y here if you want PCIe controller support on HiSilicon
Hip05 and Hip06 SoCs
return 0;
}
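+/*
+ * The shared DWC host code expects pci->ops to be valid, so drivers
+ * without custom register accessors still provide an empty ops struct.
+ */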
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
return -ENOMEM;
pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
artpec6_pcie->pci = pci;
return 0;
}
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
return -ENOMEM;
pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
* Copyright (C) 2015 - 2016 Cavium, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+#define PEM_RES_BASE 0x87e0c0000000UL
+#define PEM_NODE_MASK GENMASK(45, 44)
+#define PEM_INDX_MASK GENMASK(26, 24)
+#define PEM_MIN_DOM_IN_NODE 4
+#define PEM_MAX_DOM_IN_NODE 10
+
+static void thunder_pem_reserve_range(struct device *dev, int seg,
+ struct resource *r)
+{
+ resource_size_t start = r->start, end = r->end;
+ struct resource *res;
+ const char *regionid;
+
+ regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
+ if (!regionid)
+ return;
+
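+	/* Reserve the range, but clear BUSY so it can still be mapped later. */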
+ res = request_mem_region(start, end - start + 1, regionid);
+ if (res)
+ res->flags &= ~IORESOURCE_BUSY;
+ else
+ kfree(regionid);
+
+ dev_info(dev, "%pR %s reserved\n", r,
+ res ? "has been" : "could not be");
+}
+
+static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
+ struct resource *res_pem)
+{
+ int node = acpi_get_node(root->device->handle);
+ int index;
+
+ if (node == NUMA_NO_NODE)
+ node = 0;
+
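+	/*
+	 * With legacy firmware, PEM domains start at PEM_MIN_DOM_IN_NODE
+	 * and each NUMA node spans PEM_MAX_DOM_IN_NODE of them.
+	 */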
+ index = root->segment - PEM_MIN_DOM_IN_NODE;
+ index -= node * PEM_MAX_DOM_IN_NODE;
+ res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
+ FIELD_PREP(PEM_INDX_MASK, index);
+ res_pem->flags = IORESOURCE_MEM;
+}
+
static int thunder_pem_acpi_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
if (!res_pem)
return -ENOMEM;
- ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem);
+ ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
+
+ /*
+ * If we fail to gather resources it means that we are running with
+ * old firmware, where we need to calculate the PEM-specific resources
+ * manually.
+ */
if (ret) {
- dev_err(dev, "can't get rc base address\n");
- return ret;
+ thunder_pem_legacy_fw(root, res_pem);
+ /*
+ * Reserve a 64K-sized PEM-specific resource. The full 16M range
+ * is still required for the thunder_pem_init() call.
+ */
+ res_pem->end = res_pem->start + SZ_64K - 1;
+ thunder_pem_reserve_range(dev, root->segment, res_pem);
+ res_pem->end = res_pem->start + SZ_16M - 1;
+
+ /* Reserve PCI configuration space as well. */
+ thunder_pem_reserve_range(dev, root->segment, &cfg->res);
}
return thunder_pem_init(dev, cfg, res_pem);
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
- LIST_HEAD(res);
- struct resource res_mem;
+ LIST_HEAD(resources);
int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
pcie->base_addr = bdev->addr;
- res_mem.start = bdev->addr_s[0];
- res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
- res_mem.name = "PCIe MEM space";
- res_mem.flags = IORESOURCE_MEM;
- pci_add_resource(&res, &res_mem);
+ pcie->mem.start = bdev->addr_s[0];
+ pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+ pcie->mem.name = "PCIe MEM space";
+ pcie->mem.flags = IORESOURCE_MEM;
+ pci_add_resource(&resources, &pcie->mem);
pcie->map_irq = iproc_pcie_bcma_map_irq;
- ret = iproc_pcie_setup(pcie, &res);
- if (ret)
+ ret = iproc_pcie_setup(pcie, &resources);
+ if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
-
- pci_free_resource_list(&res);
+ pci_free_resource_list(&resources);
+ return ret;
+ }
bcma_set_drvdata(bdev, pcie);
- return ret;
+ return 0;
}
static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
- LIST_HEAD(res);
+ LIST_HEAD(resources);
int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
pcie->phy = NULL;
}
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
+ &iobase);
if (ret) {
- dev_err(dev,
- "unable to get PCI host bridge resources\n");
+ dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
}
pcie->map_irq = of_irq_parse_and_map_pci;
}
- ret = iproc_pcie_setup(pcie, &res);
- if (ret)
+ ret = iproc_pcie_setup(pcie, &resources);
+ if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
-
- pci_free_resource_list(&res);
+ pci_free_resource_list(&resources);
+ return ret;
+ }
platform_set_drvdata(pdev, pcie);
- return ret;
+ return 0;
}
static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
#ifdef CONFIG_ARM
struct pci_sys_data sysdata;
#endif
+ struct resource mem;
struct pci_bus *root_bus;
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
return ERR_PTR(ret);
}
-static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
+static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
{
pctldev->p = create_pinctrl(pctldev->dev, pctldev);
- if (!IS_ERR(pctldev->p)) {
- kref_get(&pctldev->p->users);
- pctldev->hog_default =
- pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(pctldev->hog_default)) {
- dev_dbg(pctldev->dev,
- "failed to lookup the default state\n");
- } else {
- if (pinctrl_select_state(pctldev->p,
- pctldev->hog_default))
- dev_err(pctldev->dev,
- "failed to select default state\n");
- }
+ if (PTR_ERR(pctldev->p) == -ENODEV) {
+ dev_dbg(pctldev->dev, "no hogs found\n");
- pctldev->hog_sleep =
- pinctrl_lookup_state(pctldev->p,
- PINCTRL_STATE_SLEEP);
- if (IS_ERR(pctldev->hog_sleep))
- dev_dbg(pctldev->dev,
- "failed to lookup the sleep state\n");
+ return 0;
+ }
+
+ if (IS_ERR(pctldev->p)) {
+ dev_err(pctldev->dev, "error claiming hogs: %li\n",
+ PTR_ERR(pctldev->p));
+
+ return PTR_ERR(pctldev->p);
+ }
+
+ kref_get(&pctldev->p->users);
+ pctldev->hog_default =
+ pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(pctldev->hog_default)) {
+ dev_dbg(pctldev->dev,
+ "failed to lookup the default state\n");
+ } else {
+ if (pinctrl_select_state(pctldev->p,
+ pctldev->hog_default))
+ dev_err(pctldev->dev,
+ "failed to select default state\n");
+ }
+
+ pctldev->hog_sleep =
+ pinctrl_lookup_state(pctldev->p,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(pctldev->hog_sleep))
+ dev_dbg(pctldev->dev,
+ "failed to lookup the sleep state\n");
+
+ return 0;
+}
+
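+/**
+ * pinctrl_enable() - enable a previously registered pin controller
+ * @pctldev: pin controller to enable
+ *
+ * Claims any hog pins before the controller is made available for use.
+ */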
+int pinctrl_enable(struct pinctrl_dev *pctldev)
+{
+ int error;
+
+ error = pinctrl_claim_hogs(pctldev);
+ if (error) {
+ dev_err(pctldev->dev, "could not claim hogs: %i\n",
+ error);
+ mutex_destroy(&pctldev->mutex);
+ kfree(pctldev);
+
+ return error;
}
mutex_lock(&pinctrldev_list_mutex);
return 0;
}
+EXPORT_SYMBOL_GPL(pinctrl_enable);
/**
* pinctrl_register() - register a pin controller device
if (IS_ERR(pctldev))
return pctldev;
- error = pinctrl_create_and_start(pctldev);
- if (error) {
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
-
+ error = pinctrl_enable(pctldev);
+ if (error)
return ERR_PTR(error);
- }
return pctldev;
}
EXPORT_SYMBOL_GPL(pinctrl_register);
+/**
+ * pinctrl_register_and_init() - register and init pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ * @pctldev: pin controller device
+ *
+ * Note that pinctrl_enable() still needs to be manually called after
+ * this once the driver is ready.
+ */
int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev)
{
struct pinctrl_dev *p;
- int error;
p = pinctrl_init_controller(pctldesc, dev, driver_data);
if (IS_ERR(p))
*/
*pctldev = p;
- error = pinctrl_create_and_start(p);
- if (error) {
- mutex_destroy(&p->mutex);
- kfree(p);
- *pctldev = NULL;
-
- return error;
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
- return 0;
+ return pinctrl_enable(ipctl->pctl);
free:
imx_free_resources(ipctl);
* published by the Free Software Foundation.
*/
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
chained_irq_exit(chip, desc);
}
+/*
+ * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
+ * tables. Since we leave GPIOs that are not capable of generating
+ * interrupts out of the irqdomain, the numbering will be different and
+ * devices using the hardcoded IRQ numbers will fail. In order not to
+ * break such machines, we only mask pins from the irqdomain if the
+ * machine is not listed below.
+ */
+static const struct dmi_system_id chv_no_valid_mask[] = {
+ {
+ /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
+ .ident = "Acer Chromebook (CYAN)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+ DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+ },
+ }
+};
+
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
const struct chv_gpio_pinrange *range;
struct gpio_chip *chip = &pctrl->chip;
+ bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
*chip = chv_gpio_chip;
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
- chip->irq_need_valid_mask = true;
+ chip->irq_need_valid_mask = need_valid_mask;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
- if (intsel >= pctrl->community->nirqs)
+ if (need_valid_mask && intsel >= pctrl->community->nirqs)
clear_bit(i, chip->irq_valid_mask);
}
};
static const char * const i2c_ao_groups[] = {
- "i2c_sdk_ao", "i2c_sda_ao",
+ "i2c_sck_ao", "i2c_sda_ao",
};
static const char * const i2c_slave_ao_groups[] = {
- "i2c_slave_sdk_ao", "i2c_slave_sda_ao",
+ "i2c_slave_sck_ao", "i2c_slave_sda_ao",
};
static const char * const remote_input_ao_groups[] = {
dev_info(pcs->dev, "%i pins at pa %p size %u\n",
pcs->desc.npins, pcs->base, pcs->size);
- return 0;
+ return pinctrl_enable(pcs->pctl);
free:
pcs_free_resources(pcs);
writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
}
+static int st_gpio_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
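+	/* a pin must be configured as an input before it can trigger IRQs */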
+ st_gpio_direction_input(gc, d->hwirq);
+
+ return gpiochip_lock_as_irq(gc, d->hwirq);
+}
+
+static void st_gpio_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_unlock_as_irq(gc, d->hwirq);
+}
+
static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
};
static struct irq_chip st_gpio_irqchip = {
- .name = "GPIO",
- .irq_disable = st_gpio_irq_mask,
- .irq_mask = st_gpio_irq_mask,
- .irq_unmask = st_gpio_irq_unmask,
- .irq_set_type = st_gpio_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .name = "GPIO",
+ .irq_request_resources = st_gpio_irq_request_resources,
+ .irq_release_resources = st_gpio_irq_release_resources,
+ .irq_disable = st_gpio_irq_mask,
+ .irq_mask = st_gpio_irq_mask,
+ .irq_unmask = st_gpio_irq_unmask,
+ .irq_set_type = st_gpio_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static int st_gpiolib_register_bank(struct st_pinctrl *info,
PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
};
static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_status_reg);
- val &= ~BIT(g->intr_status_bit);
- writel(val, pctrl->regs + g->intr_status_reg);
-
val = readl(pctrl->regs + g->intr_cfg_reg);
val |= BIT(g->intr_enable_bit);
writel(val, pctrl->regs + g->intr_cfg_reg);
/* pin banks of exynos5433 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
- EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
};
/* pin banks of exynos5433 pin-controller - AUD */
static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
- EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
/* pin banks of exynos5433 pin-controller - CPIF */
static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
};
/* pin banks of exynos5433 pin-controller - eSE */
static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
};
/* pin banks of exynos5433 pin-controller - FINGER */
static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
};
/* pin banks of exynos5433 pin-controller - FSYS */
static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
- EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
- EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
};
/* pin banks of exynos5433 pin-controller - IMEM */
static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
};
/* pin banks of exynos5433 pin-controller - NFC */
static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos5433 pin-controller - PERIC */
static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
- EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
- EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
- EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
- EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
- EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
- EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
- EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
- EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
- EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
- EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
- EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
- EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
- EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
};
/* pin banks of exynos5433 pin-controller - TOUCH */
static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
/*
.name = id \
}
-#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
- { \
- .type = &bank_type_alive, \
- .pctl_offset = reg, \
- .nr_pins = pins, \
- .eint_type = EINT_TYPE_WKUP, \
- .eint_offset = offs, \
- .name = id, \
- .pctl_res_idx = pctl_idx, \
- } \
-
#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
{ \
.type = &exynos5433_bank_type_off, \
for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- virt_base[i] = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(virt_base[i]))
- return ERR_CAST(virt_base[i]);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get mem%d resource\n", i);
+ return ERR_PTR(-EINVAL);
+ }
+ virt_base[i] = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!virt_base[i]) {
+ dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
+ return ERR_PTR(-EIO);
+ }
}
bank = d->pin_banks;
pmx->pctl_desc.pins = pmx->pins;
pmx->pctl_desc.npins = pfc->info->nr_pins;
- return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
- &pmx->pctl);
+ ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
+ &pmx->pctl);
+ if (ret) {
+ dev_err(pfc->dev, "could not register: %i\n", ret);
+
+ return ret;
+ }
+
+ return pinctrl_enable(pmx->pctl);
}
config PINCTRL_TI_IODELAY
tristate "TI IODelay Module pinconf driver"
- depends on OF
+ depends on OF && (SOC_DRA7XX || COMPILE_TEST)
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
platform_set_drvdata(pdev, iod);
+ return pinctrl_enable(iod->pctl);
+
exit_out:
of_node_put(np);
return ret;
kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
- if (IS_ERR(kvm_ptp_clock.ptp_clock))
- return PTR_ERR(kvm_ptp_clock.ptp_clock);
-
- return 0;
+ return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
}
module_init(ptp_kvm_init);
.clk_rate = 19200000,
.npwm = 4,
.base_unit_bits = 22,
+ .bypass = true,
+};
+
+/* Tangier */
+static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
+ .clk_rate = 19200000,
+ .npwm = 4,
+ .base_unit_bits = 22,
};
static int pwm_lpss_probe_pci(struct pci_dev *pdev,
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
{ PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
- { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info},
+ { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
{ PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
{ PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
.clk_rate = 19200000,
.npwm = 4,
.base_unit_bits = 22,
+ .bypass = true,
};
static int pwm_lpss_probe_platform(struct platform_device *pdev)
writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
}
-static int pwm_lpss_update(struct pwm_device *pwm)
+static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
{
struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
u32 val;
int err;
- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
-
/*
* PWM Configuration register has SW_UPDATE bit that is set when a new
* configuration is written to the register. The bit is automatically
pwm_lpss_write(pwm, ctrl);
}
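+/*
+ * Depending on the boardinfo 'bypass' flag, the PWM must be enabled either
+ * before SW_UPDATE is acknowledged or only after the update has completed.
+ */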
+static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
+{
+ if (cond)
+ pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+}
+
static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
return ret;
}
pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
- ret = pwm_lpss_update(pwm);
+ pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+ pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
+ ret = pwm_lpss_wait_for_update(pwm);
if (ret) {
pm_runtime_put(chip->dev);
return ret;
}
- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+ pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
} else {
ret = pwm_lpss_is_updating(pwm);
if (ret)
return ret;
pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
- return pwm_lpss_update(pwm);
+ pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+ return pwm_lpss_wait_for_update(pwm);
}
} else if (pwm_is_enabled(pwm)) {
pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
unsigned long clk_rate;
unsigned int npwm;
unsigned long base_unit_bits;
+ bool bypass;
};
struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
return 0;
}
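+/*
+ * Keep the PWM clock state in sync with the PWM itself: the clock is
+ * enabled before switching the PWM on and released after switching it off.
+ */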
+static int rockchip_pwm_enable(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ bool enable,
+ enum pwm_polarity polarity)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ int ret;
+
+ if (enable) {
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+ }
+
+ pc->data->set_enable(chip, pwm, enable, polarity);
+
+ if (!enable)
+ clk_disable(pc->clk);
+
+ return 0;
+}
+
static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
return ret;
if (state->polarity != curstate.polarity && enabled) {
- pc->data->set_enable(chip, pwm, false, state->polarity);
+ ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
+ if (ret)
+ goto out;
enabled = false;
}
ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (ret) {
if (enabled != curstate.enabled)
- pc->data->set_enable(chip, pwm, !enabled,
- state->polarity);
-
+ rockchip_pwm_enable(chip, pwm, !enabled,
+ state->polarity);
goto out;
}
- if (state->enabled != enabled)
- pc->data->set_enable(chip, pwm, state->enabled,
- state->polarity);
+ if (state->enabled != enabled) {
+ ret = rockchip_pwm_enable(chip, pwm, state->enabled,
+ state->polarity);
+ if (ret)
+ goto out;
+ }
/*
* Update the state with the real hardware, which can differ a bit
#include "tsi721.h"
#ifdef DEBUG
-u32 dbg_level;
-module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+u32 tsi_dbg_level;
+module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
};
#ifdef DEBUG
-extern u32 dbg_level;
+extern u32 tsi_dbg_level;
#define tsi_debug(level, dev, fmt, arg...) \
do { \
- if (DBG_##level & dbg_level) \
+ if (DBG_##level & tsi_dbg_level) \
dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
} while (0)
#else
}
EXPORT_SYMBOL_GPL(reset_control_status);
-static struct reset_control *__reset_control_get(
+static struct reset_control *__reset_control_get_internal(
struct reset_controller_dev *rcdev,
unsigned int index, bool shared)
{
return rstc;
}
-static void __reset_control_put(struct reset_control *rstc)
+static void __reset_control_put_internal(struct reset_control *rstc)
{
lockdep_assert_held(&reset_list_mutex);
}
/* reset_list_mutex also protects the rcdev's reset_control list */
- rstc = __reset_control_get(rcdev, rstc_id, shared);
+ rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);
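+/*
+ * Device-level lookup helper: resolve the reset control via the device
+ * tree when @dev has an OF node, otherwise fail (NULL if it is optional).
+ */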
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+ int index, bool shared, bool optional)
+{
+ if (dev->of_node)
+ return __of_reset_control_get(dev->of_node, id, index, shared,
+ optional);
+
+ return optional ? NULL : ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(__reset_control_get);
+
/**
* reset_control_put - free the reset controller
* @rstc: reset controller
return;
mutex_lock(&reset_list_mutex);
- __reset_control_put(rstc);
+ __reset_control_put_internal(rstc);
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);
if (!ptr)
return ERR_PTR(-ENOMEM);
- rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
- id, index, shared, optional);
+ rstc = __reset_control_get(dev, id, index, shared, optional);
if (!IS_ERR(rstc)) {
*ptr = rstc;
devres_add(dev, ptr);
rc = -EIO;
goto out;
}
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN(
+ "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
}
/*
- * Fetch just the mkvp value via query_crypto_facility from adapter.
+ * Fetch the current and old mkvp values via
+ * query_crypto_facility from the adapter.
*/
-static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
+static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
{
int rc, found = 0;
size_t rlen, vlen;
rc = query_crypto_facility(cardnr, domain, "STATICSA",
rarray, &rlen, varray, &vlen);
if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
- if (rarray[64] == '2') {
+ if (rarray[8*8] == '2') {
/* current master key state is valid */
- *mkvp = *((u64 *)(varray + 184));
+ mkvp[0] = *((u64 *)(varray + 184));
+ mkvp[1] = *((u64 *)(varray + 172));
found = 1;
}
}
struct list_head list;
u16 cardnr;
u16 domain;
- u64 mkvp;
+ u64 mkvp[2];
};
/* a list with mkvp_info entries */
static LIST_HEAD(mkvp_list);
static DEFINE_SPINLOCK(mkvp_list_lock);
-static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
+static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
{
int rc = -ENOENT;
struct mkvp_info *ptr;
list_for_each_entry(ptr, &mkvp_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
- *mkvp = ptr->mkvp;
+ memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
rc = 0;
break;
}
return rc;
}
-static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
+static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
{
int found = 0;
struct mkvp_info *ptr;
list_for_each_entry(ptr, &mkvp_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
- ptr->mkvp = mkvp;
+ memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
found = 1;
break;
}
}
ptr->cardnr = cardnr;
ptr->domain = domain;
- ptr->mkvp = mkvp;
+ memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
list_add(&ptr->list, &mkvp_list);
}
spin_unlock_bh(&mkvp_list_lock);
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
struct zcrypt_device_matrix *device_matrix;
u16 card, dom;
- u64 mkvp;
- int i, rc;
+ u64 mkvp[2];
+ int i, rc, oi = -1;
/* mkvp must not be zero */
if (t->mkvp == 0)
device_matrix->device[i].functions & 0x04) {
/* an enabled CCA Coprocessor card */
/* try cached mkvp */
- if (mkvp_cache_fetch(card, dom, &mkvp) == 0 &&
- t->mkvp == mkvp) {
+ if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
+ t->mkvp == mkvp[0]) {
if (!verify)
break;
/* verify: fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, &mkvp) == 0) {
+ if (fetch_mkvp(card, dom, mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp)
+ if (t->mkvp == mkvp[0])
break;
}
}
card = AP_QID_CARD(device_matrix->device[i].qid);
dom = AP_QID_QUEUE(device_matrix->device[i].qid);
/* fresh fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, &mkvp) == 0) {
+ if (fetch_mkvp(card, dom, mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp)
+ if (t->mkvp == mkvp[0])
break;
+ if (t->mkvp == mkvp[1] && oi < 0)
+ oi = i;
}
}
+ if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+ /* an old mkvp matched, so use this card */
+ card = AP_QID_CARD(device_matrix->device[oi].qid);
+ dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+ }
}
- if (i < MAX_ZDEV_ENTRIES) {
+ if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
if (pcardnr)
*pcardnr = card;
if (pdomain)
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
-int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
+int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
+ int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int, int, int);
* @card: qeth card structure, to check max. elems.
* @skb: SKB address
* @extra_elems: extra elems needed, to check against max.
+ * @data_offset: range starts at skb->data + data_offset
*
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
* skb data, including linear part and fragments. Checks if the result plus
* Note: extra_elems is not included in the returned result.
*/
int qeth_get_elements_no(struct qeth_card *card,
- struct sk_buff *skb, int extra_elems)
+ struct sk_buff *skb, int extra_elems, int data_offset)
{
int elements = qeth_get_elements_for_range(
- (addr_t)skb->data,
+ (addr_t)skb->data + data_offset,
(addr_t)skb->data + skb_headlen(skb)) +
qeth_get_elements_for_frags(skb);
* chaining we can not send long frag lists
*/
if ((card->info.type != QETH_CARD_TYPE_IQD) &&
- !qeth_get_elements_no(card, new_skb, 0)) {
+ !qeth_get_elements_no(card, new_skb, 0, 0)) {
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
}
}
- elements = qeth_get_elements_no(card, new_skb, elements_needed);
+ elements = qeth_get_elements_no(card, new_skb, elements_needed,
+ (data_offset > 0) ? data_offset : 0);
if (!elements) {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
char daddr[16];
struct af_iucv_trans_hdr *iucv_hdr;
- skb_pull(skb, 14);
- card->dev->header_ops->create(skb, card->dev, 0,
- card->dev->dev_addr, card->dev->dev_addr,
- card->dev->addr_len);
- skb_pull(skb, 14);
- iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
hdr->hdr.l3.ext_flags = 0;
- hdr->hdr.l3.length = skb->len;
+ hdr->hdr.l3.length = skb->len - ETH_HLEN;
hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
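+	/* the AF_IUCV transport header sits right behind the ethernet header */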
+ iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
memset(daddr, 0, sizeof(daddr));
daddr[0] = 0xfe;
daddr[1] = 0x80;
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
!skb_is_nonlinear(skb)) {
new_skb = skb;
- if (new_skb->protocol == ETH_P_AF_IUCV)
- data_offset = 0;
- else
- data_offset = ETH_HLEN;
+ data_offset = ETH_HLEN;
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;
*/
if ((card->info.type != QETH_CARD_TYPE_IQD) &&
((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
- (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+ (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
elements = use_tso ?
qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
- qeth_get_elements_no(card, new_skb, hdr_elements);
+ qeth_get_elements_no(card, new_skb, hdr_elements,
+ (data_offset > 0) ? data_offset : 0);
if (!elements) {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
-#define aac_adapter_check_health(dev) \
- (dev)->a_ops.adapter_check_health(dev)
-
#define aac_adapter_restart(dev, bled, reset_type) \
((dev)->a_ops.adapter_restart(dev, bled, reset_type))
return capacity;
}
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
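+	/* an offline PCI channel (e.g. during EEH recovery) means a dead adapter */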
+ if (unlikely(pci_channel_offline(dev->pdev)))
+ return -1;
+
+ return (dev)->a_ops.adapter_check_health(dev);
+}
+
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
spin_unlock_irqrestore(&aac->fib_lock, flagv);
if (BlinkLED < 0) {
- printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+ printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+ aac->name, BlinkLED);
goto out;
}
{
struct hw_fib **hw_fib_p;
struct fib **fib_p;
- int rcode = 1;
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
}
}
+ /*
+ * Get the actual number of allocated fibs
+ */
num = hw_fib_p - hw_fib_pool;
- if (!num)
- rcode = 0;
-
- return rcode;
+ return num;
}
static void wakeup_fibctx_threads(struct aac_dev *dev,
struct fib *fib;
unsigned long flags;
spinlock_t *t_lock;
- unsigned int rcode;
t_lock = dev->queues->queue[HostNormCmdQueue].lock;
spin_lock_irqsave(t_lock, flags);
* Fill up fib pointer pools with actual fibs
* and hw_fibs
*/
- rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
- if (!rcode)
+ num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+ if (!num)
goto free_mem;
/*
#define ALUA_POLICY_SWITCH_ALL 1
static void alua_rtpg_work(struct work_struct *work);
-static void alua_rtpg_queue(struct alua_port_group *pg,
+static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);
kref_put(&pg->kref, release_port_group);
}
-static void alua_rtpg_queue(struct alua_port_group *pg,
+/**
+ * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ *
+ * Returns true if and only if alua_rtpg_work() will be called asynchronously.
+ * That function is responsible for calling @qdata->fn().
+ */
+static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
struct alua_queue_data *qdata, bool force)
{
unsigned long flags;
struct workqueue_struct *alua_wq = kaluad_wq;
- if (!pg)
- return;
+ if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
+ return false;
spin_lock_irqsave(&pg->lock, flags);
if (qdata) {
pg->flags |= ALUA_PG_RUN_RTPG;
kref_get(&pg->kref);
pg->rtpg_sdev = sdev;
- scsi_device_get(sdev);
start_queue = 1;
} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
pg->flags |= ALUA_PG_RUN_RTPG;
/* Do not queue if the worker is already running */
if (!(pg->flags & ALUA_PG_RUNNING)) {
kref_get(&pg->kref);
- sdev = NULL;
start_queue = 1;
}
}
alua_wq = kaluad_sync_wq;
spin_unlock_irqrestore(&pg->lock, flags);
- if (start_queue &&
- !queue_delayed_work(alua_wq, &pg->rtpg_work,
- msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
- if (sdev)
- scsi_device_put(sdev);
- kref_put(&pg->kref, release_port_group);
+ if (start_queue) {
+ if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+ msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
+ sdev = NULL;
+ else
+ kref_put(&pg->kref, release_port_group);
}
+ if (sdev)
+ scsi_device_put(sdev);
+
+ return true;
}
/*
mutex_unlock(&h->init_mutex);
goto out;
}
- fn = NULL;
rcu_read_unlock();
mutex_unlock(&h->init_mutex);
- alua_rtpg_queue(pg, sdev, qdata, true);
+ if (alua_rtpg_queue(pg, sdev, qdata, true))
+ fn = NULL;
+ else
+ err = SCSI_DH_DEV_OFFLINED;
kref_put(&pg->kref, release_port_group);
out:
if (fn)
if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
hpsa_get_ioaccel_status(h, scsi3addr, this_device);
volume_offline = hpsa_volume_offline(h, scsi3addr);
+ this_device->volume_offline = volume_offline;
if (volume_offline == HPSA_LV_FAILED) {
rc = HPSA_LV_FAILED;
dev_err(&h->pdev->dev,
break;
case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
case IPR_IOASA_IR_DUAL_IOA_DISABLED:
- scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+ /*
+ * Exception: do not set DID_PASSTHROUGH on CHECK CONDITION,
+ * so that the SCSI mid-layer and upper layers handle it accordingly.
+ */
+ if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
+ scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break;
case IPR_IOASC_BUS_WAS_RESET:
case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
task->num_scatter = qc->n_elem;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
- xfer += sg->length;
+ xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
-enum {
- DUMP_FCP,
- DUMP_NVME,
- DUMP_MBX,
- DUMP_ELS,
- DUMP_NVMELS,
-};
-
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
#define LPFC_NVMEKTIME_SIZE 8192
struct lpfc_idiag_offset offset;
void *ptr_private;
};
+
+#else
+
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+ no_printk(fmt, ##arg)
+
#endif
+enum {
+ DUMP_FCP,
+ DUMP_NVME,
+ DUMP_MBX,
+ DUMP_ELS,
+ DUMP_NVMELS,
+};
+
/* Mask for discovery_trace */
#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvPRLI++;
- if (vport->port_state < LPFC_DISC_AUTH) {
+ if ((vport->port_state < LPFC_DISC_AUTH) &&
+ (vport->fc_flag & FC_FABRIC)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
struct lpfc_hba *phba = ctxp->phba;
struct lpfc_iocbq *nvmewqeq;
unsigned long iflags;
- int rc, id;
+ int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on) {
ctxp->ts_nvme_data = ktime_get_ns();
}
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- id = smp_processor_id();
+ int id = smp_processor_id();
ctxp->cpu = id;
if (id < LPFC_CHECK_CPU_CNT)
phba->cpucheck_xmt_io[id]++;
qedf_set_vlan_id(qedf, vid);
/* Inform waiter that it's ok to call fcoe_ctlr_link up() */
- complete(&qedf->fipvlan_compl);
+ if (!completion_done(&qedf->fipvlan_compl))
+ complete(&qedf->fipvlan_compl);
}
}
atomic_set(&qedf->num_offloads, 0);
qedf->stop_io_on_error = false;
pci_set_drvdata(pdev, qedf);
+ init_completion(&qedf->fipvlan_compl);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
static struct pci_device_id qedi_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
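+ /* ISP82xx (P3P) reports a disconnected bus via host_int instead */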
+ if (IS_P3P_TYPE(ha))
+ return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ else
+ return ((RD_REG_DWORD(&reg->host_status)) ==
+ ISP_REG_DISCONNECT);
}
/**************************************************************************
/* Don't abort commands in adapter during EEH
* recovery as it's not accessible/responding.
*/
- if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
+ if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+ (sp->type == SRB_SCSI_CMD)) {
/* Get a reference to the sp and drop the lock.
* The reference ensures this sp->done() call
* - and not the call in qla2xxx_eh_abort() -
scsi_starved_list_run(sdev->host);
if (q->mq_ops)
- blk_mq_start_stopped_hw_queues(q, false);
+ blk_mq_run_hw_queues(q, false);
else
blk_run_queue(q);
}
!list_empty(&sdev->host->starved_list))
kblockd_schedule_work(&sdev->requeue_work);
else
- blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_run_hw_queues(q, true);
} else {
unsigned long flags;
case BLK_MQ_RQ_QUEUE_BUSY:
if (atomic_read(&sdev->device_busy) == 0 &&
!scsi_device_blocked(sdev))
- blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
break;
case BLK_MQ_RQ_QUEUE_ERROR:
/*
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+}
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else
- rw_max = BLK_DEF_MAX_SECTORS;
+ rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+ (sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
result = get_user(val, ip);
if (result)
return result;
+ if (val > SG_MAX_CDB_SIZE)
+ return -ENOMEM;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mmio_base = devm_ioremap_resource(dev, mem_res);
- if (IS_ERR(*(void **)&mmio_base)) {
- err = PTR_ERR(*(void **)&mmio_base);
+ if (IS_ERR(mmio_base)) {
+ err = PTR_ERR(mmio_base);
goto out;
}
}
if (ufshcd_is_clkscaling_supported(hba))
hba->clk_scaling.active_reqs--;
- if (ufshcd_is_clkscaling_supported(hba))
- hba->clk_scaling.active_reqs--;
}
/* clear corresponding bits of completed commands */
ret = PTR_ERR(vmfile);
goto out;
}
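+		/* vfs_llseek() on the backing file requires FMODE_LSEEK to be set */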
+ vmfile->f_mode |= FMODE_LSEEK;
asma->file = vmfile;
}
get_file(asma->file);
.release = ion_dma_buf_release,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
- .kmap_atomic = ion_dma_buf_kmap,
- .kunmap_atomic = ion_dma_buf_kunmap,
- .kmap = ion_dma_buf_kmap,
- .kunmap = ion_dma_buf_kunmap,
+ .map_atomic = ion_dma_buf_kmap,
+ .unmap_atomic = ion_dma_buf_kunmap,
+ .map = ion_dma_buf_kmap,
+ .unmap = ion_dma_buf_kunmap,
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
- iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
- return 0;
+ return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
EXPORT_SYMBOL(iscsit_queue_rsp);
static int lio_queue_data_in(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_DATAIN;
- cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
- return 0;
+ return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
}
static int lio_write_pending(struct se_cmd *se_cmd)
static int lio_queue_status(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_STATUS;
if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
- iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
- return 0;
+ return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
}
- cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
-
- return 0;
+ return conn->conn_transport->iscsit_queue_status(conn, cmd);
}
static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
/*
* Required for gPXE iSCSI boot client
*/
}
}
-void iscsit_add_cmd_to_response_queue(
+int iscsit_add_cmd_to_response_queue(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn,
u8 state)
if (!qr) {
pr_err("Unable to allocate memory for"
" struct iscsi_queue_req\n");
- return;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&qr->qr_list);
qr->cmd = cmd;
spin_unlock_bh(&conn->response_queue_lock);
wake_up(&conn->queues_wq);
+ return 0;
}
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
struct se_cmd *se_cmd = NULL;
int rc;
+ bool op_scsi = false;
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
+ op_scsi = true;
/*
* Fallthrough
*/
case ISCSI_OP_SCSI_TMFUNC:
- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
- if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
+ rc = transport_generic_free_cmd(se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
target_put_sess_cmd(se_cmd);
}
break;
struct iscsi_conn_recovery **, itt_t);
extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
/*
* Set the ASYMMETRIC ACCESS State
*/
- buf[off++] |= (atomic_read(
- &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
spin_lock(&lun->lun_tg_pt_gp_lock);
tg_pt_gp = lun->lun_tg_pt_gp;
- out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
// XXX: keeps using tg_pt_gp without a reference after unlock
}
/*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
*/
static int core_alua_update_tpg_primary_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp)
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
tg_pt_gp->tg_pt_gp_id,
- tg_pt_gp->tg_pt_gp_alua_pending_state,
+ tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
-{
- struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
- struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
- struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
- bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
- ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
-
- /*
- * Update the ALUA metadata buf that has been allocated in
- * core_alua_do_port_transition(), this metadata will be written
- * to struct file.
- *
- * Note that there is the case where we do not want to update the
- * metadata when the saved metadata is being parsed in userspace
- * when setting the existing port access state and access status.
- *
- * Also note that the failure to write out the ALUA metadata to
- * struct file does NOT affect the actual ALUA transition.
- */
- if (tg_pt_gp->tg_pt_gp_write_metadata) {
- mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
- core_alua_update_tpg_primary_metadata(tg_pt_gp);
- mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
- }
- /*
- * Set the current primary ALUA access state to the requested new state
- */
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- tg_pt_gp->tg_pt_gp_alua_pending_state);
-
- pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " from primary access state %s to %s\n", (explicit) ? "explicit" :
- "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
- tg_pt_gp->tg_pt_gp_id,
- core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
- core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
-
- core_alua_queue_state_change_ua(tg_pt_gp);
-
- spin_lock(&dev->t10_alua.tg_pt_gps_lock);
- atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
- if (tg_pt_gp->tg_pt_gp_transition_complete)
- complete(tg_pt_gp->tg_pt_gp_transition_complete);
-}
-
static int core_alua_do_transition_tg_pt(
struct t10_alua_tg_pt_gp *tg_pt_gp,
int new_state,
int explicit)
{
- struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int prev_state;
+ mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
/* Nothing to be done here */
- if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+ if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
+ }
- if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
+ if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return -EAGAIN;
-
- /*
- * Flush any pending transitions
- */
- if (!explicit)
- flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ }
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_TRANSITION);
+ prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+ tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
core_alua_queue_state_change_ua(tg_pt_gp);
- if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
-
- tg_pt_gp->tg_pt_gp_alua_previous_state =
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+ }
/*
* Check for the optional ALUA primary state transition delay
msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
/*
- * Take a reference for workqueue item
+ * Set the current primary ALUA access state to the requested new state
*/
- spin_lock(&dev->t10_alua.tg_pt_gps_lock);
- atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
- schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
- if (explicit) {
- tg_pt_gp->tg_pt_gp_transition_complete = &wait;
- wait_for_completion(&wait);
- tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ /*
+ * Update the ALUA metadata buf that has been allocated in
+ * core_alua_do_port_transition(); this metadata will be written
+ * to struct file.
+ *
+ * Note that there is the case where we do not want to update the
+ * metadata when the saved metadata is being parsed in userspace
+ * when setting the existing port access state and access status.
+ *
+ * Also note that the failure to write out the ALUA metadata to
+ * struct file does NOT affect the actual ALUA transition.
+ */
+ if (tg_pt_gp->tg_pt_gp_write_metadata) {
+ core_alua_update_tpg_primary_metadata(tg_pt_gp);
}
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ " from primary access state %s to %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id,
+ core_alua_dump_state(prev_state),
+ core_alua_dump_state(new_state));
+
+ core_alua_queue_state_change_ua(tg_pt_gp);
+
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
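
The rework above drops the workqueue-plus-completion dance and serializes the whole
transition behind a single mutex. A minimal userspace model of the resulting pattern,
with illustrative names rather than the target-core API:

    #include <pthread.h>
    #include <stdio.h>

    /* One mutex guards the access state for the entire transition. */
    struct gp_model {
            pthread_mutex_t transition_mutex;
            int access_state;
    };

    static int do_transition(struct gp_model *gp, int new_state)
    {
            pthread_mutex_lock(&gp->transition_mutex);
            if (gp->access_state == new_state) {    /* nothing to be done */
                    pthread_mutex_unlock(&gp->transition_mutex);
                    return 0;
            }
            int prev = gp->access_state;
            /* ... TRANSITION intermediate state, delay, metadata update ... */
            gp->access_state = new_state;
            printf("transition %d -> %d\n", prev, new_state);
            pthread_mutex_unlock(&gp->transition_mutex);
            return 0;
    }

    int main(void)
    {
            struct gp_model gp = { PTHREAD_MUTEX_INITIALIZER, 0 };
            do_transition(&gp, 2);
            do_transition(&gp, 2);  /* no-op: already in state 2 */
            return 0;
    }

Because every reader and writer of the access state now takes the same mutex, the
atomic_t and the flush_work() synchronization become unnecessary.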
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
- mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+ mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
- INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
- core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+ tg_pt_gp->tg_pt_gp_alua_access_state =
+ ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
/*
* Enable both explicit and implicit ALUA support by default
*/
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
"Primary Access Status: %s\nTG Port Secondary Access"
" State: %s\nTG Port Secondary Access Status: %s\n",
config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
- core_alua_dump_state(atomic_read(
- &tg_pt_gp->tg_pt_gp_alua_access_state)),
+ core_alua_dump_state(
+ tg_pt_gp->tg_pt_gp_alua_access_state),
core_alua_dump_status(
tg_pt_gp->tg_pt_gp_alua_access_status),
atomic_read(&lun->lun_tg_pt_secondary_offline) ?
char *page)
{
return sprintf(page, "%d\n",
- atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+ to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
pr_err("Source se_lun->lun_se_dev does not exist\n");
return -EINVAL;
}
+ if (lun->lun_shutdown) {
+ pr_err("Unable to create mappedlun symlink because"
+ " lun->lun_shutdown=true\n");
+ return -EINVAL;
+ }
se_tpg = lun->lun_tpg;
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+ lun->lun_shutdown = true;
+
core_clear_lun_from_tpg(lun, tpg);
/*
* Wait for any active I/O references to percpu se_lun->lun_ref to
}
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
hlist_del_rcu(&lun->link);
+
+ lun->lun_shutdown = false;
mutex_unlock(&tpg->tpg_lun_mutex);
percpu_ref_exit(&lun->lun_ref);
struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
- struct se_device *dev);
+ struct se_device *dev, int err, bool write_pending);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
transport_write_pending_qf(cmd);
- else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+ else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+ cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
transport_complete_qf(cmd);
}
}
}
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
goto check_stop;
default:
}
ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
check_stop:
return;
queue_full:
- cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
EXPORT_SYMBOL(transport_generic_request_failure);
int ret = 0;
transport_complete_task_attr(cmd);
+ /*
+ * If a fabric driver ->write_pending() or ->queue_data_in() callback
+	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+ * the same callbacks should not be retried. Return CHECK_CONDITION
+ * if a scsi_status is not already set.
+ *
+ * If a fabric driver ->queue_status() has returned non zero, always
+	 * keep retrying no matter what.
+ */
+ if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+ if (cmd->scsi_status)
+ goto queue_status;
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- goto out;
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+ goto queue_status;
}
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ goto queue_status;
+
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
if (cmd->scsi_status)
break;
}
-out:
if (ret < 0) {
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
return;
}
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
-static void transport_handle_queue_full(
- struct se_cmd *cmd,
- struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+ int err, bool write_pending)
{
+ /*
+ * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+ * ->queue_data_in() callbacks from new process context.
+ *
+ * Otherwise for other errors, transport_complete_qf() will send
+ * CHECK_CONDITION via ->queue_status() instead of attempting to
+ * retry associated fabric driver data-transfer callbacks.
+ */
+ if (err == -EAGAIN || err == -ENOMEM) {
+ cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+ TRANSPORT_COMPLETE_QF_OK;
+ } else {
+ pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+ }
+
spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc_mb(&dev->dev_qf_count);
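
The classification above is the heart of the change: -EAGAIN and -ENOMEM keep their
historical retry semantics, anything else degrades to TRANSPORT_COMPLETE_QF_ERR, where
only ->queue_status() is retried. A hedged standalone sketch of that decision (the
state names here are invented for the example):

    #include <errno.h>
    #include <stdio.h>

    enum qf_state { QF_RETRY_WP, QF_RETRY_OK, QF_ERR };  /* illustrative */

    /* Retryable errors keep the original completion path; unknown
     * errors fall back to a CHECK_CONDITION-only retry. */
    static enum qf_state classify_qf(int err, int write_pending)
    {
            if (err == -EAGAIN || err == -ENOMEM)
                    return write_pending ? QF_RETRY_WP : QF_RETRY_OK;
            fprintf(stderr, "unknown fabric queue status: %d\n", err);
            return QF_ERR;
    }

    int main(void)
    {
            printf("%d %d %d\n", classify_qf(-EAGAIN, 1),
                   classify_qf(-ENOMEM, 0), classify_qf(-EIO, 0));
            return 0;
    }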
WARN_ON(!cmd->scsi_status);
ret = transport_send_check_condition_and_sense(
cmd, 0, 1);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
} else if (rc) {
ret = transport_send_check_condition_and_sense(cmd,
rc, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
if (target_read_prot_action(cmd)) {
ret = transport_send_check_condition_and_sense(cmd,
cmd->pi_err, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
case DMA_TO_DEVICE:
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.tx_data_octets);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
}
queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
default:
queue_full:
pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
" data_direction: %d\n", cmd, cmd->data_direction);
- cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
- transport_handle_queue_full(cmd, cmd->se_dev);
+
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
void target_free_sgl(struct scatterlist *sgl, int nents)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
- /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
- WARN_ON(ret);
-
- return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return 0;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
- cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
int ret;
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM) {
+ if (ret) {
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
cmd);
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
}
}
__releases(&cmd->t_state_lock)
__acquires(&cmd->t_state_lock)
{
+ int ret;
+
assert_spin_locked(&cmd->t_state_lock);
WARN_ON_ONCE(!irqs_disabled());
trace_target_cmd_complete(cmd);
spin_unlock_irq(&cmd->t_state_lock);
- cmd->se_tfo->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret)
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
spin_lock_irq(&cmd->t_state_lock);
return 1;
void transport_send_task_abort(struct se_cmd *cmd)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
cmd->t_task_cdb[0], cmd->tag);
trace_target_cmd_complete(cmd);
- cmd->se_tfo->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret)
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
static void target_tmr_work(struct work_struct *work)
DATA_BLOCK_BITS);
}
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
- struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ bool bidi)
{
+ struct se_cmd *se_cmd = cmd->se_cmd;
int i, block;
int block_remaining = 0;
void *from, *to;
size_t copy_bytes, from_offset;
- struct scatterlist *sg;
+ struct scatterlist *sg, *data_sg;
+ unsigned int data_nents;
+ DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+ bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+ if (!bidi) {
+ data_sg = se_cmd->t_data_sg;
+ data_nents = se_cmd->t_data_nents;
+ } else {
+ uint32_t count;
+
+ /*
+		 * For the bidi case, the first count blocks are for Data-Out
+ * buffer blocks, and before gathering the Data-In buffer
+ * the Data-Out buffer blocks should be discarded.
+ */
+ count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ while (count--) {
+ block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+ clear_bit(block, bitmap);
+ }
+
+ data_sg = se_cmd->t_bidi_data_sg;
+ data_nents = se_cmd->t_bidi_data_nents;
+ }
for_each_sg(data_sg, sg, data_nents, i) {
int sg_remaining = sg->length;
to = kmap_atomic(sg_page(sg)) + sg->offset;
while (sg_remaining > 0) {
if (block_remaining == 0) {
- block = find_first_bit(cmd_bitmap,
+ block = find_first_bit(bitmap,
DATA_BLOCK_BITS);
block_remaining = DATA_BLOCK_SIZE;
- clear_bit(block, cmd_bitmap);
+ clear_bit(block, bitmap);
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
return true;
}
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+ data_length += round_up(se_cmd->t_bidi_data_sg->length,
+ DATA_BLOCK_SIZE);
+ }
+
+ return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+ return data_length / DATA_BLOCK_SIZE;
+}
+
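The two helpers above centralize the size math: both directions are rounded up to
whole data blocks before summing, so the iov bound in the ring entry covers every
block a command can touch. A worked example, assuming a 4 KiB DATA_BLOCK_SIZE (the
value is an assumption of this sketch):

    #include <stdio.h>

    #define DATA_BLOCK_SIZE 4096    /* assumed for the example */

    static size_t round_up_sz(size_t n, size_t a)
    {
            return (n + a - 1) / a * a;
    }

    int main(void)
    {
            size_t data_len = 6000;   /* Data-Out payload */
            size_t bidi_len = 512;    /* first Data-In sg length (BIDI) */
            size_t total = round_up_sz(data_len, DATA_BLOCK_SIZE) +
                           round_up_sz(bidi_len, DATA_BLOCK_SIZE);

            printf("total=%zu blocks=%zu\n", total, total / DATA_BLOCK_SIZE);
            /* prints: total=12288 blocks=3 */
            return 0;
    }
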
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
- size_t data_length;
+ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
* expensive to tell how many regions are freed in the bitmap
*/
base_command_size = max(offsetof(struct tcmu_cmd_entry,
- req.iov[se_cmd->t_bidi_data_nents +
- se_cmd->t_data_nents]),
+ req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
sizeof(struct tcmu_cmd_entry));
command_size = base_command_size
+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
mb = udev->mb_addr;
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
- data_length = se_cmd->data_length;
- if (se_cmd->se_cmd_flags & SCF_BIDI) {
- BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
- data_length += se_cmd->t_bidi_data_sg->length;
- }
if ((command_size > (udev->cmdr_size / 2)) ||
data_length > udev->data_size) {
pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
entry->req.iov_dif_cnt = 0;
/* Handle BIDI commands */
- iov_cnt = 0;
- alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
- se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
- entry->req.iov_bidi_cnt = iov_cnt;
-
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ iov_cnt = 0;
+ iov++;
+ alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+ se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+ false);
+ entry->req.iov_bidi_cnt = iov_cnt;
+ }
/* cmd's data_bitmap is what changed in process */
bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
DATA_BLOCK_BITS);
se_cmd->scsi_sense_length);
free_data_area(udev, cmd);
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
/* Get Data-In buffer before clean up */
- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
- gather_data_area(udev, bitmap,
- se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+ gather_data_area(udev, cmd, true);
free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
- gather_data_area(udev, bitmap,
- se_cmd->t_data_sg, se_cmd->t_data_nents);
+ gather_data_area(udev, cmd, false);
free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
free_data_area(udev, cmd);
if (ret < 0)
return ret;
- if (!val) {
- pr_err("Illegal value for cmd_time_out\n");
- return -EINVAL;
- }
-
udev->cmd_time_out = val * MSEC_PER_SEC;
return count;
}
};
static DEFINE_IDA(cpufreq_ida);
-static unsigned int cpufreq_dev_count;
-
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
true);
+ if (IS_ERR(opp)) {
+ dev_warn_ratelimited(cpufreq_device->cpu_dev,
+ "Failed to find OPP for frequency %lu: %ld\n",
+ freq_hz, PTR_ERR(opp));
+ return -EINVAL;
+ }
+
voltage = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
if (voltage == 0) {
- dev_warn_ratelimited(cpufreq_device->cpu_dev,
- "Failed to get voltage for frequency %lu: %ld\n",
- freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0);
+ dev_err_ratelimited(cpufreq_device->cpu_dev,
+ "Failed to get voltage for frequency %lu\n",
+ freq_hz);
return -EINVAL;
}
*state = cpufreq_cooling_get_level(cpu, target_freq);
if (*state == THERMAL_CSTATE_INVALID) {
- dev_warn_ratelimited(&cdev->device,
- "Failed to convert %dKHz for cpu %d into a cdev state\n",
- target_freq, cpu);
+ dev_err_ratelimited(&cdev->device,
+ "Failed to convert %dKHz for cpu %d into a cdev state\n",
+ target_freq, cpu);
return -EINVAL;
}
unsigned int freq, i, num_cpus;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
+ bool first;
if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_list_lock);
+ /* Register the notifier for first cpufreq cooling device */
+ first = list_empty(&cpufreq_dev_list);
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
- /* Register the notifier for first cpufreq cooling device */
- if (!cpufreq_dev_count++)
+ if (first)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- mutex_unlock(&cooling_list_lock);
goto put_policy;
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
struct cpufreq_cooling_device *cpufreq_dev;
+ bool last;
if (!cdev)
return;
cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_list_lock);
+ list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- if (!--cpufreq_dev_count)
+ last = list_empty(&cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
+
+ if (last)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- list_del(&cpufreq_dev->node);
- mutex_unlock(&cooling_list_lock);
-
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
kfree(cpufreq_dev->dyn_power_table);
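
Replacing the cpufreq_dev_count counter with list_empty() checks taken while the list
lock is held also lets the notifier (un)registration move outside the lock. A small
userspace model of the "register on first device, unregister on last" pattern (a
counter stands in for the device list):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_devices;  /* stands in for cpufreq_dev_list */

    static void register_notifier(void)   { puts("notifier registered"); }
    static void unregister_notifier(void) { puts("notifier unregistered"); }

    static void device_add(void)
    {
            bool first;

            pthread_mutex_lock(&list_lock);
            first = (nr_devices++ == 0);  /* list was empty before the add */
            pthread_mutex_unlock(&list_lock);

            if (first)  /* notifier call happens outside the lock */
                    register_notifier();
    }

    static void device_del(void)
    {
            bool last;

            pthread_mutex_lock(&list_lock);
            last = (--nr_devices == 0);   /* list is empty after the del */
            pthread_mutex_unlock(&list_lock);

            if (last)
                    unregister_notifier();
    }

    int main(void)
    {
            device_add();
            device_add();
            device_del();
            device_del();
            return 0;
    }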
return 0;
opp = dev_pm_opp_find_freq_exact(dev, freq, true);
- if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE))
+ if (PTR_ERR(opp) == -ERANGE)
opp = dev_pm_opp_find_freq_exact(dev, freq, false);
+ if (IS_ERR(opp)) {
+ dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
+ freq, PTR_ERR(opp));
+ return 0;
+ }
+
voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
dev_pm_opp_put(opp);
if (voltage == 0) {
- dev_warn_ratelimited(dev,
- "Failed to get voltage for frequency %lu: %ld\n",
- freq, IS_ERR(opp) ? PTR_ERR(opp) : 0);
+ dev_err_ratelimited(dev,
+ "Failed to get voltage for frequency %lu\n",
+ freq);
return 0;
}
by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL.
config SERIAL_8250_EXAR
- tristate "8250/16550 PCI device support"
- depends on SERIAL_8250_PCI
+ tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
+ depends on SERIAL_8250_PCI
default SERIAL_8250
+ help
+ This builds support for XR17C1xx, XR17V3xx and some Commtech
+ 422x PCIe serial cards that are not covered by the more generic
+ SERIAL_8250_PCI option.
config SERIAL_8250_HP300
tristate
uart_console_write(&dev->port, s, n, pl011_putc);
}
+/*
+ * On non-ACPI systems, earlycon is enabled by specifying
+ * "earlycon=pl011,<address>" on the kernel command line.
+ *
+ * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
+ * by specifying only "earlycon" on the command line. Because it requires
+ * SPCR, the console starts after ACPI is parsed, which is later than a
+ * traditional early console.
+ *
+ * To get the traditional early console that starts before ACPI is parsed,
+ * specify the full "earlycon=pl011,<address>" option.
+ */
static int __init pl011_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
- device->con->write = qdf2400_e44_present ?
- qdf2400_e44_early_write : pl011_early_write;
+	/* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" option
+	 * must also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44".
+ */
+ if (!strcmp(device->options, "qdf2400_e44"))
+ device->con->write = qdf2400_e44_early_write;
+ else
+ device->con->write = pl011_early_write;
+
return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
+EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup);
#else
#define AMBA_CONSOLE NULL
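
For reference, the command-line forms this arrangement yields (the address is a
placeholder, not a real SoC address):

    earlycon=pl011,<address>                 traditional early console, starts before ACPI
    earlycon=pl011,<address>,qdf2400_e44     same, using the Erratum 44 write routine
    earlycon                                 SPCR-driven console, starts after ACPI is parsed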
atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
atmel_port->pdc_tx.ofs = 0;
}
+ /*
+ * in uart_flush_buffer(), the xmit circular buffer has just
+ * been cleared, so we have to reset tx_len accordingly.
+ */
+ atmel_port->tx_len = 0;
}
/*
pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
+ /* Make sure that tx path is actually able to send characters */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+
uart_console_write(port, s, count, atmel_console_putchar);
/*
AUART_LINECTRL_BAUD_DIV_MAX);
baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
- div = u->uartclk * 32 / baud;
+ div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
}
ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
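
To see what DIV_ROUND_CLOSEST() buys here, take an assumed 24 MHz uartclk at
115200 baud:

    div = 24000000 * 32 / 115200 = 6666.67
    truncated:       div = 6666  ->  768000000 / 6666 ~= 115211.5 baud  (+0.010%)
    round-closest:   div = 6667  ->  768000000 / 6667 ~= 115194.2 baud  (-0.005%)

Rounding to the nearest divisor roughly halves the worst-case baud-rate error.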
tty_ldisc_debug(tty, "%p: closed\n", ld);
}
+/**
+ * tty_ldisc_restore - helper for tty ldisc change
+ * @tty: tty to recover
+ * @old: previous ldisc
+ *
+ * Restore the previous line discipline or N_TTY when a line discipline
+ * change fails due to an open error
+ */
+
+static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+{
+ struct tty_ldisc *new_ldisc;
+ int r;
+
+ /* There is an outstanding reference here so this is safe */
+ old = tty_ldisc_get(tty, old->ops->num);
+ WARN_ON(IS_ERR(old));
+ tty->ldisc = old;
+ tty_set_termios_ldisc(tty, old->ops->num);
+ if (tty_ldisc_open(tty, old) < 0) {
+ tty_ldisc_put(old);
+ /* This driver is always present */
+ new_ldisc = tty_ldisc_get(tty, N_TTY);
+ if (IS_ERR(new_ldisc))
+ panic("n_tty: get");
+ tty->ldisc = new_ldisc;
+ tty_set_termios_ldisc(tty, N_TTY);
+ r = tty_ldisc_open(tty, new_ldisc);
+ if (r < 0)
+ panic("Couldn't open N_TTY ldisc for "
+ "%s --- error %d.",
+ tty_name(tty), r);
+ }
+}
+
/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
int tty_set_ldisc(struct tty_struct *tty, int disc)
{
- int retval, old_disc;
+ int retval;
+ struct tty_ldisc *old_ldisc, *new_ldisc;
+
+ new_ldisc = tty_ldisc_get(tty, disc);
+ if (IS_ERR(new_ldisc))
+ return PTR_ERR(new_ldisc);
tty_lock(tty);
retval = tty_ldisc_lock(tty, 5 * HZ);
}
/* Check the no-op case */
- old_disc = tty->ldisc->ops->num;
- if (old_disc == disc)
+ if (tty->ldisc->ops->num == disc)
goto out;
if (test_bit(TTY_HUPPED, &tty->flags)) {
goto out;
}
- retval = tty_ldisc_reinit(tty, disc);
+ old_ldisc = tty->ldisc;
+
+ /* Shutdown the old discipline. */
+ tty_ldisc_close(tty, old_ldisc);
+
+ /* Now set up the new line discipline. */
+ tty->ldisc = new_ldisc;
+ tty_set_termios_ldisc(tty, disc);
+
+ retval = tty_ldisc_open(tty, new_ldisc);
if (retval < 0) {
/* Back to the old one or N_TTY if we can't */
- if (tty_ldisc_reinit(tty, old_disc) < 0) {
- pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
- if (tty_ldisc_reinit(tty, N_TTY) < 0) {
- /* At this point we have tty->ldisc == NULL. */
- pr_err("tty: reinitializing N_TTY failed\n");
- }
- }
+ tty_ldisc_put(new_ldisc);
+ tty_ldisc_restore(tty, old_ldisc);
}
- if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
- tty->ops->set_ldisc) {
+ if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
down_read(&tty->termios_rwsem);
tty->ops->set_ldisc(tty);
up_read(&tty->termios_rwsem);
}
+ /* At this point we hold a reference to the new ldisc and a
+ reference to the old ldisc, or we hold two references to
+ the old ldisc (if it was restored as part of error cleanup
+ above). In either case, releasing a single reference from
+ the old ldisc is correct. */
+ new_ldisc = old_ldisc;
out:
tty_ldisc_unlock(tty);
already running */
tty_buffer_restart_work(tty->port);
err:
+ tty_ldisc_put(new_ldisc); /* drop the extra reference */
tty_unlock(tty);
return retval;
}
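
The reference accounting the comment describes can be modeled in a few lines; this toy
(not the tty API) shows why a single put on the old ldisc is correct on both the
success path and the restore path:

    #include <stdio.h>

    struct ldisc { int num; int refs; };  /* toy refcounted object */

    static struct ldisc *ldisc_get(struct ldisc *ld) { ld->refs++; return ld; }
    static void ldisc_put(struct ldisc *ld) { ld->refs--; }

    /* On success the tty keeps the new ldisc's reference and must drop
     * the old one's; on failure the restore path re-references the old
     * ldisc, so it then holds two references and dropping one is still
     * correct. */
    static int set_ldisc(struct ldisc **cur, struct ldisc *new_ld, int fail)
    {
            struct ldisc *old = *cur;

            if (fail) {
                    ldisc_put(new_ld);  /* drop the reference we took */
                    ldisc_get(old);     /* restore re-references old */
            } else {
                    *cur = new_ld;
            }
            ldisc_put(old);             /* single put, either way */
            return fail ? -1 : 0;
    }

    int main(void)
    {
            struct ldisc a = { 1, 1 }, b = { 2, 1 }, *cur = &a;

            set_ldisc(&cur, &b, 0);     /* success path */
            printf("cur=%d a.refs=%d b.refs=%d\n", cur->num, a.refs, b.refs);
            return 0;
    }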
int retval;
ld = tty_ldisc_get(tty, disc);
- if (IS_ERR(ld))
+ if (IS_ERR(ld)) {
+ BUG_ON(disc == N_TTY);
return PTR_ERR(ld);
+ }
if (tty->ldisc) {
tty_ldisc_close(tty, tty->ldisc);
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, tty->ldisc);
if (retval) {
- tty_ldisc_put(tty->ldisc);
- tty->ldisc = NULL;
+ if (!WARN_ON(disc == N_TTY)) {
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
+ }
}
return retval;
}
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
-#include <linux/sched/debug.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/mm.h>
*/
tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
tbuf = kzalloc(tbuf_size, GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
+ if (!tbuf) {
+ status = -ENOMEM;
+ goto err_alloc;
+ }
bufp = tbuf;
}
kfree(tbuf);
+ err_alloc:
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
usb_ep_free_request(fu->ep_out, fu->cmd.req);
- usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+ usb_ep_free_request(fu->ep_in, fu->bot_status.req);
kfree(fu->cmd.buf);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = DEV_PM_OPS,
case TRB_NORMAL:
td->urb->actual_length = requested - remaining;
goto finish_td;
+ case TRB_STATUS:
+ td->urb->actual_length = requested;
+ goto finish_td;
default:
xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
trb_type);
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct xhci_command *command;
+ struct xhci_virt_device *vdev;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
/* Make sure the URB hasn't completed or been unlinked already */
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (ret || !urb->hcpriv)
+ if (ret)
goto done;
+
+ /* give back URB now if we can't queue it for cancel */
+ vdev = xhci->devs[urb->dev->slot_id];
+ urb_priv = urb->hcpriv;
+ if (!vdev || !urb_priv)
+ goto err_giveback;
+
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep = &vdev->eps[ep_index];
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep || !ep_ring)
+ goto err_giveback;
+
temp = readl(&xhci->op_regs->status);
if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HW died, freeing TD.");
- urb_priv = urb->hcpriv;
for (i = urb_priv->num_tds_done;
- i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id];
+ i < urb_priv->num_tds;
i++) {
td = &urb_priv->td[i];
if (!list_empty(&td->td_list))
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
}
-
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
- xhci_urb_free_priv(urb_priv);
- return ret;
+ goto err_giveback;
}
- ep_index = xhci_get_endpoint_index(&urb->ep->desc);
- ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
- ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
- if (!ep_ring) {
- ret = -EINVAL;
- goto done;
- }
-
- urb_priv = urb->hcpriv;
i = urb_priv->num_tds_done;
if (i < urb_priv->num_tds)
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
done:
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
+
+err_giveback:
+ if (urb_priv)
+ xhci_urb_free_priv(urb_priv);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
+ return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
static struct i2c_driver isp1301_driver = {
.driver = {
.name = DRV_NAME,
- .of_match_table = of_match_ptr(isp1301_of_match),
+ .of_match_table = isp1301_of_match,
},
.probe = isp1301_probe,
.remove = isp1301_remove,
#include <linux/efi.h>
#include <linux/errno.h>
#include <linux/fb.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <video/vga.h>
};
ATTRIBUTE_GROUPS(efifb);
+static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
+
static int efifb_probe(struct platform_device *dev)
{
struct fb_info *info;
unsigned int size_total;
char *option = NULL;
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV;
if (fb_get_options("efifb", &option))
};
builtin_platform_driver(efifb_driver);
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
+
+static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
+
+static void claim_efifb_bar(struct pci_dev *dev, int idx)
+{
+ u16 word;
+
+ pci_bar_found = true;
+
+ pci_read_config_word(dev, PCI_COMMAND, &word);
+ if (!(word & PCI_COMMAND_MEMORY)) {
+ pci_dev_disabled = true;
+ dev_err(&dev->dev,
+ "BAR %d: assigned to efifb but device is disabled!\n",
+ idx);
+ return;
+ }
+
+ if (pci_claim_resource(dev, idx)) {
+ pci_dev_disabled = true;
+ dev_err(&dev->dev,
+ "BAR %d: failed to claim resource for efifb!\n", idx);
+ return;
+ }
+
+ dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
+}
+
+static void efifb_fixup_resources(struct pci_dev *dev)
+{
+ u64 base = screen_info.lfb_base;
+ u64 size = screen_info.lfb_size;
+ int i;
+
+ if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)screen_info.ext_lfb_base << 32;
+
+ if (!base)
+ return;
+
+ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+ struct resource *res = &dev->resource[i];
+
+ if (!(res->flags & IORESOURCE_MEM))
+ continue;
+
+ if (res->start <= base && res->end >= base + size - 1) {
+ claim_efifb_bar(dev, i);
+ break;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
+ 16, efifb_fixup_resources);
+
+#endif
return 0;
}
-static void check_required_callbacks(struct omapfb_device *fbdev)
-{
-#define _C(x) (fbdev->ctrl->x != NULL)
-#define _P(x) (fbdev->panel->x != NULL)
- BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
- BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
- _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
- _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
- _P(get_caps)));
-#undef _P
-#undef _C
-}
-
/*
* Called by LDM binding to probe and attach a new device.
* Initialization sequence:
omapfb_ops.fb_mmap = omapfb_mmap;
init_state++;
- check_required_callbacks(fbdev);
-
r = planes_init(fbdev);
if (r)
goto cleanup;
par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
if (IS_ERR(par->vbat_reg)) {
- dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
- PTR_ERR(par->vbat_reg));
ret = PTR_ERR(par->vbat_reg);
- goto fb_alloc_error;
+ if (ret == -ENODEV) {
+ par->vbat_reg = NULL;
+ } else {
+ dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
+ ret);
+ goto fb_alloc_error;
+ }
}
if (of_property_read_u32(node, "solomon,width", &par->width))
udelay(4);
}
- ret = regulator_enable(par->vbat_reg);
- if (ret) {
- dev_err(&client->dev, "failed to enable VBAT: %d\n", ret);
- goto reset_oled_error;
+ if (par->vbat_reg) {
+ ret = regulator_enable(par->vbat_reg);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable VBAT: %d\n",
+ ret);
+ goto reset_oled_error;
+ }
}
ret = ssd1307fb_init(par);
pwm_put(par->pwm);
};
regulator_enable_error:
- regulator_disable(par->vbat_reg);
+ if (par->vbat_reg)
+ regulator_disable(par->vbat_reg);
reset_oled_error:
fb_deferred_io_cleanup(info);
fb_alloc_error:
break;
case XenbusStateInitWait:
-InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;
* get Connected twice here.
*/
if (dev->state != XenbusStateConnected)
- goto InitWait; /* no InitWait seen yet, fudge it */
+ /* no InitWait seen yet, fudge it */
+ xenbus_switch_state(dev, XenbusStateConnected);
if (xenbus_read_unsigned(info->xbdev->otherend,
"request-update", 0))
if (device_features & (1ULL << i))
__virtio_set_bit(dev, i);
+ if (drv->validate) {
+ err = drv->validate(dev);
+ if (err)
+ goto err;
+ }
+
err = virtio_finalize_features(dev);
if (err)
goto err;
#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
-static void update_balloon_stats(struct virtio_balloon *vb)
+static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
unsigned long events[NR_VM_EVENT_ITEMS];
struct sysinfo i;
- int idx = 0;
+ unsigned int idx = 0;
long available;
all_vm_events(events);
available = si_mem_available();
+#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
pages_to_bytes(i.totalram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
pages_to_bytes(available));
+
+ return idx;
}
/*
{
struct virtqueue *vq;
struct scatterlist sg;
- unsigned int len;
+ unsigned int len, num_stats;
- update_balloon_stats(vb);
+ num_stats = update_balloon_stats(vb);
vq = vb->stats_vq;
if (!virtqueue_get_buf(vq, &len))
return;
- sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+ sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
}
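
Sizing the scatterlist from the returned count matters because, with
CONFIG_VM_EVENT_COUNTERS disabled, the first four entries are never written; sending
sizeof(vb->stats) would hand the host uninitialized slots. A hedged sketch of the
idea (the entry layout is invented for the example):

    #include <stdio.h>

    struct stat_entry { unsigned short tag; unsigned long long val; };

    /* Fill only the stats that are actually available; return the count. */
    static unsigned int fill_stats(struct stat_entry *s, int have_vm_events)
    {
            unsigned int idx = 0;

            if (have_vm_events) {   /* CONFIG_VM_EVENT_COUNTERS */
                    s[idx].tag = 0; s[idx++].val = 111;  /* swap in  */
                    s[idx].tag = 1; s[idx++].val = 222;  /* swap out */
            }
            s[idx].tag = 4; s[idx++].val = 333;          /* mem free */
            return idx;
    }

    int main(void)
    {
            struct stat_entry stats[8];
            unsigned int n = fill_stats(stats, 0);

            /* The buffer handed to the device covers n entries, not
             * the whole array. */
            printf("send %zu bytes\n", n * sizeof(stats[0]));
            return 0;
    }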
vb->deflate_vq = vqs[1];
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
struct scatterlist sg;
+ unsigned int num_stats;
vb->stats_vq = vqs[2];
/*
* Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later (it can't be broken yet!).
*/
- sg_init_one(&sg, vb->stats, sizeof vb->stats);
+ num_stats = update_balloon_stats(vb);
+
+ sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
BUG();
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i;
- synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
- for (i = 1; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->intx_enabled)
+ synchronize_irq(vp_dev->pci_dev->irq);
+
+ for (i = 0; i < vp_dev->msix_vectors; ++i)
synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
+ struct virtio_pci_vq_info *info;
irqreturn_t ret = IRQ_NONE;
- struct virtqueue *vq;
+ unsigned long flags;
- list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
- if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_for_each_entry(info, &vp_dev->virtqueues, node) {
+ if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
return ret;
}
return vp_vring_interrupt(irq, opaque);
}
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ bool per_vq_vectors, struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtqueue *vq, *n;
+ const char *name = dev_name(&vp_dev->vdev.dev);
+ unsigned i, v;
+ int err = -ENOMEM;
- list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- if (vp_dev->msix_vector_map) {
- int v = vp_dev->msix_vector_map[vq->index];
+ vp_dev->msix_vectors = nvectors;
- if (v != VIRTIO_MSI_NO_VECTOR)
- free_irq(pci_irq_vector(vp_dev->pci_dev, v),
- vq);
- }
- vp_dev->del_vq(vq);
+ vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+ GFP_KERNEL);
+ if (!vp_dev->msix_names)
+ goto error;
+ vp_dev->msix_affinity_masks
+ = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+ GFP_KERNEL);
+ if (!vp_dev->msix_affinity_masks)
+ goto error;
+ for (i = 0; i < nvectors; ++i)
+ if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+ GFP_KERNEL))
+ goto error;
+
+ err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+ nvectors, PCI_IRQ_MSIX |
+ (desc ? PCI_IRQ_AFFINITY : 0),
+ desc);
+ if (err < 0)
+ goto error;
+ vp_dev->msix_enabled = 1;
+
+ /* Set the vector used for configuration */
+ v = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+ "%s-config", name);
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+ vp_config_changed, 0, vp_dev->msix_names[v],
+ vp_dev);
+ if (err)
+ goto error;
+ ++vp_dev->msix_used_vectors;
+
+ v = vp_dev->config_vector(vp_dev, v);
+ /* Verify we had enough resources to assign the vector */
+ if (v == VIRTIO_MSI_NO_VECTOR) {
+ err = -EBUSY;
+ goto error;
}
+
+ if (!per_vq_vectors) {
+ /* Shared vector for all VQs */
+ v = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+ "%s-virtqueues", name);
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+ vp_vring_interrupt, 0, vp_dev->msix_names[v],
+ vp_dev);
+ if (err)
+ goto error;
+ ++vp_dev->msix_used_vectors;
+ }
+ return 0;
+error:
+ return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ u16 msix_vec)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+ struct virtqueue *vq;
+ unsigned long flags;
+
+ /* fill out our structure that represents an active queue */
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
+ msix_vec);
+ if (IS_ERR(vq))
+ goto out_info;
+
+ info->vq = vq;
+ if (callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
+
+ vp_dev->vqs[index] = info;
+ return vq;
+
+out_info:
+ kfree(info);
+ return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ vp_dev->del_vq(info);
+ kfree(info);
}
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtqueue *vq, *n;
int i;
- if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
- return;
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ if (vp_dev->per_vq_vectors) {
+ int v = vp_dev->vqs[vq->index]->msix_vector;
- vp_remove_vqs(vdev);
+ if (v != VIRTIO_MSI_NO_VECTOR) {
+ int irq = pci_irq_vector(vp_dev->pci_dev, v);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, vq);
+ }
+ }
+ vp_del_vq(vq);
+ }
+ vp_dev->per_vq_vectors = false;
+
+ if (vp_dev->intx_enabled) {
+ free_irq(vp_dev->pci_dev->irq, vp_dev);
+ vp_dev->intx_enabled = 0;
+ }
- if (vp_dev->pci_dev->msix_enabled) {
- for (i = 0; i < vp_dev->msix_vectors; i++)
+ for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+ free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
- kfree(vp_dev->msix_affinity_masks);
- kfree(vp_dev->msix_names);
- kfree(vp_dev->msix_vector_map);
+ pci_free_irq_vectors(vp_dev->pci_dev);
+ vp_dev->msix_enabled = 0;
}
- free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
- pci_free_irq_vectors(vp_dev->pci_dev);
+ vp_dev->msix_vectors = 0;
+ vp_dev->msix_used_vectors = 0;
+ kfree(vp_dev->msix_names);
+ vp_dev->msix_names = NULL;
+ kfree(vp_dev->msix_affinity_masks);
+ vp_dev->msix_affinity_masks = NULL;
+ kfree(vp_dev->vqs);
+ vp_dev->vqs = NULL;
}
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], struct irq_affinity *desc)
+ const char * const names[], bool per_vq_vectors,
+ struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- const char *name = dev_name(&vp_dev->vdev.dev);
- int i, err = -ENOMEM, allocated_vectors, nvectors;
- unsigned flags = PCI_IRQ_MSIX;
- bool shared = false;
u16 msix_vec;
+ int i, err, nvectors, allocated_vectors;
- if (desc) {
- flags |= PCI_IRQ_AFFINITY;
- desc->pre_vectors++; /* virtio config vector */
- }
-
- nvectors = 1;
- for (i = 0; i < nvqs; i++)
- if (callbacks[i])
- nvectors++;
-
- /* Try one vector per queue first. */
- err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
- nvectors, flags, desc);
- if (err < 0) {
- /* Fallback to one vector for config, one shared for queues. */
- shared = true;
- err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
- PCI_IRQ_MSIX);
- if (err < 0)
- return err;
- }
- if (err < 0)
- return err;
-
- vp_dev->msix_vectors = nvectors;
- vp_dev->msix_names = kmalloc_array(nvectors,
- sizeof(*vp_dev->msix_names), GFP_KERNEL);
- if (!vp_dev->msix_names)
- goto out_free_irq_vectors;
-
- vp_dev->msix_affinity_masks = kcalloc(nvectors,
- sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
- if (!vp_dev->msix_affinity_masks)
- goto out_free_msix_names;
+ vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+ if (!vp_dev->vqs)
+ return -ENOMEM;
- for (i = 0; i < nvectors; ++i) {
- if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
- GFP_KERNEL))
- goto out_free_msix_affinity_masks;
+ if (per_vq_vectors) {
+ /* Best option: one for change interrupt, one per vq. */
+ nvectors = 1;
+ for (i = 0; i < nvqs; ++i)
+ if (callbacks[i])
+ ++nvectors;
+ } else {
+ /* Second best: one for change, shared for all vqs. */
+ nvectors = 2;
}
- /* Set the vector used for configuration */
- snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
- "%s-config", name);
- err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
- 0, vp_dev->msix_names[0], vp_dev);
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+ per_vq_vectors ? desc : NULL);
if (err)
- goto out_free_msix_affinity_masks;
+ goto error_find;
- /* Verify we had enough resources to assign the vector */
- if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto out_free_config_irq;
- }
-
- vp_dev->msix_vector_map = kmalloc_array(nvqs,
- sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
- if (!vp_dev->msix_vector_map)
- goto out_disable_config_irq;
-
- allocated_vectors = 1; /* vector 0 is the config interrupt */
+ vp_dev->per_vq_vectors = per_vq_vectors;
+ allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
}
- if (callbacks[i])
- msix_vec = allocated_vectors;
- else
+ if (!callbacks[i])
msix_vec = VIRTIO_MSI_NO_VECTOR;
-
- vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
- msix_vec);
+ else if (vp_dev->per_vq_vectors)
+ msix_vec = allocated_vectors++;
+ else
+ msix_vec = VP_MSIX_VQ_VECTOR;
+ vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ msix_vec);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
- goto out_remove_vqs;
+ goto error_find;
}
- if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
- vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+ if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
continue;
- }
- snprintf(vp_dev->msix_names[i + 1],
- sizeof(*vp_dev->msix_names), "%s-%s",
+ /* allocate per-vq irq if available and necessary */
+ snprintf(vp_dev->msix_names[msix_vec],
+ sizeof *vp_dev->msix_names,
+ "%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
- vring_interrupt, IRQF_SHARED,
- vp_dev->msix_names[i + 1], vqs[i]);
- if (err) {
- /* don't free this irq on error */
- vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
- goto out_remove_vqs;
- }
- vp_dev->msix_vector_map[i] = msix_vec;
-
- /*
- * Use a different vector for each queue if they are available,
- * else share the same vector for all VQs.
- */
- if (!shared)
- allocated_vectors++;
+ vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec],
+ vqs[i]);
+ if (err)
+ goto error_find;
}
-
return 0;
-out_remove_vqs:
- vp_remove_vqs(vdev);
- kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
- vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
- free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
- for (i = 0; i < nvectors; i++) {
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
- }
- kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
- kfree(vp_dev->msix_names);
-out_free_irq_vectors:
- pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+ vp_del_vqs(vdev);
return err;
}
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i, err;
+ vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+ if (!vp_dev->vqs)
+ return -ENOMEM;
+
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev);
if (err)
- return err;
+ goto out_del_vqs;
+ vp_dev->intx_enabled = 1;
+ vp_dev->per_vq_vectors = false;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
}
- vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
- VIRTIO_MSI_NO_VECTOR);
+ vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
- goto out_remove_vqs;
+ goto out_del_vqs;
}
}
return 0;
-
-out_remove_vqs:
- vp_remove_vqs(vdev);
- free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+ vp_del_vqs(vdev);
return err;
}
{
int err;
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc);
+ /* Try MSI-X with one vector per queue. */
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
if (!err)
return 0;
+ /* Fallback: MSI-X with one vector for config, one shared for queues. */
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
+ if (!err)
+ return 0;
+ /* Finally fall back to regular interrupts. */
return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
}
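
The restored fallback ladder is worth spelling out: per-VQ MSI-X first, then MSI-X
with one vector for config plus one shared queue vector, then legacy INTx. A compact
model of the ladder (error codes simulated):

    #include <errno.h>
    #include <stdio.h>

    static int try_msix(int per_vq)
    {
            return per_vq ? -ENOSPC : 0;  /* simulate vector exhaustion */
    }

    static int try_intx(void) { return 0; }

    static int find_vqs(void)
    {
            if (!try_msix(1))       /* best: one vector per queue */
                    return 0;
            if (!try_msix(0))       /* next: config + shared queue vector */
                    return 0;
            return try_intx();      /* last resort: legacy interrupt */
    }

    int main(void)
    {
            printf("find_vqs() = %d\n", find_vqs());
            return 0;
    }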
{
struct virtio_device *vdev = vq->vdev;
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+ struct cpumask *mask;
+ unsigned int irq;
if (!vq->callback)
return -EINVAL;
- if (vp_dev->pci_dev->msix_enabled) {
- int vec = vp_dev->msix_vector_map[vq->index];
- struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
- unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
-
+ if (vp_dev->msix_enabled) {
+ mask = vp_dev->msix_affinity_masks[info->msix_vector];
+ irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
if (cpu == -1)
irq_set_affinity_hint(irq, NULL);
else {
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- unsigned int *map = vp_dev->msix_vector_map;
- if (!map || map[index] == VIRTIO_MSI_NO_VECTOR)
+ if (!vp_dev->per_vq_vectors ||
+ vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
return NULL;
- return pci_irq_get_affinity(vp_dev->pci_dev, map[index]);
+ return pci_irq_get_affinity(vp_dev->pci_dev,
+ vp_dev->vqs[index]->msix_vector);
}
#ifdef CONFIG_PM_SLEEP
vp_dev->vdev.dev.parent = &pci_dev->dev;
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->pci_dev = pci_dev;
+ INIT_LIST_HEAD(&vp_dev->virtqueues);
+ spin_lock_init(&vp_dev->lock);
/* enable the device */
rc = pci_enable_device(pci_dev);
#include <linux/highmem.h>
#include <linux/spinlock.h>
+struct virtio_pci_vq_info {
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
+ /* the list node for the virtqueues list */
+ struct list_head node;
+
+ /* MSI-X vector (or none) */
+ unsigned msix_vector;
+};
+
/* Our device structure */
struct virtio_pci_device {
struct virtio_device vdev;
/* the IO mapping for the PCI config space */
void __iomem *ioaddr;
+ /* a list of queues so we can dispatch IRQs */
+ spinlock_t lock;
+ struct list_head virtqueues;
+
+ /* array of all queues for house-keeping */
+ struct virtio_pci_vq_info **vqs;
+
+ /* MSI-X support */
+ int msix_enabled;
+ int intx_enabled;
cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
char (*msix_names)[256];
- /* Total Number of MSI-X vectors (including per-VQ ones). */
- int msix_vectors;
- /* Map of per-VQ MSI-X vectors, may be NULL */
- unsigned *msix_vector_map;
+ /* Number of available vectors */
+ unsigned msix_vectors;
+ /* Vectors allocated, excluding per-vq vectors if any */
+ unsigned msix_used_vectors;
+
+ /* Whether we have vector per vq */
+ bool per_vq_vectors;
struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+ struct virtio_pci_vq_info *info,
unsigned idx,
void (*callback)(struct virtqueue *vq),
const char *name,
u16 msix_vec);
- void (*del_vq)(struct virtqueue *vq);
+ void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
};
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues. Thus, we need at least 2 vectors for MSI. */
+enum {
+ VP_MSIX_CONFIG_VECTOR = 0,
+ VP_MSIX_VQ_VECTOR = 1,
+};
+
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ struct virtio_pci_vq_info *info,
unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name,
if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
return ERR_PTR(-ENOENT);
+ info->msix_vector = msix_vec;
+
/* create the vring */
vq = vring_create_virtqueue(index, num,
VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
return ERR_PTR(err);
}
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
{
+ struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
- if (vp_dev->pci_dev->msix_enabled) {
+ if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
/* Flush the write out to device */
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ struct virtio_pci_vq_info *info,
unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name,
/* get offset of notification word for this vq */
off = vp_ioread16(&cfg->queue_notify_off);
+ info->msix_vector = msix_vec;
+
/* create the vring */
vq = vring_create_virtqueue(index, num,
SMP_CACHE_BYTES, &vp_dev->vdev,
return 0;
}
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
{
+ struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
vp_iowrite16(vq->index, &vp_dev->common->queue_select);
- if (vp_dev->pci_dev->msix_enabled) {
+ if (vp_dev->msix_enabled) {
vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
&vp_dev->common->queue_msix_vector);
/* Flush the write out to device */
return xenbus_command_reply(u, XS_ERROR, "ENOENT");
rc = xenbus_dev_request_and_reply(&u->u.msg, u);
- if (rc)
+ if (rc && trans) {
+ list_del(&trans->list);
kfree(trans);
+ }
out:
return rc;
atomic_t will_be_snapshoted;
/* For qgroup metadata space reserve */
- atomic_t qgroup_meta_rsv;
+ atomic64_t qgroup_meta_rsv;
};
static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
{
atomic_set(&root->orphan_inodes, 0);
atomic_set(&root->refs, 1);
atomic_set(&root->will_be_snapshoted, 0);
- atomic_set(&root->qgroup_meta_rsv, 0);
+ atomic64_set(&root->qgroup_meta_rsv, 0);
root->log_transid = 0;
root->log_transid_committed = -1;
root->last_log_commit = 0;
if (tree->ops) {
ret = tree->ops->readpage_io_failed_hook(page, mirror);
- if (!ret && !bio->bi_error)
- uptodate = 1;
- } else {
+ if (ret == -EAGAIN) {
+ /*
+ * Data inode's readpage_io_failed_hook() always
+ * returns -EAGAIN.
+ *
+ * The generic bio_readpage_error handles errors
+ * the following way: If possible, new read
+ * requests are created and submitted and will
+ * end up in end_bio_extent_readpage as well (if
+ * we're lucky, not in the !uptodate case). In
+ * that case it returns 0 and we just go on with
+ * the next page in our bio. If it can't handle
+ * the error it will return -EIO and we remain
+ * responsible for that page.
+ */
+ ret = bio_readpage_error(bio, offset, page,
+ start, end, mirror);
+ if (ret == 0) {
+ uptodate = !bio->bi_error;
+ offset += len;
+ continue;
+ }
+ }
+
/*
- * The generic bio_readpage_error handles errors the
- * following way: If possible, new read requests are
- * created and submitted and will end up in
- * end_bio_extent_readpage as well (if we're lucky, not
- * in the !uptodate case). In that case it returns 0 and
- * we just go on with the next page in our bio. If it
- * can't handle the error it will return -EIO and we
- * remain responsible for that page.
+ * metadata's readpage_io_failed_hook() always returns
+ * -EIO and fixes nothing. -EIO is also returned if
+ * data inode error could not be fixed.
*/
- ret = bio_readpage_error(bio, offset, page, start, end,
- mirror);
- if (ret == 0) {
- uptodate = !bio->bi_error;
- offset += len;
- continue;
- }
+ ASSERT(ret == -EIO);
}
readpage_ok:
if (likely(uptodate)) {
static void btrfs_retry_endio_nocsum(struct bio *bio)
{
struct btrfs_retry_complete *done = bio->bi_private;
- struct inode *inode;
struct bio_vec *bvec;
int i;
goto end;
ASSERT(bio->bi_vcnt == 1);
- inode = bio->bi_io_vec->bv_page->mapping->host;
- ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+ ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
done->uptodate = 1;
bio_for_each_segment_all(bvec, bio, i)
- clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0);
+ clean_io_failure(BTRFS_I(done->inode), done->start,
+ bvec->bv_page, 0);
end:
complete(&done->done);
bio_put(bio);
start += sectorsize;
- if (nr_sectors--) {
+ nr_sectors--;
+ if (nr_sectors) {
pgoff += sectorsize;
+ ASSERT(pgoff < PAGE_SIZE);
goto next_block_or_try_again;
}
}
{
struct btrfs_retry_complete *done = bio->bi_private;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
- struct inode *inode;
struct bio_vec *bvec;
- u64 start;
int uptodate;
int ret;
int i;
uptodate = 1;
- start = done->start;
-
ASSERT(bio->bi_vcnt == 1);
- inode = bio->bi_io_vec->bv_page->mapping->host;
- ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+ ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
bio_for_each_segment_all(bvec, bio, i) {
ret = __readpage_endio_check(done->inode, io_bio, i,
ASSERT(nr_sectors);
- if (--nr_sectors) {
+ nr_sectors--;
+ if (nr_sectors) {
pgoff += sectorsize;
+ ASSERT(pgoff < PAGE_SIZE);
goto next_block;
}
}
}
__attribute__((const))
-static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror)
+static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
{
- return 0;
+ return -EAGAIN;
}
static const struct inode_operations btrfs_dir_inode_operations = {
.submit_bio_hook = btrfs_submit_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
.merge_bio_hook = btrfs_merge_bio_hook,
- .readpage_io_failed_hook = dummy_readpage_io_failed_hook,
+ .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
/* optional callbacks */
.fill_delalloc = run_delalloc_range,
ret = qgroup_reserve(root, num_bytes, enforce);
if (ret < 0)
return ret;
- atomic_add(num_bytes, &root->qgroup_meta_rsv);
+ atomic64_add(num_bytes, &root->qgroup_meta_rsv);
return ret;
}
void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- int reserved;
+ u64 reserved;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid))
return;
- reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
+ reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
if (reserved == 0)
return;
btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
return;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
- atomic_sub(num_bytes, &root->qgroup_meta_rsv);
+ WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
+ atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
}
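
The atomic_t-to-atomic64_t switch exists because atomic_t is a 32-bit int: once the
total metadata reservation passes ~2 GiB, the counter wraps negative. A two-line
illustration (unsigned arithmetic is used so the wrap itself is well defined):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* one more 16 KiB reservation on top of ~2 GiB */
            uint32_t rsv32 = (uint32_t)INT32_MAX + 16384;
            long long rsv64 = (long long)INT32_MAX + 16384;

            printf("32-bit view: %d\n", (int32_t)rsv32);  /* negative */
            printf("64-bit view: %lld\n", rsv64);         /* 2147500031 */
            return 0;
    }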
goto out;
}
+ /*
+ * Check that we don't overflow at later allocations, we request
+ * clone_sources_count + 1 items, and compare to unsigned long inside
+ * access_ok.
+ */
if (arg->clone_sources_count >
- ULLONG_MAX / sizeof(*arg->clone_sources)) {
+ ULONG_MAX / sizeof(struct clone_root) - 1) {
ret = -EINVAL;
goto out;
}
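
To see why the bound changed, take a 32-bit build where unsigned long is 32 bits and
assume, for illustration only, sizeof(struct clone_root) == 32. The code allocates
clone_sources_count + 1 entries, so:

    new limit:  ULONG_MAX / 32 - 1 = 134217726
    count = 134217727 (one past the limit)
    (count + 1) * 32 = 4294967296 = 2^32  ->  wraps to 0 in unsigned long

The old ULLONG_MAX-based check would have accepted that count even though the size
computed inside access_ok() overflows; reserving one slot's worth of headroom closes
the gap.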
case Opt_ssd:
btrfs_set_and_info(info, SSD,
"use ssd allocation scheme");
+ btrfs_clear_opt(info->mount_opt, NOSSD);
break;
case Opt_ssd_spread:
btrfs_set_and_info(info, SSD_SPREAD,
"use spread ssd allocation scheme");
btrfs_set_opt(info->mount_opt, SSD);
+ btrfs_clear_opt(info->mount_opt, NOSSD);
break;
case Opt_nossd:
btrfs_set_and_info(info, NOSSD,
"not using ssd allocation scheme");
btrfs_clear_opt(info->mount_opt, SSD);
+ btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
break;
case Opt_barrier:
btrfs_clear_and_info(info, NOBARRIER,
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev ||
- (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
+ (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
bbio_error(bbio, first_bio, logical);
continue;
}
return rc;
}
+ssize_t cifs_file_copychunk_range(unsigned int xid,
+ struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff,
+ size_t len, unsigned int flags)
+{
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *target_inode = file_inode(dst_file);
+ struct cifsFileInfo *smb_file_src;
+ struct cifsFileInfo *smb_file_target;
+ struct cifs_tcon *src_tcon;
+ struct cifs_tcon *target_tcon;
+ ssize_t rc;
+
+ cifs_dbg(FYI, "copychunk range\n");
+
+ if (src_inode == target_inode) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!src_file->private_data || !dst_file->private_data) {
+ rc = -EBADF;
+ cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+ goto out;
+ }
+
+ rc = -EXDEV;
+ smb_file_target = dst_file->private_data;
+ smb_file_src = src_file->private_data;
+ src_tcon = tlink_tcon(smb_file_src->tlink);
+ target_tcon = tlink_tcon(smb_file_target->tlink);
+
+ if (src_tcon->ses != target_tcon->ses) {
+ cifs_dbg(VFS, "source and target of copy not on same server\n");
+ goto out;
+ }
+
+	/*
+	 * Note: the cifs case is easier than btrfs since the server is
+	 * responsible for checking for proper open modes and file type,
+	 * and if it wants to, the server could even support a copy of a
+	 * range where source == target.
+	 */
+ lock_two_nondirectories(target_inode, src_inode);
+
+ cifs_dbg(FYI, "about to flush pages\n");
+	/* should we flush the first and last pages first? */
+ truncate_inode_pages(&target_inode->i_data, 0);
+
+ if (target_tcon->ses->server->ops->copychunk_range)
+ rc = target_tcon->ses->server->ops->copychunk_range(xid,
+ smb_file_src, smb_file_target, off, len, destoff);
+ else
+ rc = -EOPNOTSUPP;
+
+ /* force revalidate of size and timestamps of target file now
+ * that target is updated on the server
+ */
+ CIFS_I(target_inode)->time = 0;
+	/* although unlocking in the reverse order from locking is not
+	 * strictly necessary here, it is a little cleaner to be consistent
+	 */
+ unlock_two_nondirectories(src_inode, target_inode);
+
+out:
+ return rc;
+}
+
+static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff,
+ size_t len, unsigned int flags)
+{
+ unsigned int xid = get_xid();
+ ssize_t rc;
+
+ rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
+ len, flags);
+ free_xid(xid);
+ return rc;
+}
+
const struct file_operations cifs_file_ops = {
.read_iter = cifs_loose_read_iter,
.write_iter = cifs_file_write_iter,
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.llseek = cifs_llseek,
.setlease = cifs_setlease,
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.llseek = cifs_llseek,
.setlease = cifs_setlease,
.release = cifs_closedir,
.read = generic_read_dir,
.unlocked_ioctl = cifs_ioctl,
+ .copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
.llseek = generic_file_llseek,
};
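
With .copy_file_range wired into each cifs file_operations table above, a userspace copy_file_range(2) between two files on the same cifs mount can now be served by the SMB2 copychunk path. A hedged usage sketch (glibc 2.27+ wrapper assumed; paths are placeholders and error handling is trimmed):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int in = open("/mnt/cifs/src", O_RDONLY);
            int out = open("/mnt/cifs/dst", O_WRONLY | O_CREAT | O_TRUNC, 0644);
            /* NULL offsets: copy at the current file positions */
            ssize_t n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);

            close(in);
            close(out);
            return n < 0;
    }
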
# define cifs_listxattr NULL
#endif
+extern ssize_t cifs_file_copychunk_range(unsigned int xid,
+ struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff,
+ size_t len, unsigned int flags);
+
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_CIFS_NFSD_EXPORT
extern const struct export_operations cifs_export_ops;
/* verify the message */
int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+ int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
void (*downgrade_oplock)(struct TCP_Server_Info *,
struct cifsInodeInfo *, bool);
/* process transaction2 response */
char * (*create_lease_buf)(u8 *, u8);
/* parse lease context buffer and return oplock/epoch info */
__u8 (*parse_lease_buf)(void *, unsigned int *);
- int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
- struct cifsFileInfo *target_file, u64 src_off, u64 len,
- u64 dest_off);
+ ssize_t (*copychunk_range)(const unsigned int,
+ struct cifsFileInfo *src_file,
+ struct cifsFileInfo *target_file,
+ u64 src_off, u64 len, u64 dest_off);
int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
struct cifsFileInfo *target_file, u64 src_off, u64 len,
u64 dest_off);
bool use_persistent:1; /* use persistent instead of durable handles */
#ifdef CONFIG_CIFS_SMB2
bool print:1; /* set if connection to printer share */
- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
void *callback_data; /* general purpose pointer for callback */
void *resp_buf; /* pointer to received SMB header */
int mid_state; /* wish this were enum but can not pass to wait_event */
+ unsigned int mid_flags;
__le16 command; /* smb command code */
bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool decrypted:1; /* decrypted entry */
};
+struct close_cancelled_open {
+ struct cifs_fid fid;
+ struct cifs_tcon *tcon;
+ struct work_struct work;
+};
+
/* Make code in transport.c a little cleaner by moving
update of optional stats into function below */
#ifdef CONFIG_CIFS_STATS2
#define MID_RESPONSE_MALFORMED 0x10
#define MID_SHUTDOWN 0x20
+/* Flags */
+#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
+
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
#define CIFS_SMALL_BUFFER 1
length = cifs_discard_remaining_data(server);
dequeue_mid(mid, rdata->result);
+ mid->resp_buf = server->smallbuf;
+ server->smallbuf = NULL;
return length;
}
return cifs_readv_discard(server, mid);
dequeue_mid(mid, false);
+ mid->resp_buf = server->smallbuf;
+ server->smallbuf = NULL;
return length;
}
server->lstrp = jiffies;
if (mid_entry != NULL) {
+ if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+ mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+ server->ops->handle_cancelled_mid)
+ server->ops->handle_cancelled_mid(
+ mid_entry->resp_buf,
+ server);
+
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
- } else if (!server->ops->is_oplock_break ||
- !server->ops->is_oplock_break(buf, server)) {
+ } else if (server->ops->is_oplock_break &&
+ server->ops->is_oplock_break(buf, server)) {
+ cifs_dbg(FYI, "Received oplock break\n");
+ } else {
cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", buf,
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
tcon = NULL;
+ if (rc == -EACCES)
+ goto mount_fail_check;
+
goto remote_path_check;
}
wdata->credits = credits;
if (!wdata->cfile->invalidHandle ||
- !cifs_reopen_file(wdata->cfile, false))
+ !(rc = cifs_reopen_file(wdata->cfile, false)))
rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
if (rc) {
rdata->credits = credits;
if (!rdata->cfile->invalidHandle ||
- !cifs_reopen_file(rdata->cfile, true))
+ !(rc = cifs_reopen_file(rdata->cfile, true)))
rc = server->ops->async_readv(rdata);
error:
if (rc) {
}
if (!rdata->cfile->invalidHandle ||
- !cifs_reopen_file(rdata->cfile, true))
+ !(rc = cifs_reopen_file(rdata->cfile, true)))
rc = server->ops->async_readv(rdata);
if (rc) {
add_credits_and_wake_if(server, rdata->credits, 0);
#include "cifs_ioctl.h"
#include <linux/btrfs.h>
-static int cifs_file_clone_range(unsigned int xid, struct file *src_file,
- struct file *dst_file)
-{
- struct inode *src_inode = file_inode(src_file);
- struct inode *target_inode = file_inode(dst_file);
- struct cifsFileInfo *smb_file_src;
- struct cifsFileInfo *smb_file_target;
- struct cifs_tcon *src_tcon;
- struct cifs_tcon *target_tcon;
- int rc;
-
- cifs_dbg(FYI, "ioctl clone range\n");
-
- if (!src_file->private_data || !dst_file->private_data) {
- rc = -EBADF;
- cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
- goto out;
- }
-
- rc = -EXDEV;
- smb_file_target = dst_file->private_data;
- smb_file_src = src_file->private_data;
- src_tcon = tlink_tcon(smb_file_src->tlink);
- target_tcon = tlink_tcon(smb_file_target->tlink);
-
- if (src_tcon->ses != target_tcon->ses) {
- cifs_dbg(VFS, "source and target of copy not on same server\n");
- goto out;
- }
-
- /*
- * Note: cifs case is easier than btrfs since server responsible for
- * checks for proper open modes and file type and if it wants
- * server could even support copy of range where source = target
- */
- lock_two_nondirectories(target_inode, src_inode);
-
- cifs_dbg(FYI, "about to flush pages\n");
- /* should we flush first and last page first */
- truncate_inode_pages(&target_inode->i_data, 0);
-
- if (target_tcon->ses->server->ops->clone_range)
- rc = target_tcon->ses->server->ops->clone_range(xid,
- smb_file_src, smb_file_target, 0, src_inode->i_size, 0);
- else
- rc = -EOPNOTSUPP;
-
- /* force revalidate of size and timestamps of target file now
- that target is updated on the server */
- CIFS_I(target_inode)->time = 0;
- /* although unlocking in the reverse order from locking is not
- strictly necessary here it is a little cleaner to be consistent */
- unlock_two_nondirectories(src_inode, target_inode);
-out:
- return rc;
-}
-
-static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
unsigned long srcfd)
{
int rc;
struct fd src_file;
struct inode *src_inode;
- cifs_dbg(FYI, "ioctl clone range\n");
+ cifs_dbg(FYI, "ioctl copychunk range\n");
/* the destination must be opened for writing */
if (!(dst_file->f_mode & FMODE_WRITE)) {
cifs_dbg(FYI, "file target not open for write\n");
if (S_ISDIR(src_inode->i_mode))
goto out_fput;
- rc = cifs_file_clone_range(xid, src_file.file, dst_file);
+ rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
+ src_inode->i_size, 0);
out_fput:
fdput(src_file);
}
break;
case CIFS_IOC_COPYCHUNK_FILE:
- rc = cifs_ioctl_clone(xid, filep, arg);
+ rc = cifs_ioctl_copychunk(xid, filep, arg);
break;
case CIFS_IOC_SET_INTEGRITY:
if (pSMBFile == NULL)
cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
return false;
}
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+ struct close_cancelled_open *cancelled = container_of(work,
+ struct close_cancelled_open, work);
+
+ cifs_dbg(VFS, "Close unmatched open\n");
+
+ SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+ cancelled->fid.volatile_fid);
+ cifs_put_tcon(cancelled->tcon);
+ kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+ struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer);
+ struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+ struct cifs_tcon *tcon;
+ struct close_cancelled_open *cancelled;
+
+ if (sync_hdr->Command != SMB2_CREATE ||
+ sync_hdr->Status != STATUS_SUCCESS)
+ return 0;
+
+ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+ tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
+ sync_hdr->TreeId);
+ if (!tcon) {
+ kfree(cancelled);
+ return -ENOENT;
+ }
+
+ cancelled->fid.persistent_fid = rsp->PersistentFileId;
+ cancelled->fid.volatile_fid = rsp->VolatileFileId;
+ cancelled->tcon = tcon;
+ INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+ queue_work(cifsiod_wq, &cancelled->work);
+
+ return 0;
+}
#include <linux/vfs.h>
#include <linux/falloc.h>
#include <linux/scatterlist.h>
+#include <linux/uuid.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
return rc;
}
-static int
-smb2_clone_range(const unsigned int xid,
+static ssize_t
+smb2_copychunk_range(const unsigned int xid,
struct cifsFileInfo *srcfile,
struct cifsFileInfo *trgtfile, u64 src_off,
u64 len, u64 dest_off)
struct cifs_tcon *tcon;
int chunks_copied = 0;
bool chunk_sizes_updated = false;
+ ssize_t bytes_written, total_bytes_written = 0;
pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
if (pcchunk == NULL)
return -ENOMEM;
- cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+ cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
/* Request a key from the server to identify the source of the copy */
rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
srcfile->fid.persistent_fid,
}
chunks_copied++;
- src_off += le32_to_cpu(retbuf->TotalBytesWritten);
- dest_off += le32_to_cpu(retbuf->TotalBytesWritten);
- len -= le32_to_cpu(retbuf->TotalBytesWritten);
+ bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
+ src_off += bytes_written;
+ dest_off += bytes_written;
+ len -= bytes_written;
+ total_bytes_written += bytes_written;
- cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n",
+ cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
le32_to_cpu(retbuf->ChunksWritten),
le32_to_cpu(retbuf->ChunkBytesWritten),
- le32_to_cpu(retbuf->TotalBytesWritten));
+ bytes_written);
} else if (rc == -EINVAL) {
if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
goto cchunk_out;
cchunk_out:
kfree(pcchunk);
kfree(retbuf);
- return rc;
+ if (rc)
+ return rc;
+ else
+ return total_bytes_written;
}
static int
.clear_stats = smb2_clear_stats,
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.set_oplock_level = smb2_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.get_dfs_refer = smb2_get_dfs_refer,
.clear_stats = smb2_clear_stats,
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.set_oplock_level = smb21_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.enum_snapshots = smb3_enum_snapshots,
.print_stats = smb2_print_stats,
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.set_oplock_level = smb3_set_oplock_level,
.create_lease_buf = smb3_create_lease_buf,
.parse_lease_buf = smb3_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.duplicate_extents = smb2_duplicate_extents,
.validate_negotiate = smb3_validate_negotiate,
.wp_retry_size = smb2_wp_retry_size,
.print_stats = smb2_print_stats,
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.set_oplock_level = smb3_set_oplock_level,
.create_lease_buf = smb3_create_lease_buf,
.parse_lease_buf = smb3_parse_lease_buf,
- .clone_range = smb2_clone_range,
+ .copychunk_range = smb2_copychunk_range,
.duplicate_extents = smb2_duplicate_extents,
/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
.wp_retry_size = smb2_wp_retry_size,
* but for time being this is our only auth choice so doesn't matter.
* We just found a server which sets blob length to zero expecting raw.
*/
- if (blob_length == 0)
+ if (blob_length == 0) {
cifs_dbg(FYI, "missing security blob on negprot\n");
+ server->sec_ntlmssp = true;
+ }
rc = cifs_enable_signing(server, ses->sign);
if (rc)
else
return -EIO;
- if (tcon && tcon->bad_network_name)
- return -ENOENT;
-
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
if (unc_path == NULL)
return -ENOMEM;
return -EINVAL;
}
+ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+ if (tcon)
+ tcon->tid = 0;
+
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
tcon_error_exit:
if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
- if (tcon)
- tcon->bad_network_name = true;
}
goto tcon_exit;
}
struct cifs_tcon *tcon, *tcon2;
struct list_head tmp_list;
int tcon_exist = false;
+ int rc;
+ int resched = false;
+
/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
mutex_lock(&server->reconnect_mutex);
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
- if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+ if (!rc)
cifs_reopen_persistent_handles(tcon);
+ else
+ resched = true;
list_del_init(&tcon->rlist);
cifs_put_tcon(tcon);
}
cifs_dbg(FYI, "Reconnecting tcons finished\n");
+ if (resched)
+ queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
mutex_unlock(&server->reconnect_mutex);
/* now we can safely release srv struct */
struct smb_rqst *rqst);
extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+ __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+ __u64 ses_id, __u32 tid);
extern int smb2_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server);
extern int smb3_calc_signature(struct smb_rqst *rqst,
extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+ struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct kstatfs *FSData);
return 0;
}
-struct cifs_ses *
-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+static struct cifs_ses *
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
{
struct cifs_ses *ses;
- spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->Suid != ses_id)
continue;
- spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
+
+ return NULL;
+}
+
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+ struct cifs_ses *ses;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ ses = smb2_find_smb_ses_unlocked(server, ses_id);
spin_unlock(&cifs_tcp_ses_lock);
+ return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+{
+ struct cifs_tcon *tcon;
+
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid != tid)
+ continue;
+ ++tcon->tc_count;
+ return tcon;
+ }
+
return NULL;
}
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ ses = smb2_find_smb_ses_unlocked(server, ses_id);
+ if (!ses) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return NULL;
+ }
+ tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ return tcon;
+}
+
int
smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
+ cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
send_cancel(ses->server, rqst, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+ midQ->mid_flags |= MID_WAIT_CANCELLED;
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
add_credits(ses->server, 1, optype);
}
spin_lock_irq(&mapping->tree_lock);
+ if (!entry) {
+ /*
+ * We needed to drop the page_tree lock while calling
+ * radix_tree_preload() and we didn't have an entry to
+ * lock. See if another thread inserted an entry at
+ * our index during this time.
+ */
+ entry = __radix_tree_lookup(&mapping->page_tree, index,
+ NULL, &slot);
+ if (entry) {
+ radix_tree_preload_end();
+ spin_unlock_irq(&mapping->tree_lock);
+ goto restart;
+ }
+ }
+
if (pmd_downgrade) {
radix_tree_delete(&mapping->page_tree, index);
mapping->nrexceptional--;
if (err) {
spin_unlock_irq(&mapping->tree_lock);
/*
- * Someone already created the entry? This is a
- * normal failure when inserting PMDs in a range
- * that already contains PTEs. In that case we want
- * to return -EEXIST immediately.
- */
- if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
- goto restart;
- /*
- * Our insertion of a DAX PMD entry failed, most
- * likely because it collided with a PTE sized entry
- * at a different index in the PMD range. We haven't
- * inserted anything into the radix tree and have no
- * waiters to wake.
+ * Our insertion of a DAX entry failed, most likely
+ * because we were inserting a PMD entry and it
+ * collided with a PTE sized entry at a different
+ * index in the PMD range. We haven't inserted
+ * anything into the radix tree and have no waiters to
+ * wake.
*/
return ERR_PTR(err);
}
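
The recheck added above follows a classic pattern: the tree lock must be dropped to call radix_tree_preload(), so before inserting, the code looks the index up again in case another thread raced in. A generic standalone illustration of that drop-allocate-relock-recheck shape (pthreads, not DAX code):

    #include <pthread.h>
    #include <stdlib.h>

    struct slot {
            pthread_mutex_t lock;
            void *entry;
    };

    static void *get_or_create(struct slot *s)
    {
            pthread_mutex_lock(&s->lock);
            if (!s->entry) {
                    pthread_mutex_unlock(&s->lock);
                    void *fresh = malloc(64);   /* allocate with the lock dropped */
                    pthread_mutex_lock(&s->lock);
                    if (s->entry)
                            free(fresh);        /* another thread won the race */
                    else
                            s->entry = fresh;
            }
            void *e = s->entry;
            pthread_mutex_unlock(&s->lock);
            return e;
    }
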
extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
extern void ext4_evict_inode(struct inode *);
extern void ext4_clear_inode(struct inode *);
+extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
const struct inode_operations ext4_file_inode_operations = {
.setattr = ext4_setattr,
- .getattr = ext4_getattr,
+ .getattr = ext4_file_getattr,
.listxattr = ext4_listxattr,
.get_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
int ext4_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
- struct inode *inode;
- unsigned long long delalloc_blocks;
+ struct inode *inode = d_inode(path->dentry);
+ struct ext4_inode *raw_inode;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int flags;
+
+ if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = ei->i_crtime.tv_sec;
+ stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
+ }
+
+ flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
+ if (flags & EXT4_APPEND_FL)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (flags & EXT4_COMPR_FL)
+ stat->attributes |= STATX_ATTR_COMPRESSED;
+ if (flags & EXT4_ENCRYPT_FL)
+ stat->attributes |= STATX_ATTR_ENCRYPTED;
+ if (flags & EXT4_IMMUTABLE_FL)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ if (flags & EXT4_NODUMP_FL)
+ stat->attributes |= STATX_ATTR_NODUMP;
+
+ stat->attributes_mask |= (STATX_ATTR_APPEND |
+ STATX_ATTR_COMPRESSED |
+ STATX_ATTR_ENCRYPTED |
+ STATX_ATTR_IMMUTABLE |
+ STATX_ATTR_NODUMP);
- inode = d_inode(path->dentry);
generic_fillattr(inode, stat);
+ return 0;
+}
+
+int ext4_file_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags)
+{
+ struct inode *inode = d_inode(path->dentry);
+ u64 delalloc_blocks;
+
+ ext4_getattr(path, stat, request_mask, query_flags);
/*
* If there is inline data in the inode, the inode will normally not
.tmpfile = ext4_tmpfile,
.rename = ext4_rename2,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
.get_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
const struct inode_operations ext4_special_inode_operations = {
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
.get_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
const struct inode_operations ext4_encrypted_symlink_inode_operations = {
.get_link = ext4_encrypted_get_link,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
};
const struct inode_operations ext4_symlink_inode_operations = {
.get_link = page_get_link,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
};
const struct inode_operations ext4_fast_symlink_inode_operations = {
.get_link = simple_get_link,
.setattr = ext4_setattr,
+ .getattr = ext4_getattr,
.listxattr = ext4_listxattr,
};
vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
vma->vm_ops = &hugetlb_vm_ops;
+ /*
+ * Offset passed to mmap (before page shift) could have been
+ * negative when represented as a (l)off_t.
+ */
+ if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+ return -EINVAL;
+
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
return -EINVAL;
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+ len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ /* check for overflow */
+ if (len < vma_len)
+ return -EINVAL;
inode_lock(inode);
file_accessed(file);
ret = -ENOMEM;
- len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
if (hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
ret = 0;
if (vma->vm_flags & VM_WRITE && inode->i_size < len)
- inode->i_size = len;
+ i_size_write(inode, len);
out:
inode_unlock(inode);
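
The new checks above close two holes in hugetlbfs_file_mmap: a vm_pgoff whose byte value no longer fits in a loff_t, and a len = vma_len + offset sum that wraps. A small demonstration of the first failure mode (standalone, assumes a 64-bit long and PAGE_SHIFT == 12):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pgoff = 1UL << 51;        /* huge page offset */
            unsigned long long bytes = (unsigned long long)pgoff << 12;
            long long off = (long long)bytes;       /* wraps negative on Linux ABIs */

            if (off < 0)
                    printf("byte offset went negative: %lld\n", off);
            return 0;
    }
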
inode = new_inode(sb);
if (inode) {
- struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode->i_mode = S_IFDIR | config->mode;
inode->i_uid = config->uid;
inode->i_gid = config->gid;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- info = HUGETLBFS_I(inode);
- mpol_shared_policy_init(&info->policy, NULL);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inode = new_inode(sb);
if (inode) {
- struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
inode->i_mapping->private_data = resv_map;
- info = HUGETLBFS_I(inode);
- /*
- * The policy is initialized here even if we are creating a
- * private inode because initialization simply creates an
- * an empty rb tree and calls rwlock_init(), later when we
- * call mpol_free_shared_policy() it will just return because
- * the rb tree will still be empty.
- */
- mpol_shared_policy_init(&info->policy, NULL);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
+
+ /*
+ * Any time after allocation, hugetlbfs_destroy_inode can be called
+ * for the inode. mpol_free_shared_policy is unconditionally called
+ * as part of hugetlbfs_destroy_inode. So, initialize policy here
+ * in case of a quick call to destroy.
+ *
+ * Note that the policy is initialized even if we are creating a
+ * private inode. This simplifies hugetlbfs_destroy_inode.
+ */
+ mpol_shared_policy_init(&p->policy, NULL);
+
return &p->vfs_inode;
}
int retval = 0;
const char *s = nd->name->name;
+ if (!*s)
+ flags &= ~LOOKUP_RCU;
+
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
- struct dentry *dentry = NULL, *rehash = NULL;
+ struct dentry *dentry = NULL;
struct rpc_task *task;
int error = -EBUSY;
* To prevent any new references to the target during the
* rename, we unhash the dentry in advance.
*/
- if (!d_unhashed(new_dentry)) {
+ if (!d_unhashed(new_dentry))
d_drop(new_dentry);
- rehash = new_dentry;
- }
if (d_count(new_dentry) > 2) {
int err;
goto out;
new_dentry = dentry;
- rehash = NULL;
new_inode = NULL;
}
}
error = task->tk_status;
rpc_put_task(task);
out:
- if (rehash)
- d_rehash(rehash);
trace_nfs_rename_exit(old_dir, old_dentry,
new_dir, new_dentry, error);
/* new dentry created? */
task->tk_status);
nfs4_mark_deviceid_unavailable(devid);
pnfs_error_mark_layout_for_return(inode, lseg);
- pnfs_set_lo_fail(lseg);
rpc_wake_up(&tbl->slot_tbl_waitq);
/* fall through */
default:
+ pnfs_set_lo_fail(lseg);
reset:
dprintk("%s Retry through MDS. Error %d\n", __func__,
task->tk_status);
return PNFS_ATTEMPTED;
}
+static int
+filelayout_check_deviceid(struct pnfs_layout_hdr *lo,
+ struct nfs4_filelayout_segment *fl,
+ gfp_t gfp_flags)
+{
+ struct nfs4_deviceid_node *d;
+ struct nfs4_file_layout_dsaddr *dsaddr;
+ int status = -EINVAL;
+
+ /* find and reference the deviceid */
+ d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid,
+ lo->plh_lc_cred, gfp_flags);
+ if (d == NULL)
+ goto out;
+
+ dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
+ /* Found deviceid is unavailable */
+ if (filelayout_test_devid_unavailable(&dsaddr->id_node))
+ goto out_put;
+
+ fl->dsaddr = dsaddr;
+
+ if (fl->first_stripe_index >= dsaddr->stripe_count) {
+ dprintk("%s Bad first_stripe_index %u\n",
+ __func__, fl->first_stripe_index);
+ goto out_put;
+ }
+
+ if ((fl->stripe_type == STRIPE_SPARSE &&
+ fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
+ (fl->stripe_type == STRIPE_DENSE &&
+ fl->num_fh != dsaddr->stripe_count)) {
+ dprintk("%s num_fh %u not valid for given packing\n",
+ __func__, fl->num_fh);
+ goto out_put;
+ }
+ status = 0;
+out:
+ return status;
+out_put:
+ nfs4_fl_put_deviceid(dsaddr);
+ goto out;
+}
+
/*
* filelayout_check_layout()
*
filelayout_check_layout(struct pnfs_layout_hdr *lo,
struct nfs4_filelayout_segment *fl,
struct nfs4_layoutget_res *lgr,
- struct nfs4_deviceid *id,
gfp_t gfp_flags)
{
- struct nfs4_deviceid_node *d;
- struct nfs4_file_layout_dsaddr *dsaddr;
int status = -EINVAL;
dprintk("--> %s\n", __func__);
goto out;
}
- /* find and reference the deviceid */
- d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id,
- lo->plh_lc_cred, gfp_flags);
- if (d == NULL)
- goto out;
-
- dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
- /* Found deviceid is unavailable */
- if (filelayout_test_devid_unavailable(&dsaddr->id_node))
- goto out_put;
-
- fl->dsaddr = dsaddr;
-
- if (fl->first_stripe_index >= dsaddr->stripe_count) {
- dprintk("%s Bad first_stripe_index %u\n",
- __func__, fl->first_stripe_index);
- goto out_put;
- }
-
- if ((fl->stripe_type == STRIPE_SPARSE &&
- fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
- (fl->stripe_type == STRIPE_DENSE &&
- fl->num_fh != dsaddr->stripe_count)) {
- dprintk("%s num_fh %u not valid for given packing\n",
- __func__, fl->num_fh);
- goto out_put;
- }
-
status = 0;
out:
dprintk("--> %s returns %d\n", __func__, status);
return status;
-out_put:
- nfs4_fl_put_deviceid(dsaddr);
- goto out;
}
static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
filelayout_decode_layout(struct pnfs_layout_hdr *flo,
struct nfs4_filelayout_segment *fl,
struct nfs4_layoutget_res *lgr,
- struct nfs4_deviceid *id,
gfp_t gfp_flags)
{
struct xdr_stream stream;
if (unlikely(!p))
goto out_err;
- memcpy(id, p, sizeof(*id));
+ memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
- nfs4_print_deviceid(id);
+ nfs4_print_deviceid(&fl->deviceid);
nfl_util = be32_to_cpup(p++);
if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
{
struct nfs4_filelayout_segment *fl;
int rc;
- struct nfs4_deviceid id;
dprintk("--> %s\n", __func__);
fl = kzalloc(sizeof(*fl), gfp_flags);
if (!fl)
return NULL;
- rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
- if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
+ rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
+ if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
_filelayout_free_lseg(fl);
return NULL;
}
return min(stripe_unit - (unsigned int)stripe_offset, size);
}
+static struct pnfs_layout_segment *
+fl_pnfs_update_layout(struct inode *ino,
+ struct nfs_open_context *ctx,
+ loff_t pos,
+ u64 count,
+ enum pnfs_iomode iomode,
+ bool strict_iomode,
+ gfp_t gfp_flags)
+{
+ struct pnfs_layout_segment *lseg = NULL;
+ struct pnfs_layout_hdr *lo;
+ struct nfs4_filelayout_segment *fl;
+ int status;
+
+ lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
+ gfp_flags);
+ if (!lseg)
+ lseg = ERR_PTR(-ENOMEM);
+ if (IS_ERR(lseg))
+ goto out;
+
+ lo = NFS_I(ino)->layout;
+ fl = FILELAYOUT_LSEG(lseg);
+
+ status = filelayout_check_deviceid(lo, fl, gfp_flags);
+ if (status)
+ lseg = ERR_PTR(status);
+out:
+ if (IS_ERR(lseg))
+ pnfs_put_lseg(lseg);
+ return lseg;
+}
+
static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
if (!pgio->pg_lseg) {
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- req->wb_context,
- 0,
- NFS4_MAX_UINT64,
- IOMODE_READ,
- false,
- GFP_KERNEL);
+ pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_READ,
+ false,
+ GFP_KERNEL);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
int status;
if (!pgio->pg_lseg) {
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- req->wb_context,
- 0,
- NFS4_MAX_UINT64,
- IOMODE_RW,
- false,
- GFP_NOFS);
+ pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_RW,
+ false,
+ GFP_NOFS);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
};
struct nfs4_filelayout_segment {
- struct pnfs_layout_segment generic_hdr;
- u32 stripe_type;
- u32 commit_through_mds;
- u32 stripe_unit;
- u32 first_stripe_index;
- u64 pattern_offset;
- struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
- unsigned int num_fh;
- struct nfs_fh **fh_array;
+ struct pnfs_layout_segment generic_hdr;
+ u32 stripe_type;
+ u32 commit_through_mds;
+ u32 stripe_unit;
+ u32 first_stripe_index;
+ u64 pattern_offset;
+ struct nfs4_deviceid deviceid;
+ struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
+ unsigned int num_fh;
+ struct nfs_fh **fh_array;
};
struct nfs4_filelayout {
} else
goto outerr;
}
+
+ if (IS_ERR(mirror->mirror_ds))
+ goto outerr;
+
if (mirror->mirror_ds->ds == NULL) {
struct nfs4_deviceid_node *devid;
devid = &mirror->mirror_ds->id_node;
}
nfs4_stateid_copy(&stateid, &delegation->stateid);
- if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+ if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
+ !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+ &delegation->flags)) {
rcu_read_unlock();
nfs_finish_clear_delegation_stateid(state, &stateid);
return;
}
- if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
- rcu_read_unlock();
- return;
- }
-
cred = get_rpccred(delegation->cred);
rcu_read_unlock();
status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
static ssize_t
nfsd_print_version_support(char *buf, int remaining, const char *sep,
- unsigned vers, unsigned minor)
+ unsigned vers, int minor)
{
- const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u";
+ const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u";
bool supported = !!nfsd_vers(vers, NFSD_TEST);
- if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST))
+ if (vers == 4 && minor >= 0 &&
+ !nfsd_minorversion(minor, NFSD_TEST))
supported = false;
+ if (minor == 0 && supported)
+ /*
+		 * special case for backward compatibility.
+ * +4.0 is never reported, it is implied by
+ * +4, unless -4.0 is present.
+ */
+ return 0;
return snprintf(buf, remaining, format, sep,
supported ? '+' : '-', vers, minor);
}
char *mesg = buf;
char *vers, *minorp, sign;
int len, num, remaining;
- unsigned minor;
ssize_t tlen = 0;
char *sep;
struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
if (len <= 0) return -EINVAL;
do {
enum vers_op cmd;
+ unsigned minor;
sign = *vers;
if (sign == '+' || sign == '-')
num = simple_strtol((vers+1), &minorp, 0);
return -EINVAL;
if (kstrtouint(minorp+1, 0, &minor) < 0)
return -EINVAL;
- } else
- minor = 0;
+ }
+
cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
switch(num) {
case 2:
nfsd_vers(num, cmd);
break;
case 4:
- if (nfsd_minorversion(minor, cmd) >= 0)
- break;
+ if (*minorp == '.') {
+ if (nfsd_minorversion(minor, cmd) < 0)
+ return -EINVAL;
+ } else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) {
+ /*
+ * Either we have +4 and no minors are enabled,
+ * or we have -4 and at least one minor is enabled.
+ * In either case, propagate 'cmd' to all minors.
+ */
+ minor = 0;
+ while (nfsd_minorversion(minor, cmd) >= 0)
+ minor++;
+ }
+ break;
default:
return -EINVAL;
}
sep = "";
remaining = SIMPLE_TRANSACTION_LIMIT;
for (num=2 ; num <= 4 ; num++) {
+ int minor;
if (!nfsd_vers(num, NFSD_AVAIL))
continue;
- minor = 0;
+
+ minor = -1;
do {
len = nfsd_print_version_support(buf, remaining,
sep, num, minor);
buf += len;
tlen += len;
minor++;
- sep = " ";
+ if (len)
+ sep = " ";
} while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION);
}
out:
{ nfserr_serverfault, -ESERVERFAULT },
{ nfserr_serverfault, -ENFILE },
{ nfserr_io, -EUCLEAN },
+ { nfserr_perm, -ENOKEY },
};
int i;
int nfsd_minorversion(u32 minorversion, enum vers_op change)
{
- if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
+ if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
+ change != NFSD_AVAIL)
return -1;
switch(change) {
case NFSD_SET:
void nfsd_reset_versions(void)
{
- int found_one = 0;
int i;
- for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
- if (nfsd_program.pg_vers[i])
- found_one = 1;
- }
+ for (i = 0; i < NFSD_NRVERS; i++)
+ if (nfsd_vers(i, NFSD_TEST))
+ return;
- if (!found_one) {
- for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
- nfsd_program.pg_vers[i] = nfsd_version[i];
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
- for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
- nfsd_acl_program.pg_vers[i] =
- nfsd_acl_version[i];
-#endif
- }
+ for (i = 0; i < NFSD_NRVERS; i++)
+ if (i != 4)
+ nfsd_vers(i, NFSD_SET);
+ else {
+ int minor = 0;
+ while (nfsd_minorversion(minor, NFSD_SET) >= 0)
+ minor++;
+ }
}
/*
continue;
/*
* Skip ops whose filesystem we don't know about unless
- * it is being mounted.
+ * it is being mounted or unmounted. It is possible for
+ * a filesystem we don't know about to be unmounted if
+ * it fails to mount in the kernel after userspace has
+ * been sent the mount request.
*/
/* XXX: is there a better way to detect this? */
} else if (ret == -1 &&
!(op->upcall.type ==
ORANGEFS_VFS_OP_FS_MOUNT ||
op->upcall.type ==
- ORANGEFS_VFS_OP_GETATTR)) {
+ ORANGEFS_VFS_OP_GETATTR ||
+ op->upcall.type ==
+ ORANGEFS_VFS_OP_FS_UMOUNT)) {
gossip_debug(GOSSIP_DEV_DEBUG,
"orangefs: skipping op tag %llu %s\n",
llu(op->tag), get_opname_string(op));
char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
struct super_block *sb;
int mount_pending;
+ int no_list;
struct list_head list;
};
if (!new_op)
return -ENOMEM;
new_op->upcall.req.features.features = 0;
- ret = service_operation(new_op, "orangefs_features", 0);
- orangefs_features = new_op->downcall.resp.features.features;
+ ret = service_operation(new_op, "orangefs_features",
+ ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
+ if (!ret)
+ orangefs_features =
+ new_op->downcall.resp.features.features;
+ else
+ orangefs_features = 0;
op_release(new_op);
} else {
orangefs_features = 0;
if (ret) {
d = ERR_PTR(ret);
- goto free_op;
+ goto free_sb_and_op;
}
/*
spin_unlock(&orangefs_superblocks_lock);
op_release(new_op);
+ /* Must be removed from the list now. */
+ ORANGEFS_SB(sb)->no_list = 0;
+
if (orangefs_userspace_version >= 20906) {
new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
if (!new_op)
return dget(sb->s_root);
+free_sb_and_op:
+ /* Will call orangefs_kill_sb with sb not in list. */
+ ORANGEFS_SB(sb)->no_list = 1;
+ deactivate_locked_super(sb);
free_op:
gossip_err("orangefs_mount: mount request failed with %d\n", ret);
if (ret == -EINVAL) {
*/
orangefs_unmount_sb(sb);
- /* remove the sb from our list of orangefs specific sb's */
-
- spin_lock(&orangefs_superblocks_lock);
- __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */
- ORANGEFS_SB(sb)->list.prev = NULL;
- spin_unlock(&orangefs_superblocks_lock);
+ if (!ORANGEFS_SB(sb)->no_list) {
+ /* remove the sb from our list of orangefs specific sb's */
+ spin_lock(&orangefs_superblocks_lock);
+ /* not list_del_init */
+ __list_del_entry(&ORANGEFS_SB(sb)->list);
+ ORANGEFS_SB(sb)->list.prev = NULL;
+ spin_unlock(&orangefs_superblocks_lock);
+ }
/*
* make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
if ((table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
+ (table->proc_handler == proc_douintvec) ||
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+ pmd_t pmd = *pmdp;
+
+ /* See comment in change_huge_pmd() */
+ pmdp_invalidate(vma, addr, pmdp);
+ if (pmd_dirty(*pmdp))
+ pmd = pmd_mkdirty(pmd);
+ if (pmd_young(*pmdp))
+ pmd = pmd_mkyoung(pmd);
pmd = pmd_wrprotect(pmd);
pmd = pmd_clear_soft_dirty(pmd);
int vfs_statx_fd(unsigned int fd, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
- struct fd f = fdget_raw(fd);
+ struct fd f;
int error = -EBADF;
+ if (query_flags & ~KSTAT_QUERY_FLAGS)
+ return -EINVAL;
+
+ f = fdget_raw(fd);
if (f.file) {
error = vfs_getattr(&f.file->f_path, stat,
request_mask, query_flags);
* Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
* at the given name from being referenced.
*
- * The caller must have preset stat->request_mask as for vfs_getattr(). The
- * flags are also used to load up stat->query_flags.
- *
* 0 will be returned on success, and a -ve error code if unsuccessful.
*/
int vfs_statx(int dfd, const char __user *filename, int flags,
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
-static inline int __put_timestamp(struct timespec *kts,
- struct statx_timestamp __user *uts)
-{
- return (__put_user(kts->tv_sec, &uts->tv_sec ) ||
- __put_user(kts->tv_nsec, &uts->tv_nsec ) ||
- __put_user(0, &uts->__reserved ));
-}
-
-/*
- * Set the statx results.
- */
-static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
+static noinline_for_stack int
+cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
- uid_t uid = from_kuid_munged(current_user_ns(), stat->uid);
- gid_t gid = from_kgid_munged(current_user_ns(), stat->gid);
-
- if (__put_user(stat->result_mask, &buffer->stx_mask ) ||
- __put_user(stat->mode, &buffer->stx_mode ) ||
- __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) ||
- __put_user(stat->nlink, &buffer->stx_nlink ) ||
- __put_user(uid, &buffer->stx_uid ) ||
- __put_user(gid, &buffer->stx_gid ) ||
- __put_user(stat->attributes, &buffer->stx_attributes ) ||
- __put_user(stat->blksize, &buffer->stx_blksize ) ||
- __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) ||
- __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) ||
- __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) ||
- __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) ||
- __put_timestamp(&stat->atime, &buffer->stx_atime ) ||
- __put_timestamp(&stat->btime, &buffer->stx_btime ) ||
- __put_timestamp(&stat->ctime, &buffer->stx_ctime ) ||
- __put_timestamp(&stat->mtime, &buffer->stx_mtime ) ||
- __put_user(stat->ino, &buffer->stx_ino ) ||
- __put_user(stat->size, &buffer->stx_size ) ||
- __put_user(stat->blocks, &buffer->stx_blocks ) ||
- __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) ||
- __clear_user(&buffer->__spare2, sizeof(buffer->__spare2)))
- return -EFAULT;
-
- return 0;
+ struct statx tmp;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ tmp.stx_mask = stat->result_mask;
+ tmp.stx_blksize = stat->blksize;
+ tmp.stx_attributes = stat->attributes;
+ tmp.stx_nlink = stat->nlink;
+ tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
+ tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
+ tmp.stx_mode = stat->mode;
+ tmp.stx_ino = stat->ino;
+ tmp.stx_size = stat->size;
+ tmp.stx_blocks = stat->blocks;
+ tmp.stx_attributes_mask = stat->attributes_mask;
+ tmp.stx_atime.tv_sec = stat->atime.tv_sec;
+ tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
+ tmp.stx_btime.tv_sec = stat->btime.tv_sec;
+ tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
+ tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
+ tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
+ tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
+ tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
+ tmp.stx_rdev_major = MAJOR(stat->rdev);
+ tmp.stx_rdev_minor = MINOR(stat->rdev);
+ tmp.stx_dev_major = MAJOR(stat->dev);
+ tmp.stx_dev_minor = MINOR(stat->dev);
+
+ return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
/**
struct kstat stat;
int error;
+ if (mask & STATX__RESERVED)
+ return -EINVAL;
if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
- return -EFAULT;
if (filename)
error = vfs_statx(dfd, filename, flags, &stat, mask);
error = vfs_statx_fd(dfd, &stat, mask, flags);
if (error)
return error;
- return statx_set_result(&stat, buffer);
+
+ return cp_statx(&stat, buffer);
}
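
After this rewrite, statx(2) results reach userspace through a single copy_to_user() of a fully zeroed struct, and reserved mask bits are rejected with -EINVAL up front. A hedged userspace sketch querying the birth time that ext4 and xfs now report (needs a libc exposing the statx wrapper, e.g. glibc 2.28+; the filename is a placeholder):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct statx stx;

            if (statx(AT_FDCWD, "somefile", 0, STATX_BTIME, &stx) != 0)
                    return 1;
            if (stx.stx_mask & STATX_BTIME) /* filesystem actually filled btime */
                    printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
            return 0;
    }
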
/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
struct kobject *kobj = of->kn->parent->priv;
- size_t len;
+ ssize_t len;
/*
* If buf != of->prealloc_buf, we don't know how
if (WARN_ON_ONCE(buf != of->prealloc_buf))
return 0;
len = ops->show(kobj, of->kn->priv, buf);
+ if (len < 0)
+ return len;
if (pos) {
if (len <= pos)
return 0;
len -= pos;
memmove(buf, buf + pos, len);
}
- return min(count, len);
+ return min_t(ssize_t, count, len);
}
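
The size_t to ssize_t change above matters because ops->show() may return a negative errno; stored in an unsigned size_t, -EIO becomes an enormous length and every subsequent bound check passes. A tiny standalone illustration of the pitfall (not kernel code):

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            ssize_t ret = -5;       /* say, -EIO from a show() callback */
            size_t len = ret;       /* converts to SIZE_MAX - 4 */

            if (len > 4096)
                    printf("bogus length: %zu\n", len);
            return 0;
    }
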
/* kernfs write callback for regular sysfs files */
* protocols: aa:... bb:...
*/
seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
- pending, total, UFFD_API, UFFD_API_FEATURES,
+ pending, total, UFFD_API, ctx->features,
UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
- int size);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
/* Verify the consistency of an inline directory. */
int
xfs_dir2_sf_verify(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *sfp,
- int size)
+ struct xfs_inode *ip)
{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dir2_sf_hdr *sfp;
struct xfs_dir2_sf_entry *sfep;
struct xfs_dir2_sf_entry *next_sfep;
char *endp;
const struct xfs_dir_ops *dops;
+ struct xfs_ifork *ifp;
xfs_ino_t ino;
int i;
int i8count;
int offset;
+ int size;
+ int error;
__uint8_t filetype;
+ ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+ /*
+ * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+ * so we can only trust the mountpoint to have the right pointer.
+ */
dops = xfs_dir_get_ops(mp, NULL);
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+ size = ifp->if_bytes;
+
/*
* Give up if the directory is way too short.
*/
- XFS_WANT_CORRUPTED_RETURN(mp, size >
- offsetof(struct xfs_dir2_sf_hdr, parent));
- XFS_WANT_CORRUPTED_RETURN(mp, size >=
- xfs_dir2_sf_hdr_size(sfp->i8count));
+ if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+ size < xfs_dir2_sf_hdr_size(sfp->i8count))
+ return -EFSCORRUPTED;
endp = (char *)sfp + size;
/* Check .. entry */
ino = dops->sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
- XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+ error = xfs_dir_ino_validate(mp, ino);
+ if (error)
+ return error;
offset = dops->data_first_offset;
/* Check all reported entries */
* Check the fixed-offset parts of the structure are
* within the data buffer.
*/
- XFS_WANT_CORRUPTED_RETURN(mp,
- ((char *)sfep + sizeof(*sfep)) < endp);
+ if (((char *)sfep + sizeof(*sfep)) >= endp)
+ return -EFSCORRUPTED;
/* Don't allow names with known bad length. */
- XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
- XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+ if (sfep->namelen == 0)
+ return -EFSCORRUPTED;
/*
* Check that the variable-length part of the structure is
* name component, so nextentry is an acceptable test.
*/
next_sfep = dops->sf_nextentry(sfp, sfep);
- XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+ if (endp < (char *)next_sfep)
+ return -EFSCORRUPTED;
/* Check that the offsets always increase. */
- XFS_WANT_CORRUPTED_RETURN(mp,
- xfs_dir2_sf_get_offset(sfep) >= offset);
+ if (xfs_dir2_sf_get_offset(sfep) < offset)
+ return -EFSCORRUPTED;
/* Check the inode number. */
ino = dops->sf_get_ino(sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
- XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+ error = xfs_dir_ino_validate(mp, ino);
+ if (error)
+ return error;
/* Check the file type. */
filetype = dops->sf_get_ftype(sfep);
- XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+ if (filetype >= XFS_DIR3_FT_MAX)
+ return -EFSCORRUPTED;
offset = xfs_dir2_sf_get_offset(sfep) +
dops->data_entsize(sfep->namelen);
sfep = next_sfep;
}
- XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
- XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+ if (i8count != sfp->i8count)
+ return -EFSCORRUPTED;
+ if ((void *)sfep != (void *)endp)
+ return -EFSCORRUPTED;
/* Make sure this whole thing ought to be in local format. */
- XFS_WANT_CORRUPTED_RETURN(mp, offset +
- (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
- (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+ if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+ (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+ return -EFSCORRUPTED;
return 0;
}
if (error)
return error;
+ /* Check inline dir contents. */
+ if (S_ISDIR(VFS_I(ip)->i_mode) &&
+ dip->di_format == XFS_DINODE_FMT_LOCAL) {
+ error = xfs_dir2_sf_verify(ip);
+ if (error) {
+ xfs_idestroy_fork(ip, XFS_DATA_FORK);
+ return error;
+ }
+ }
+
if (xfs_is_reflink_inode(ip)) {
ASSERT(ip->i_cowfp == NULL);
xfs_ifork_init_cow(ip);
int whichfork,
int size)
{
- int error;
-
/*
* If the size is unreasonable, then something
* is wrong and we just bail out rather than crash in
return -EFSCORRUPTED;
}
- if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
- error = xfs_dir2_sf_verify(ip->i_mount,
- (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
- size);
- if (error)
- return error;
- }
-
xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
return 0;
}
* In these cases, the format always takes precedence, because the
* format indicates the current state of the fork.
*/
-int
+void
xfs_iflush_fork(
xfs_inode_t *ip,
xfs_dinode_t *dip,
char *cp;
xfs_ifork_t *ifp;
xfs_mount_t *mp;
- int error;
static const short brootflag[2] =
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
static const short dataflag[2] =
{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
if (!iip)
- return 0;
+ return;
ifp = XFS_IFORK_PTR(ip, whichfork);
/*
* This can happen if we gave up in iformat in an error path,
*/
if (!ifp) {
ASSERT(whichfork == XFS_ATTR_FORK);
- return 0;
+ return;
}
cp = XFS_DFORK_PTR(dip, whichfork);
mp = ip->i_mount;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
case XFS_DINODE_FMT_LOCAL:
- if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
- error = xfs_dir2_sf_verify(mp,
- (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
- ifp->if_bytes);
- if (error)
- return error;
- }
if ((iip->ili_fields & dataflag[whichfork]) &&
(ifp->if_bytes > 0)) {
ASSERT(ifp->if_u1.if_data != NULL);
ASSERT(0);
break;
}
- return 0;
}
/*
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
struct xfs_inode_log_item *, int);
void xfs_idestroy_fork(struct xfs_inode *, int);
void xfs_idata_realloc(struct xfs_inode *, int, int);
/*
* Now that we've unmap all full blocks we'll have to zero out any
* partial block at the beginning and/or end. xfs_zero_range is
- * smart enough to skip any holes, including those we just created.
+ * smart enough to skip any holes, including those we just created,
+ * but we must take care not to zero beyond EOF and enlarge i_size.
*/
+
+ if (offset >= XFS_ISIZE(ip))
+ return 0;
+
+ if (offset + len > XFS_ISIZE(ip))
+ len = XFS_ISIZE(ip) - offset;
+
return xfs_zero_range(ip, offset, len, NULL);
}
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
kmem_zone_t *xfs_inode_zone;
struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_dinode *dip;
struct xfs_mount *mp = ip->i_mount;
- int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
if (ip->i_d.di_version < 3)
ip->i_d.di_flushiter++;
+ /* Check the inline directory data. */
+ if (S_ISDIR(VFS_I(ip)->i_mode) &&
+ ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+ xfs_dir2_sf_verify(ip))
+ goto corrupt_out;
+
/*
* Copy the dirty parts of the inode into the on-disk inode. We always
* copy out the core of the inode, because if the inode is dirty at all
if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
ip->i_d.di_flushiter = 0;
- error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
- if (error)
- return error;
- if (XFS_IFORK_Q(ip)) {
- error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
- if (error)
- return error;
- }
+ xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+ if (XFS_IFORK_Q(ip))
+ xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
xfs_inobp_check(mp, bp);
/*
stat->blocks =
XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
+ if (ip->i_d.di_version == 3) {
+ if (request_mask & STATX_BTIME) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
+ stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
+ }
+ }
+
+ if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
+ stat->attributes |= STATX_ATTR_NODUMP;
switch (inode->i_mode & S_IFMT) {
case S_IFBLK:
return error;
bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
- buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+ buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
do {
struct xfs_inobt_rec_incore r;
int stat;
* [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
* and/or .init.* sections.
* [__start_rodata, __end_rodata]: contains .rodata.* sections
- * [__start_data_ro_after_init, __end_data_ro_after_init]:
- * contains data.ro_after_init section
+ * [__start_ro_after_init, __end_ro_after_init]:
+ * contains .data..ro_after_init section
* [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
* may be out of this range on some architectures.
* [_sinittext, _einittext]: contains .init.text.* sections
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
-extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
KEEP(*(__##name##_of_table_end))
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
+#define CLKEVT_OF_TABLES() OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
- __start_data_ro_after_init = .; \
+ VMLINUX_SYMBOL(__start_ro_after_init) = .; \
*(.data..ro_after_init) \
- __end_data_ro_after_init = .;
+ VMLINUX_SYMBOL(__end_ro_after_init) = .;
#endif
/*
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \
+ CLKEVT_OF_TABLES() \
IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
CPUIDLE_METHOD_OF_TABLES() \
struct dw_hdmi;
+/**
+ * DOC: Supported input formats and encodings
+ *
+ * Depending on the hardware configuration of the controller IP, it supports
+ * a subset of the following input formats and encodings on its internal
+ * 48bit bus.
+ *
+ * +----------------------+----------------------------------+------------------------------+
+ * + Format Name + Format Code + Encodings +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 8bit + ``MEDIA_BUS_FMT_RGB888_1X24`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 10bits + ``MEDIA_BUS_FMT_RGB101010_1X30`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 12bits + ``MEDIA_BUS_FMT_RGB121212_1X36`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 16bits + ``MEDIA_BUS_FMT_RGB161616_1X48`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 8bit + ``MEDIA_BUS_FMT_YUV8_1X24`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 10bits + ``MEDIA_BUS_FMT_YUV10_1X30`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 12bits + ``MEDIA_BUS_FMT_YUV12_1X36`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 16bits + ``MEDIA_BUS_FMT_YUV16_1X48`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:2 8bit + ``MEDIA_BUS_FMT_UYVY8_1X16`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:2 10bits + ``MEDIA_BUS_FMT_UYVY10_1X20`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:2 12bits + ``MEDIA_BUS_FMT_UYVY12_1X24`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 8bit + ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 10bits + ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``+ ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 12bits + ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``+ ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 16bits + ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``+ ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ */
+
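As a worked illustration of the two plat_data fields added further below (a hedged sketch: the glue-driver name is hypothetical; only the MEDIA_BUS_FMT_* and V4L2_YCBCR_ENC_* constants come from the table above):

static const struct dw_hdmi_plat_data foo_dw_hdmi_plat_data = {
	/* the controller is fed YCbCr 4:4:4 8bit by the SoC video encoder */
	.input_bus_format	= MEDIA_BUS_FMT_YUV8_1X24,
	.input_bus_encoding	= V4L2_YCBCR_ENC_601,
	/* .mode_valid, .phy_ops etc. filled in as usual */
};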
enum {
DW_HDMI_RES_8,
DW_HDMI_RES_10,
struct drm_display_mode *mode);
void (*disable)(struct dw_hdmi *hdmi, void *data);
enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data);
+ void (*update_hpd)(struct dw_hdmi *hdmi, void *data,
+ bool force, bool disabled, bool rxsense);
+ void (*setup_hpd)(struct dw_hdmi *hdmi, void *data);
};
struct dw_hdmi_plat_data {
struct regmap *regm;
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
+ unsigned long input_bus_format;
+ unsigned long input_bus_encoding;
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
const struct dw_hdmi_plat_data *plat_data);
+void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
+
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_sysfs.h>
struct module;
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
- /* sysfs support (drm_sysfs.c) */
-extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-
-
/*@}*/
/* returns true if currently okay to sleep */
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
- * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
* @planes: pointer to array of structures with per-plane data
* @crtcs: pointer to array of CRTC pointers
* @num_connector: size of the @connectors and @connector_states arrays
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
- bool legacy_set_config : 1;
struct __drm_planes_state *planes;
struct __drm_crtcs_state *crtcs;
int num_connector;
struct drm_connector_state *state);
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
- uint32_t size);
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
struct drm_device;
struct drm_connector_helper_funcs;
+struct drm_modeset_acquire_ctx;
struct drm_device;
struct drm_crtc;
struct drm_encoder;
* the helper library vtable purely for historical reasons. The only DRM
* core entry point to probe connector state is @fill_modes.
*
+ * Note that the helper library will already hold
+ * &drm_mode_config.connection_mutex. Drivers which need to grab additional
+ * locks to avoid races with concurrent modeset changes need to use
+ * &drm_connector_helper_funcs.detect_ctx instead.
+ *
* RETURNS:
*
* drm_connector_status indicating the connector's status.
* hooks.
*/
int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size);
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* @destroy:
*/
spinlock_t commit_lock;
- /**
- * @acquire_ctx:
- *
- * Per-CRTC implicit acquire context used by atomic drivers for legacy
- * IOCTLs, so that atomic drivers can get at the locking acquire
- * context.
- */
- struct drm_modeset_acquire_ctx *acquire_ctx;
-
#ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
uint32_t maxY);
+int drm_helper_probe_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force);
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
#define _DRM_IOCTL_H_
#include <linux/types.h>
+#include <linux/bitops.h>
#include <asm/ioctl.h>
struct file;
/**
- * Ioctl function type.
+ * drm_ioctl_t - DRM ioctl function type.
+ * @dev: DRM device the ioctl was called on
+ * @data: kernel-space copy of the ioctl argument
+ * @file_priv: DRM file this ioctl was made on
*
- * \param inode device inode.
- * \param file_priv DRM file private pointer.
- * \param cmd command.
- * \param arg argument.
+ * This is the DRM ioctl typedef. Note that drm_ioctl() has already copied @data
+ * into kernel-space, and will also copy it back, depending upon the read/write
+ * settings in the ioctl command code.
*/
typedef int drm_ioctl_t(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/**
+ * drm_ioctl_compat_t - compatibility DRM ioctl function type.
+ * @filp: file pointer
+ * @cmd: ioctl command code
+ * @arg: user argument of the ioctl call
+ *
+ * Just a typedef to make declaring an array of compatibility handlers easier.
+ * New drivers shouldn't screw up the structure layout for their ioctl
+ * structures and hence never need this.
+ */
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_MAJOR 226
-#define DRM_AUTH 0x1
-#define DRM_MASTER 0x2
-#define DRM_ROOT_ONLY 0x4
-#define DRM_CONTROL_ALLOW 0x8
-#define DRM_UNLOCKED 0x10
-#define DRM_RENDER_ALLOW 0x20
+/**
+ * enum drm_ioctl_flags - DRM ioctl flags
+ *
+ * Various flags that can be set in &drm_ioctl_desc.flags to control how
+ * userspace can use a given ioctl.
+ */
+enum drm_ioctl_flags {
+ /**
+ * @DRM_AUTH:
+ *
+	 * This is for ioctls which are used for rendering, and require that the
+ * file descriptor is either for a render node, or if it's a
+ * legacy/primary node, then it must be authenticated.
+ */
+ DRM_AUTH = BIT(0),
+ /**
+ * @DRM_MASTER:
+ *
+ * This must be set for any ioctl which can change the modeset or
+ * display state. Userspace must call the ioctl through a primary node,
+ * while it is the active master.
+ *
+	 * Note that read-only modeset ioctls can also be called by
+ * unauthenticated clients, or when a master is not the currently active
+ * one.
+ */
+ DRM_MASTER = BIT(1),
+ /**
+ * @DRM_ROOT_ONLY:
+ *
+	 * Anything that could potentially wreck a master file descriptor needs
+	 * to have this flag set. Currently that's only the SETMASTER and
+	 * DROPMASTER ioctls, which e.g. logind can call to force a non-behaving
+ * master (display compositor) into compliance.
+ *
+ * This is equivalent to callers with the SYSADMIN capability.
+ */
+ DRM_ROOT_ONLY = BIT(2),
+ /**
+ * @DRM_CONTROL_ALLOW:
+ *
+ * Deprecated, do not use. Control nodes are in the process of getting
+ * removed.
+ */
+ DRM_CONTROL_ALLOW = BIT(3),
+ /**
+ * @DRM_UNLOCKED:
+ *
+ * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
+ * or not. Enforced as the default for all modern drivers, hence there
+ * should never be a need to set this flag.
+ */
+ DRM_UNLOCKED = BIT(4),
+ /**
+ * @DRM_RENDER_ALLOW:
+ *
+	 * This is used for all ioctls needed for rendering only, for drivers
+	 * which support render nodes. This should be all new render drivers,
+	 * and hence it should always be set for any ioctl with DRM_AUTH set.
+	 * Note though that read-only query ioctls might have this set but not
+	 * DRM_AUTH, because they do not require authentication.
+ */
+ DRM_RENDER_ALLOW = BIT(5),
+};
+/**
+ * struct drm_ioctl_desc - DRM driver ioctl entry
+ * @cmd: ioctl command number, without flags
+ * @flags: a bitmask of &enum drm_ioctl_flags
+ * @func: handler for this ioctl
+ * @name: user-readable name for debug output
+ *
+ * For convenience it's easier to create these using the DRM_IOCTL_DEF_DRV()
+ * macro.
+ */
struct drm_ioctl_desc {
unsigned int cmd;
- int flags;
+ enum drm_ioctl_flags flags;
drm_ioctl_t *func;
const char *name;
};
/**
- * Creates a driver or general drm_ioctl_desc array entry for the given
- * ioctl, for use by drm_ioctl().
+ * DRM_IOCTL_DEF_DRV() - helper macro to fill out a &struct drm_ioctl_desc
+ * @ioctl: ioctl command suffix
+ * @_func: handler for the ioctl
+ * @_flags: a bitmask of &enum drm_ioctl_flags
+ *
+ * Small helper macro to create a &struct drm_ioctl_desc entry. The ioctl
+ * command number is constructed by prepending ``DRM_IOCTL\_`` and passing that
+ * to DRM_IOCTL_NR().
*/
-
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
.cmd = DRM_IOCTL_##ioctl, \
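A hedged usage sketch (driver, ioctl and handler names hypothetical; DRM_IOCTL_FOO_WAIT would come from the driver's uapi header):

static const struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF_DRV(FOO_WAIT, foo_ioctl_wait,
			  DRM_AUTH | DRM_RENDER_ALLOW),
};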
* This callback is used by the probe helpers in e.g.
* drm_helper_probe_single_connector_modes().
*
+ * To avoid races with concurrent connector state updates, the helper
+ * libraries always call this with the &drm_mode_config.connection_mutex
+ * held. Because of this it's safe to inspect &drm_connector->state.
+ *
* RETURNS:
*
* The number of modes added by calling drm_mode_probed_add().
*/
int (*get_modes)(struct drm_connector *connector);
+ /**
+ * @detect_ctx:
+ *
+ * Check to see if anything is attached to the connector. The parameter
+ * force is set to false whilst polling, true when checking the
+ * connector due to a user request. force can be used by the driver to
+ * avoid expensive, destructive operations during automated probing.
+ *
+	 * This callback is optional; if it is not implemented, the connector
+	 * is considered to always be attached.
+ *
+ * This is the atomic version of &drm_connector_funcs.detect.
+ *
+ * To avoid races against concurrent connector state updates, the
+ * helper libraries always call this with ctx set to a valid context,
+	 * and &drm_mode_config.connection_mutex will always be locked using
+	 * this ctx. This allows taking additional locks as required.
+ *
+ * RETURNS:
+ *
+ * &drm_connector_status indicating the connector's status,
+ * or the error code returned by drm_modeset_lock(), -EDEADLK.
+ */
+ int (*detect_ctx)(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force);
+
/**
* @mode_valid:
*
* CRTC helpers will not call this function. Drivers therefore must
* still fully validate any mode passed in in a modeset request.
*
+ * To avoid races with concurrent connector state updates, the helper
+ * libraries always call this with the &drm_mode_config.connection_mutex
+ * held. Because of this it's safe to inspect &drm_connector->state.
+ *
* RETURNS:
*
* Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum
*/
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
struct drm_connector_state *connector_state);
+
+ /**
+ * @atomic_check:
+ *
+	 * This hook is used to validate connector state. It is called from
+	 * &drm_atomic_helper_check_modeset whenever a connector property is
+	 * set or a modeset on the CRTC is forced.
+ *
+ * Because &drm_atomic_helper_check_modeset may be called multiple times,
+ * this function should handle being called multiple times as well.
+ *
+ * This function is also allowed to inspect any other object's state and
+ * can add more state objects to the atomic commit if needed. Care must
+ * be taken though to ensure that state check and compute functions for
+ * these added states are all called, and derived state in other objects
+ * all updated. Again the recommendation is to just call check helpers
+ * until a maximal configuration is reached.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of an atomic update. The
+ * driver is not allowed to change anything outside of the free-standing
+ * state objects passed-in or assembled in the overall &drm_atomic_state
+ * update tracking structure.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the state or the transition can't be
+ * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
+ * attempt to obtain another state object ran into a &drm_modeset_lock
+ * deadlock.
+ */
+ int (*atomic_check)(struct drm_connector *connector,
+ struct drm_connector_state *state);
};
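For illustration, a minimal &drm_connector_helper_funcs.detect_ctx implementation following the locking rules documented above (a hedged sketch; all foo_* driver pieces are hypothetical):

static int foo_connector_detect_ctx(struct drm_connector *connector,
				    struct drm_modeset_acquire_ctx *ctx,
				    bool force)
{
	struct foo_device *foo = connector_to_foo(connector);
	int ret;

	/* additional driver locks are taken through the acquire context */
	ret = drm_modeset_lock(&foo->hw_lock, ctx);
	if (ret)
		return ret;	/* typically -EDEADLK; the helpers back off */

	/* ... so the status check below cannot race a concurrent modeset */
	return foo_hpd_asserted(foo) ? connector_status_connected :
				       connector_status_disconnected;
}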
/**
void drm_modeset_lock_all(struct drm_device *dev);
void drm_modeset_unlock_all(struct drm_device *dev);
-void drm_modeset_lock_crtc(struct drm_crtc *crtc,
- struct drm_plane *plane);
-void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
-struct drm_modeset_acquire_ctx *
-drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
struct device;
struct drm_device;
struct drm_encoder;
+struct drm_panel;
+struct drm_bridge;
struct device_node;
#ifdef CONFIG_OF
int drm_of_encoder_active_endpoint(struct device_node *node,
struct drm_encoder *encoder,
struct of_endpoint *endpoint);
+int drm_of_find_panel_or_bridge(const struct device_node *np,
+ int port, int endpoint,
+ struct drm_panel **panel,
+ struct drm_bridge **bridge);
#else
static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
return -EINVAL;
}
+static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
+ int port, int endpoint,
+ struct drm_panel **panel,
+ struct drm_bridge **bridge)
+{
+ return -EINVAL;
+}
#endif
static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
int drm_panel_detach(struct drm_panel *panel);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
#else
static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
#ifndef _DRM_SYSFS_H_
#define _DRM_SYSFS_H_
-/**
- * This minimalistic include file is intended for users (read TTM) that
- * don't want to include the full drmP.h file.
- */
+struct drm_device;
+struct device;
int drm_class_device_register(struct device *dev);
void drm_class_device_unregister(struct device *dev);
+void drm_sysfs_hotplug_event(struct drm_device *dev);
+
#endif
extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
struct ttm_buffer_object *bo);
+/**
+ * ttm_bo_default_io_mem_pfn - get a pfn for a page offset
+ *
+ * @bo: the BO we need to look up the pfn for
+ * @page_offset: offset inside the BO to look up.
+ *
+ * Calculate the PFN for iomem based mappings during page fault
+ */
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
*
struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
};
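A hedged wiring sketch (driver struct name hypothetical): drivers with linear apertures can simply point the new hook at the default helper, while hardware with a non-linear aperture supplies its own translation:

static struct ttm_bo_driver foo_bo_driver = {
	/* ... other callbacks ... */
	.io_mem_pfn	= ttm_bo_default_io_mem_pfn,
};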
/**
* @ref_type: The type of reference.
* @existed: Upon completion, indicates that an identical reference object
* already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
*
* Checks that the base object is shareable and adds a ref object to it.
*
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed);
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed);
extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base);
#define TTM_PL_FLAG_CACHED (1 << 16)
#define TTM_PL_FLAG_UNCACHED (1 << 17)
#define TTM_PL_FLAG_WC (1 << 18)
+#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
+void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level);
atomic_t nr_active;
+ struct delayed_work delayed_run_work;
struct delayed_work delay_work;
struct hlist_node cpuhp_dead;
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
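A hedged caller sketch for the new delayed-run helper (the foo_* pieces are hypothetical; the pattern is a ->queue_rq() path that hits a temporary resource shortage):

	if (!foo_resources_available(foo)) {
		/* re-run this hardware queue in 100ms instead of spinning */
		blk_mq_delay_run_hw_queue(hctx, 100);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}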
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
-#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
return true;
}
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
- struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+ struct request *prev_rq,
+ struct bio *prev,
+ struct bio *next)
{
if (bio_has_data(prev) && queue_virt_boundary(q)) {
struct bio_vec pb, nb;
+ /*
+		 * don't merge if the 1st bio starts with a non-zero
+		 * offset, otherwise it is quite difficult to respect
+		 * the sg gap limit. We work hard to merge the huge
+		 * number of small single bios that e.g. mkfs submits.
+ */
+ if (prev_rq)
+ bio_get_first_bvec(prev_rq->bio, &pb);
+ else
+ bio_get_first_bvec(prev, &pb);
+ if (pb.bv_offset)
+ return true;
+
+ /*
+ * We don't need to worry about the situation that the
+ * merged segment ends in unaligned virt boundary:
+ *
+ * - if 'pb' ends aligned, the merged segment ends aligned
+ * - if 'pb' ends unaligned, the next bio must include
+ * one single bvec of 'nb', otherwise the 'nb' can't
+ * merge with 'pb'
+ */
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
- return bio_will_gap(req->q, req->biotail, bio);
+ return bio_will_gap(req->q, req, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
- return bio_will_gap(req->q, bio, req->bio);
+ return bio_will_gap(req->q, NULL, bio, req->bio);
}
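For reference, the virt-boundary test that these gap helpers build on boils down to the following mask arithmetic (a hedged restatement of __bvec_gap_to_prev() in this header, not a new API):

static inline bool foo_bvecs_gap(struct request_queue *q,
				 const struct bio_vec *prv,
				 const struct bio_vec *nxt)
{
	/* mergeable only if prv ends and nxt starts on the virt boundary */
	return ((prv->bv_offset + prv->bv_len) & queue_virt_boundary(q)) ||
	       (nxt->bv_offset & queue_virt_boundary(q));
}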
int kblockd_schedule_work(struct work_struct *work);
pr_cont_kernfs_path(cgrp->kn);
}
+static inline void cgroup_init_kthreadd(void)
+{
+ /*
+ * kthreadd is inherited by all kthreads, keep it in the root so
+ * that the new kthreads are guaranteed to stay in the root until
+ * initialization is finished.
+ */
+ current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+ /*
+ * This kthread finished initialization. The creator should have
+ * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+ */
+ current->no_cgroup_migration = 0;
+}
+
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
#ifdef CONFIG_CLKEVT_PROBE
extern int clockevent_probe(void);
-#els
+#else
static inline int clockevent_probe(void) { return 0; }
#endif
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @kmap_atomic: maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @kmap: maps a page from the buffer into kernel address space.
- * @kunmap: [optional] unmaps a page from the buffer.
+ * @map_atomic: maps a page from the buffer into kernel address
+ * space, users may not block until the subsequent unmap call.
+ * This callback must not sleep.
+ * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
+ *		  This callback must not sleep.
+ * @map: maps a page from the buffer into kernel address space.
+ * @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*kmap_atomic)(struct dma_buf *, unsigned long);
- void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
- void *(*kmap)(struct dma_buf *, unsigned long);
- void (*kunmap)(struct dma_buf *, unsigned long, void *);
+ void *(*map_atomic)(struct dma_buf *, unsigned long);
+ void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
+ void *(*map)(struct dma_buf *, unsigned long);
+ void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
extern int elevator_change(struct request_queue *, const char *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
#define GICH_MISR_EOI (1 << 0)
#define GICH_MISR_U (1 << 1)
+#define GICV_PMR_PRIORITY_SHIFT 3
+#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT)
+
#ifndef __ASSEMBLY__
#include <linux/irqdomain.h>
static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
size_t kasan_metadata_size(struct kmem_cache *cache);
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
#else /* CONFIG_KASAN */
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
return false;
}
+static inline void mem_cgroup_update_page_stat(struct page *page,
+ enum mem_cgroup_stat_index idx,
+ int nr)
+{
+}
+
static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
* Max bus-specific overhead incurred by request/responses.
* I2C requires 1 additional byte for requests.
* I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
 */
#define EC_PROTO_VERSION_UNKNOWN 0
#define EC_MAX_REQUEST_OVERHEAD 1
-#define EC_MAX_RESPONSE_OVERHEAD 2
+#define EC_MAX_RESPONSE_OVERHEAD 32
/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
struct writeback_control;
struct bdi_writeback;
+void init_mm_internals(void);
+
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
___pud; \
})
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
- pmd_t ___pmd; \
- \
- ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
- mmu_notifier_invalidate_range(__mm, ___haddr, \
- ___haddr + HPAGE_PMD_SIZE); \
- \
- ___pmd; \
-})
-
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
* RDMA_QPTYPE field
*/
enum {
- NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */
- NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */
+ NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
+ NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
};
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
* RDMA_QPTYPE field
*/
enum {
- NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */
- NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */
- NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */
- NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */
- NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */
+ NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
+ NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
+ NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
+ NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
+ NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
};
/* RDMA Connection Management Service Type codes for Discovery Log Page
* entry TSAS RDMA_CMS field
*/
enum {
- NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */
+ NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};
#define NVMF_AQ_DEPTH 32
extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev);
+extern int pinctrl_enable(struct pinctrl_dev *pctldev);
-/* Please use pinctrl_register_and_init() instead */
+/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+ int index, bool shared,
+ bool optional);
void reset_control_put(struct reset_control *rstc);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline struct reset_control *__reset_control_get(
+ struct device *dev, const char *id,
+ int index, bool shared, bool optional)
+{
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional)
#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
#endif
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
- false);
+ return __reset_control_get(dev, id, 0, false, false);
}
/**
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
- false);
+ return __reset_control_get(dev, id, 0, true, false);
}
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
- true);
+ return __reset_control_get(dev, id, 0, false, true);
}
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
- true);
+ return __reset_control_get(dev, id, 0, true, true);
}
/**
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
+#ifdef CONFIG_CGROUPS
+ /* disallow userland-initiated cgroup migration */
+ unsigned no_cgroup_migration:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
}
#else
extern void sched_clock_init_late(void);
-/*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
- */
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
+/*
+ * When sched_clock_stable(), __sched_clock_offset provides the offset
+ * between local_clock() and sched_clock().
+ */
+extern u64 __sched_clock_offset;
+
+
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
unsigned int nlink;
uint32_t blksize; /* Preferred I/O size */
u64 attributes;
+ u64 attributes_mask;
#define KSTAT_ATTR_FS_IOC_FLAGS \
(STATX_ATTR_COMPRESSED | \
STATX_ATTR_IMMUTABLE | \
};
union {
unsigned long nr_segs;
- int idx;
+ struct {
+ int idx;
+ int start_idx;
+ };
};
};
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
unsigned int feature_table_size;
const unsigned int *feature_table_legacy;
unsigned int feature_table_size_legacy;
+ int (*validate)(struct virtio_device *dev);
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
return frag;
}
-static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
+static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
{
-
- sctp_assoc_sync_pmtu(sk, asoc);
+ sctp_assoc_sync_pmtu(asoc);
asoc->pmtu_pending = 0;
}
*/
static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
- if (t->dst && (!dst_check(t->dst, t->dst_cookie) ||
- t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
- SCTP_DEFAULT_MINSEGMENT)))
+ if (t->dst && !dst_check(t->dst, t->dst_cookie))
sctp_transport_dst_release(t);
return t->dst;
}
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
+ SCTP_DEFAULT_MINSEGMENT);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
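A hedged caller sketch (the real consumer sits in the packet output path): the helper both answers whether the cached path MTU is still accurate and refreshes it, so a caller only needs to resync the association on a miss:

	if (!sctp_transport_pmtu_check(transport))
		sctp_assoc_sync_pmtu(transport->asoc);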
#endif /* __net_sctp_h__ */
__u64 hb_nonce;
} sctp_sender_hb_info_t;
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp);
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
void sctp_stream_free(struct sctp_stream *stream);
void sctp_stream_clear(struct sctp_stream *stream);
/* Did the message fail to send? */
int send_error;
u8 send_failed:1,
- force_delay:1,
can_delay; /* should this message be Nagle delayed */
};
void sctp_transport_burst_limited(struct sctp_transport *);
void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
-void sctp_transport_reset(struct sctp_transport *);
-void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32);
+void sctp_transport_reset(struct sctp_transport *t);
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
+ force_delay:1,
prsctp_enable:1,
reconf_enable:1;
__u32 sctp_association_get_next_tsn(struct sctp_association *);
-void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
void sctp_assoc_set_primary(struct sctp_association *,
TRANSPORT_ISTATE_PROCESSING = 11,
TRANSPORT_COMPLETE_QF_WP = 18,
TRANSPORT_COMPLETE_QF_OK = 19,
+ TRANSPORT_COMPLETE_QF_ERR = 20,
};
/* Used for struct se_cmd->se_cmd_flags */
u16 tg_pt_gp_id;
int tg_pt_gp_valid_id;
int tg_pt_gp_alua_supported_states;
- int tg_pt_gp_alua_pending_state;
- int tg_pt_gp_alua_previous_state;
int tg_pt_gp_alua_access_status;
int tg_pt_gp_alua_access_type;
int tg_pt_gp_nonop_delay_msecs;
int tg_pt_gp_pref;
int tg_pt_gp_write_metadata;
u32 tg_pt_gp_members;
- atomic_t tg_pt_gp_alua_access_state;
+ int tg_pt_gp_alua_access_state;
atomic_t tg_pt_gp_ref_cnt;
spinlock_t tg_pt_gp_lock;
- struct mutex tg_pt_gp_md_mutex;
+ struct mutex tg_pt_gp_transition_mutex;
struct se_device *tg_pt_gp_dev;
struct config_group tg_pt_gp_group;
struct list_head tg_pt_gp_list;
struct list_head tg_pt_gp_lun_list;
struct se_lun *tg_pt_gp_alua_lun;
struct se_node_acl *tg_pt_gp_alua_nacl;
- struct work_struct tg_pt_gp_transition_work;
- struct completion *tg_pt_gp_transition_complete;
};
struct t10_vpd {
u64 unpacked_lun;
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
+ bool lun_shutdown;
bool lun_access_ro;
u32 lun_index;
#define DRM_CAP_CURSOR_HEIGHT 0x9
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u32 tv_sec;
__u32 tv_usec;
__u32 sequence;
- __u32 reserved;
+ __u32 crtc_id; /* 0 on older kernels that do not support this */
};
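A hedged userspace sketch: probe the new capability before trusting crtc_id in vblank events (drmIoctl() is the usual libdrm wrapper; error handling elided):

	struct drm_get_cap cap = { .capability = DRM_CAP_CRTC_IN_VBLANK_EVENT };

	if (drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value) {
		/* event->crtc_id names the CRTC that fired the event */
	}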
/* typedef area */
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
+#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
+#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
+#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
+#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
+ ETNA_SUBMIT_FENCE_FD_IN | \
+ ETNA_SUBMIT_FENCE_FD_OUT)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 relocs; /* in, ptr to array of submit_reloc's */
__u64 stream; /* in, ptr to cmdstream */
+ __u32 flags; /* in, mask of ETNA_SUBMIT_x */
+ __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
};
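A hedged userspace sketch of the new fence plumbing (only the fields and flags visible above are assumed; setup of bos/relocs/stream elided):

	struct drm_etnaviv_gem_submit req = {
		.flags		= ETNA_SUBMIT_FENCE_FD_OUT,
		.fence_fd	= -1,
		/* .bos, .relocs, .stream filled in as before */
	};

	/* after a successful submit ioctl, req.fence_fd holds a sync_file
	 * fd for this submission, which the caller owns and must close() */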
/* The normal way to synchronize with the GPU is just to CPU_PREP on
#define MSM_PARAM_CHIP_ID 0x03
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
+#define MSM_PARAM_GMEM_BASE 0x06
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
header-y += unix_diag.h
header-y += usbdevice_fs.h
header-y += usbip.h
+header-y += userio.h
header-y += utime.h
header-y += utsname.h
header-y += uuid.h
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x1018 */
+/* RGB - next is 0x101b */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
+#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
+#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
+#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x2026 */
+/* YUV (including grey) - next is 0x202c */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
+#define MEDIA_BUS_FMT_UYYVYY8_0_5X24 0x2026
#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_UYYVYY10_0_5X30 0x2027
#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
+#define MEDIA_BUS_FMT_UYYVYY12_0_5X36 0x2028
+#define MEDIA_BUS_FMT_YUV12_1X36 0x2029
+#define MEDIA_BUS_FMT_YUV16_1X48 0x202a
+#define MEDIA_BUS_FMT_UYYVYY16_0_5X48 0x202b
/* Bayer - next is 0x3021 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
__u64 stx_ino; /* Inode number */
__u64 stx_size; /* File size */
__u64 stx_blocks; /* Number of 512-byte blocks allocated */
- __u64 __spare1[1];
+ __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
/* 0x40 */
struct statx_timestamp stx_atime; /* Last access time */
struct statx_timestamp stx_btime; /* File creation time */
#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_ALL 0x00000fffU /* All currently supported flags */
+#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
/*
- * Attributes to be found in stx_attributes
+ * Attributes to be found in stx_attributes and masked in stx_attributes_mask.
*
* These give information about the features or the state of a file that might
* be of use to ordinary userspace programs such as GUIs or ls rather than
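A hedged userspace sketch of the mask's intended use (assumes a libc that exposes statx(2) and struct statx):

/* -1: attribute unsupported on this filesystem; 0/1: its state */
static int stx_immutable(const struct statx *stx)
{
	if (!(stx->stx_attributes_mask & STATX_ATTR_IMMUTABLE))
		return -1;
	return !!(stx->stx_attributes & STATX_ATTR_IMMUTABLE);
}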
* configuration space */
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20)
/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
-#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled)
+#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION 0
workqueue_init();
+ init_mm_internals();
+
do_pre_smp_initcalls();
lockup_detector_init();
/* queue msgs to send via kauditd_task */
static struct sk_buff_head audit_queue;
-static void kauditd_hold_skb(struct sk_buff *skb);
/* queue msgs due to temporary unicast send problems */
static struct sk_buff_head audit_retry_queue;
/* queue msgs waiting for new auditd connection */
spin_unlock_irqrestore(&auditd_conn.lock, flags);
}
-/**
- * auditd_reset - Disconnect the auditd connection
- *
- * Description:
- * Break the auditd/kauditd connection and move all the queued records into the
- * hold queue in case auditd reconnects.
- */
-static void auditd_reset(void)
-{
- struct sk_buff *skb;
-
- /* if it isn't already broken, break the connection */
- rcu_read_lock();
- if (auditd_conn.pid)
- auditd_set(0, 0, NULL);
- rcu_read_unlock();
-
- /* flush all of the main and retry queues to the hold queue */
- while ((skb = skb_dequeue(&audit_retry_queue)))
- kauditd_hold_skb(skb);
- while ((skb = skb_dequeue(&audit_queue)))
- kauditd_hold_skb(skb);
-}
-
/**
* kauditd_print_skb - Print the audit record to the ring buffer
* @skb: audit record
{
/* put the record back in the queue at the same place */
skb_queue_head(&audit_hold_queue, skb);
-
- /* fail the auditd connection */
- auditd_reset();
}
/**
/* we have no other options - drop the message */
audit_log_lost("kauditd hold queue overflow");
kfree_skb(skb);
-
- /* fail the auditd connection */
- auditd_reset();
}
/**
skb_queue_tail(&audit_retry_queue, skb);
}
+/**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+ struct sk_buff *skb;
+
+ /* if it isn't already broken, break the connection */
+ rcu_read_lock();
+ if (auditd_conn.pid)
+ auditd_set(0, 0, NULL);
+ rcu_read_unlock();
+
+ /* flush all of the main and retry queues to the hold queue */
+ while ((skb = skb_dequeue(&audit_retry_queue)))
+ kauditd_hold_skb(skb);
+ while ((skb = skb_dequeue(&audit_queue)))
+ kauditd_hold_skb(skb);
+}
+
/**
* auditd_send_unicast_skb - Send a record via unicast to auditd
* @skb: audit record
NULL, kauditd_rehold_skb);
if (rc < 0) {
sk = NULL;
+ auditd_reset();
goto main_queue;
}
NULL, kauditd_hold_skb);
if (rc < 0) {
sk = NULL;
+ auditd_reset();
goto main_queue;
}
* unicast, dump failed record sends to the retry queue; if
* sk == NULL due to previous failures we will just do the
* multicast send and move the record to the retry queue */
- kauditd_send_queue(sk, portid, &audit_queue, 1,
- kauditd_send_multicast_skb,
- kauditd_retry_skb);
+ rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
+ kauditd_send_multicast_skb,
+ kauditd_retry_skb);
+ if (sk == NULL || rc < 0)
+ auditd_reset();
+ sk = NULL;
/* drop our netns reference, no auditd sends past this line */
if (net) {
put_net(net);
net = NULL;
}
- sk = NULL;
/* we have processed all the queues so wake everyone */
wake_up(&audit_backlog_wait);
extern int audit_filter(int msgtype, unsigned int listtype);
#ifdef CONFIG_AUDITSYSCALL
-extern int __audit_signal_info(int sig, struct task_struct *t);
-static inline int audit_signal_info(int sig, struct task_struct *t)
-{
- if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
- return __audit_signal_info(sig, t);
- return 0;
-}
+extern int audit_signal_info(int sig, struct task_struct *t);
extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
extern struct list_head *audit_killed_trees(void);
#else
* If the audit subsystem is being terminated, record the task (pid)
* and uid that is doing that.
*/
-int __audit_signal_info(int sig, struct task_struct *t)
+int audit_signal_info(int sig, struct task_struct *t)
{
struct audit_aux_data_pids *axp;
struct task_struct *tsk = current;
struct audit_context *ctx = tsk->audit_context;
kuid_t uid = current_uid(), t_uid = task_uid(t);
- if (auditd_test_task(t)) {
- if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
- audit_sig_pid = task_tgid_nr(tsk);
- if (uid_valid(tsk->loginuid))
- audit_sig_uid = tsk->loginuid;
- else
- audit_sig_uid = uid;
- security_task_getsecid(tsk, &audit_sig_sid);
- }
- if (!audit_signals || audit_dummy_context())
- return 0;
+ if (auditd_test_task(t) &&
+ (sig == SIGTERM || sig == SIGHUP ||
+ sig == SIGUSR1 || sig == SIGUSR2)) {
+ audit_sig_pid = task_tgid_nr(tsk);
+ if (uid_valid(tsk->loginuid))
+ audit_sig_uid = tsk->loginuid;
+ else
+ audit_sig_uid = uid;
+ security_task_getsecid(tsk, &audit_sig_sid);
}
+ if (!audit_signals || audit_dummy_context())
+ return 0;
+
/* optimize the common case by putting first signal recipient directly
* in audit_context */
if (!ctx->target_pid) {
LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
off = IMM;
load_word:
- /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
- * only appearing in the programs where ctx ==
- * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
- * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
- * internal BPF verifier will check that BPF_R6 ==
- * ctx.
+ /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
+ * appearing in the programs where ctx == skb
+ * (see may_access_skb() in the verifier). All programs
+ * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
+ * bpf_convert_filter() saves it in BPF_R6, internal BPF
+ * verifier will check that BPF_R6 == ctx.
*
* BPF_ABS and BPF_IND are wrappers of function calls,
* so they scratch BPF_R1-BPF_R5 registers, preserve
}
}
-static int check_ptr_alignment(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, int off, int size)
+static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
+ int off, int size)
{
- if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
- if (off % size != 0) {
- verbose("misaligned access off %d size %d\n",
- off, size);
- return -EACCES;
- } else {
- return 0;
- }
- }
-
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
- /* misaligned access to packet is ok on x86,arm,arm64 */
- return 0;
-
if (reg->id && size != 1) {
- verbose("Unknown packet alignment. Only byte-sized access allowed\n");
+ verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
return -EACCES;
}
/* skb->data is NET_IP_ALIGN-ed */
- if (reg->type == PTR_TO_PACKET &&
- (NET_IP_ALIGN + reg->off + off) % size != 0) {
+ if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
verbose("misaligned packet access off %d+%d+%d size %d\n",
NET_IP_ALIGN, reg->off, off, size);
return -EACCES;
}
+
return 0;
}
+static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
+ int size)
+{
+ if (size != 1) {
+ verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int check_ptr_alignment(const struct bpf_reg_state *reg,
+ int off, int size)
+{
+ switch (reg->type) {
+ case PTR_TO_PACKET:
+ return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+ check_pkt_ptr_alignment(reg, off, size);
+ case PTR_TO_MAP_VALUE_ADJ:
+ return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+ check_val_ptr_alignment(reg, size);
+ default:
+ if (off % size != 0) {
+ verbose("misaligned access off %d size %d\n",
+ off, size);
+ return -EACCES;
+ }
+
+ return 0;
+ }
+}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
if (size < 0)
return size;
- err = check_ptr_alignment(env, reg, off, size);
+ err = check_ptr_alignment(reg, off, size);
if (err)
return err;
* register as unknown.
*/
if (env->allow_ptr_leaks &&
+ BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
(dst_reg->type == PTR_TO_MAP_VALUE ||
dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
- regs[i].range = dst_reg->off;
+ /* keep the maximum range already checked */
+ regs[i].range = max(regs[i].range, dst_reg->off);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
reg = &state->spilled_regs[i / BPF_REG_SIZE];
if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
- reg->range = dst_reg->off;
+ reg->range = max(reg->range, dst_reg->off);
}
}
tsk = tsk->group_leader;
/*
- * Workqueue threads may acquire PF_NO_SETAFFINITY and become
- * trapped in a cpuset, or RT worker may be born in a cgroup
- * with no rt_runtime allocated. Just say no.
+ * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+ * If userland migrates such a kthread to a non-root cgroup, it can
+	 * become trapped in a cpuset, or an RT kthread may be born in a
+ * cgroup with no rt_runtime allocated. Just say no.
*/
- if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+ if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
goto out_unlock_rcu;
}
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
- int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+ int n, nodes, cpus_per_vec, extra_vecs, curvec;
int affv = nvecs - affd->pre_vectors - affd->post_vectors;
int last_affv = affv + affd->pre_vectors;
nodemask_t nodemsk = NODE_MASK_NONE;
goto done;
}
- /* Spread the vectors per node */
- vecs_per_node = affv / nodes;
- /* Account for rounding errors */
- extra_vecs = affv - (nodes * vecs_per_node);
-
for_each_node_mask(n, nodemsk) {
- int ncpus, v, vecs_to_assign = vecs_per_node;
+ int ncpus, v, vecs_to_assign, vecs_per_node;
+
+ /* Spread the vectors per node */
+ vecs_per_node = (affv - curvec) / nodes;
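+
+		/*
+		 * Worked example (hedged; assumes no pre/post vectors and
+		 * enough CPUs on every node): affv = 8 over 3 nodes gives
+		 * the first node 8/3 = 2 vectors, the second (8 - 2)/2 = 3
+		 * and the third (8 - 5)/1 = 3, so recomputing the share per
+		 * node no longer loses vectors to rounding.
+		 */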
/* Get the cpus on this node which are in the mask */
cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
/* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk);
+ vecs_to_assign = min(vecs_per_node, ncpus);
+
+ /* Account for rounding errors */
+ extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
for (v = 0; curvec < last_affv && v < vecs_to_assign;
curvec++, v++) {
/* Account for extra vectors to compensate rounding errors */
if (extra_vecs) {
cpus_per_vec++;
- if (!--extra_vecs)
- vecs_per_node++;
+ --extra_vecs;
}
irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
}
if (curvec >= last_affv)
break;
+ --nodes;
}
done:
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/cgroup.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
ret = -EINTR;
if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+ cgroup_kthread_ready();
__kthread_parkme(self);
ret = threadfn(data);
}
set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
+ cgroup_init_kthreadd();
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
reorder = &next_queue->reorder;
+ spin_lock(&reorder->lock);
if (!list_empty(&reorder->list)) {
padata = list_entry(reorder->list.next,
struct padata_priv, list);
- spin_lock(&reorder->lock);
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
- spin_unlock(&reorder->lock);
pd->processed++;
+ spin_unlock(&reorder->lock);
goto out;
}
+ spin_unlock(&reorder->lock);
if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
padata = ERR_PTR(-ENODATA);
WARN_ON(!task->ptrace || task->parent != current);
+ /*
+ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+ * Recheck state under the lock to close this race.
+ */
spin_lock_irq(&task->sighand->siglock);
- if (__fatal_signal_pending(task))
- wake_up_state(task, __TASK_TRACED);
- else
- task->state = TASK_TRACED;
+ if (task->state == __TASK_TRACED) {
+ if (__fatal_signal_pending(task))
+ wake_up_state(task, __TASK_TRACED);
+ else
+ task->state = TASK_TRACED;
+ }
spin_unlock_irq(&task->sighand->siglock);
}
static int __sched_clock_stable_early = 1;
/*
- * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
+ * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
*/
-static __read_mostly u64 raw_offset;
-static __read_mostly u64 gtod_offset;
+__read_mostly u64 __sched_clock_offset;
+static __read_mostly u64 __gtod_offset;
struct sched_clock_data {
u64 tick_raw;
/*
* Attempt to make the (initial) unstable->stable transition continuous.
*/
- raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);
+ __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
- scd->tick_gtod, gtod_offset,
- scd->tick_raw, raw_offset);
+ scd->tick_gtod, __gtod_offset,
+ scd->tick_raw, __sched_clock_offset);
static_branch_enable(&__sched_clock_stable);
tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
-static void __clear_sched_clock_stable(struct work_struct *work)
+static void __sched_clock_work(struct work_struct *work)
+{
+ static_branch_disable(&__sched_clock_stable);
+}
+
+static DECLARE_WORK(sched_clock_work, __sched_clock_work);
+
+static void __clear_sched_clock_stable(void)
{
struct sched_clock_data *scd = this_scd();
*
* Still do what we can.
*/
- gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);
+ __gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
- scd->tick_gtod, gtod_offset,
- scd->tick_raw, raw_offset);
+ scd->tick_gtod, __gtod_offset,
+ scd->tick_raw, __sched_clock_offset);
- static_branch_disable(&__sched_clock_stable);
tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
-}
-static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
+ if (sched_clock_stable())
+ schedule_work(&sched_clock_work);
+}
void clear_sched_clock_stable(void)
{
smp_mb(); /* matches sched_clock_init_late() */
if (sched_clock_running == 2)
- schedule_work(&sched_clock_work);
+ __clear_sched_clock_stable();
}
void sched_clock_init_late(void)
*/
static u64 sched_clock_local(struct sched_clock_data *scd)
{
- u64 now, clock, old_clock, min_clock, max_clock;
+ u64 now, clock, old_clock, min_clock, max_clock, gtod;
s64 delta;
again:
* scd->tick_gtod + TICK_NSEC);
*/
- clock = scd->tick_gtod + gtod_offset + delta;
- min_clock = wrap_max(scd->tick_gtod, old_clock);
- max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
+ gtod = scd->tick_gtod + __gtod_offset;
+ clock = gtod + delta;
+ min_clock = wrap_max(gtod, old_clock);
+ max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
u64 clock;
if (sched_clock_stable())
- return sched_clock() + raw_offset;
+ return sched_clock() + __sched_clock_offset;
if (unlikely(!sched_clock_running))
return 0ull;
if (write) {
if (*negp)
return -EINVAL;
+ if (*lvalp > UINT_MAX)
+ return -EINVAL;
*valp = *lvalp;
} else {
unsigned int val = *valp;
+ *negp = false;
*lvalp = (unsigned long)val;
}
return 0;
ftrace_probe_registered = 1;
}
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
{
int i;
if (!ftrace_probe_registered)
- return;
+ return false;
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
if (hhd->first)
- return;
+ return false;
}
/* no more funcs left */
ftrace_shutdown(&trace_probe_ops, 0);
ftrace_probe_registered = 0;
+ return true;
}
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *rec_entry;
struct ftrace_func_probe *entry;
struct ftrace_func_probe *p;
struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
int i, ret;
+ bool disabled;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
func_g.search = NULL;
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+ old_hash_ops.filter_hash = old_hash;
+ /* Probes only have filters */
+ old_hash_ops.notrace_hash = NULL;
+
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
if (!hash)
/* Hmm, should report this somehow */
}
}
mutex_lock(&ftrace_lock);
- __disable_ftrace_function_probe();
+ disabled = __disable_ftrace_function_probe();
/*
* Remove after the disable is called. Otherwise, if the last
* probe is removed, a null hash means *all enabled*.
*/
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+ /* still need to update the function call sites */
+ if (ftrace_enabled && !disabled)
+ ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+ &old_hash_ops);
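/*
 * Editorial note: __disable_ftrace_function_probe() now reports whether
 * it actually shut trace_probe_ops down. If other probes remain
 * (disabled == false), moving the hash alone does not repatch the call
 * sites, so FTRACE_UPDATE_CALLS must be run here against the old
 * filter hash saved in old_hash_ops.
 */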
synchronize_sched();
if (!ret)
free_ftrace_hash_rcu(old_hash);
rb_data[cpu].cnt = cpu;
rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
"rbtester/%d", cpu);
- if (WARN_ON(!rb_threads[cpu])) {
+ if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_threads[cpu]);
goto out_free;
}
/* Now create the rb hammer! */
rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
- if (WARN_ON(!rb_hammer)) {
+ if (WARN_ON(IS_ERR(rb_hammer))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_hammer);
goto out_free;
}
}
EXPORT_SYMBOL(iov_iter_advance);
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+ if (!unroll)
+ return;
+ i->count += unroll;
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ size_t off = i->iov_offset;
+ while (1) {
+ size_t n = off - pipe->bufs[idx].offset;
+ if (unroll < n) {
+ off -= (n - unroll);
+ break;
+ }
+ unroll -= n;
+ if (!unroll && idx == i->start_idx) {
+ off = 0;
+ break;
+ }
+ if (!idx--)
+ idx = pipe->buffers - 1;
+ off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+ }
+ i->iov_offset = off;
+ i->idx = idx;
+ pipe_truncate(i);
+ return;
+ }
+ if (unroll <= i->iov_offset) {
+ i->iov_offset -= unroll;
+ return;
+ }
+ unroll -= i->iov_offset;
+ if (i->type & ITER_BVEC) {
+ const struct bio_vec *bvec = i->bvec;
+ while (1) {
+ size_t n = (--bvec)->bv_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->bvec = bvec;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+	} else { /* same logic for iovec and kvec */
+ const struct iovec *iov = i->iov;
+ while (1) {
+ size_t n = (--iov)->iov_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->iov = iov;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ }
+}
+EXPORT_SYMBOL(iov_iter_revert);
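/*
 * Editorial usage sketch (not patch code; function and variable names
 * are assumed): the intended caller pattern is "advance optimistically,
 * revert on failure", which the datagram changes later in this series
 * rely on.
 */
static int copy_or_fail(void *buf, size_t len, struct iov_iter *iter)
{
	size_t copied = copy_to_iter(buf, len, iter); /* advances iter */

	if (copied != len) {
		/* walk the iterator back over the partial copy */
		iov_iter_revert(iter, copied);
		return -EFAULT;
	}
	return 0;
}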
+
/*
* Return the count of just the current iov_iter segment.
*/
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
i->count = count;
+ i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
if (!try_get_task_stack(target)) {
/* Task has no stack, so the task isn't in a syscall. */
+ *sp = *pc = 0;
*callno = -1;
return 0;
}
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
static int __init kmalloc_tests_init(void)
{
+ /*
+ * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+ * report for the first case.
+ */
+ bool multishot = kasan_save_enable_multi_shot();
+
kmalloc_oob_right();
kmalloc_oob_left();
kmalloc_node_oob_right();
ksize_unpoisons_memory();
copy_user_test();
use_after_scope_test();
+
+ kasan_restore_multi_shot(multishot);
+
return -EAGAIN;
}
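/*
 * Editorial note: this is a save/restore pair rather than a plain
 * enable/disable. kasan_save_enable_multi_shot() returns the previous
 * state of the multi-shot bit, and kasan_restore_multi_shot() clears
 * it only if it was previously clear, so the test coexists with a
 * kasan_multi_shot boot parameter already in effect.
 */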
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
- } else if (!memcmp("defer", buf,
- min(sizeof("defer")-1, count))) {
- clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
- clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
- clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
- set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
} else if (!memcmp("defer+madvise", buf,
min(sizeof("defer+madvise")-1, count))) {
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ } else if (!memcmp("defer", buf,
+ min(sizeof("defer")-1, count))) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
} else if (!memcmp("madvise", buf,
min(sizeof("madvise")-1, count))) {
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
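/*
 * Editorial note on the hunk above: each branch compares only a prefix,
 * e.g. memcmp("defer", buf, min(sizeof("defer")-1, count)), and "defer"
 * is a prefix of "defer+madvise". The longer keyword therefore has to
 * be tested first, or writing "defer+madvise" would be misparsed as
 * "defer".
 */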
deactivate_page(page);
if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
- orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
- tlb->fullmm);
+ pmdp_invalidate(vma, addr, pmd);
orig_pmd = pmd_mkold(orig_pmd);
orig_pmd = pmd_mkclean(orig_pmd);
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
- int ret = 0;
+ pmd_t entry;
+ bool preserve_write;
+ int ret;
ptl = __pmd_trans_huge_lock(pmd, vma);
- if (ptl) {
- pmd_t entry;
- bool preserve_write = prot_numa && pmd_write(*pmd);
- ret = 1;
+ if (!ptl)
+ return 0;
- /*
- * Avoid trapping faults against the zero page. The read-only
- * data is likely to be read-cached on the local CPU and
- * local/remote hits to the zero page are not interesting.
- */
- if (prot_numa && is_huge_zero_pmd(*pmd)) {
- spin_unlock(ptl);
- return ret;
- }
+ preserve_write = prot_numa && pmd_write(*pmd);
+ ret = 1;
- if (!prot_numa || !pmd_protnone(*pmd)) {
- entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
- entry = pmd_modify(entry, newprot);
- if (preserve_write)
- entry = pmd_mk_savedwrite(entry);
- ret = HPAGE_PMD_NR;
- set_pmd_at(mm, addr, pmd, entry);
- BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
- pmd_write(entry));
- }
- spin_unlock(ptl);
- }
+ /*
+ * Avoid trapping faults against the zero page. The read-only
+ * data is likely to be read-cached on the local CPU and
+ * local/remote hits to the zero page are not interesting.
+ */
+ if (prot_numa && is_huge_zero_pmd(*pmd))
+ goto unlock;
+
+ if (prot_numa && pmd_protnone(*pmd))
+ goto unlock;
+
+ /*
+	 * When prot_numa is set, we are under down_read(mmap_sem). It's critical
+	 * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
+	 * which also runs under down_read(mmap_sem):
+ *
+ * CPU0: CPU1:
+ * change_huge_pmd(prot_numa=1)
+ * pmdp_huge_get_and_clear_notify()
+ * madvise_dontneed()
+ * zap_pmd_range()
+ * pmd_trans_huge(*pmd) == 0 (without ptl)
+ * // skip the pmd
+ * set_pmd_at();
+ * // pmd is re-established
+ *
+	 * The race lets MADV_DONTNEED miss the huge pmd and fail to clear it,
+	 * which may break userspace.
+ *
+ * pmdp_invalidate() is required to make sure we don't miss
+ * dirty/young flags set by hardware.
+ */
+ entry = *pmd;
+ pmdp_invalidate(vma, addr, pmd);
+ /*
+	 * Recover the dirty/young flags. This relies on pmdp_invalidate() not
+	 * corrupting them.
+ */
+ if (pmd_dirty(*pmd))
+ entry = pmd_mkdirty(entry);
+ if (pmd_young(*pmd))
+ entry = pmd_mkyoung(entry);
+
+ entry = pmd_modify(entry, newprot);
+ if (preserve_write)
+ entry = pmd_mk_savedwrite(entry);
+ ret = HPAGE_PMD_NR;
+ set_pmd_at(mm, addr, pmd, entry);
+ BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+ spin_unlock(ptl);
return ret;
}
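/*
 * Editorial sketch of the safe pattern above (this mirrors the patch,
 * it is not new code): never let the pmd become pmd_none() while
 * holding only mmap_sem for read.
 *
 *   entry = *pmd;
 *   pmdp_invalidate(vma, addr, pmd);  // stays visible to zap_pmd_range()
 *   if (pmd_dirty(*pmd)) entry = pmd_mkdirty(entry);  // re-merge HW bits
 *   if (pmd_young(*pmd)) entry = pmd_mkyoung(entry);
 *   entry = pmd_modify(entry, newprot);
 *   set_pmd_at(mm, addr, pmd, entry);
 *
 * The removed pmdp_huge_get_and_clear_notify() left a window in which a
 * concurrent MADV_DONTNEED saw a cleared pmd and skipped it.
 */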
return 0;
out_err:
if (!vma || vma->vm_flags & VM_MAYSHARE)
- region_abort(resv_map, from, to);
+ /* Don't call region_abort if region_chg failed */
+ if (chg >= 0)
+ region_abort(resv_map, from, to);
if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
kref_put(&resv_map->refs, resv_map_release);
return ret;
{
struct page *page = NULL;
spinlock_t *ptl;
+ pte_t pte;
retry:
ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
*/
if (!pmd_huge(*pmd))
goto out;
- if (pmd_present(*pmd)) {
+ pte = huge_ptep_get((pte_t *)pmd);
+ if (pte_present(pte)) {
page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
if (flags & FOLL_GET)
get_page(page);
} else {
- if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+ if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
__migration_entry_wait(mm, (pte_t *)pmd, ptl);
goto retry;
enum ttu_flags;
struct tlbflush_unmap_batch;
+
+/*
+ * Only for MM-internal work items that do not depend on any
+ * allocations or locks that might themselves depend on allocations.
+ */
+extern struct workqueue_struct *mm_percpu_wq;
+
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
<< KASAN_SHADOW_SCALE_SHIFT);
}
-static inline bool kasan_report_enabled(void)
-{
- return !current->kasan_depth;
-}
-
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_double_free(struct kmem_cache *cache, void *object,
*
*/
+#include <linux/bitops.h>
#include <linux/ftrace.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
kasan_end_report(&flags);
}
+static unsigned long kasan_flags;
+
+#define KASAN_BIT_REPORTED 0
+#define KASAN_BIT_MULTI_SHOT 1
+
+bool kasan_save_enable_multi_shot(void)
+{
+ return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+
+void kasan_restore_multi_shot(bool enabled)
+{
+ if (!enabled)
+ clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+
+static int __init kasan_set_multi_shot(char *str)
+{
+ set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+ return 1;
+}
+__setup("kasan_multi_shot", kasan_set_multi_shot);
+
+static inline bool kasan_report_enabled(void)
+{
+ if (current->kasan_depth)
+ return false;
+ if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+ return true;
+ return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
+}
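/*
 * Editorial note: the gate above encodes "report everything in
 * multi-shot mode, otherwise report exactly once".
 * test_and_set_bit(KASAN_BIT_REPORTED, ...) returns the old bit value,
 * so the first reporter sees 0 (print) and all later ones see 1
 * (suppress), atomically and without extra locking.
 */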
+
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip)
{
/* data/bss scanning */
scan_large_block(_sdata, _edata);
scan_large_block(__bss_start, __bss_stop);
- scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
+ scan_large_block(__start_ro_after_init, __end_ro_after_init);
#ifdef CONFIG_SMP
/* per-cpu sections scanning */
COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(bm, nmask, nr_bits);
+ if (compat_get_bitmap(bm, nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, bm, alloc_size);
+ if (copy_to_user(nm, bm, alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
compat_ulong_t, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode, compat_ulong_t, flags)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
VM_BUG_ON_PAGE(PageTail(page), page);
while (page_vma_mapped_walk(&pvmw)) {
- new = page - pvmw.page->index +
- linear_page_index(vma, pvmw.address);
+ if (PageKsm(page))
+ new = page;
+ else
+ new = page - pvmw.page->index +
+ linear_page_index(vma, pvmw.address);
get_page(new);
pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
*/
static cpumask_t cpus_with_pcps;
+ /*
+ * Make sure nobody triggers this path before mm_percpu_wq is fully
+ * initialized.
+ */
+ if (WARN_ON_ONCE(!mm_percpu_wq))
+ return;
+
/* Workqueues cannot recurse */
if (current->flags & PF_WQ_WORKER)
return;
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
- schedule_work_on(cpu, work);
+ queue_work_on(cpu, mm_percpu_wq, work);
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
K(node_page_state(pgdat, NR_FILE_MAPPED)),
K(node_page_state(pgdat, NR_FILE_DIRTY)),
K(node_page_state(pgdat, NR_WRITEBACK)),
+ K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
* HPAGE_PMD_NR),
K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
#endif
- K(node_page_state(pgdat, NR_SHMEM)),
K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
node_page_state(pgdat, NR_PAGES_SCANNED),
if (pvmw->pmd && !pvmw->pte)
return not_found(pvmw);
- /* Only for THP, seek to next pte entry makes sense */
- if (pvmw->pte) {
- if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
- return not_found(pvmw);
+ if (pvmw->pte)
goto next_pte;
- }
if (unlikely(PageHuge(pvmw->page))) {
/* when pud is not present, pte will be NULL */
while (1) {
if (check_pte(pvmw))
return true;
-next_pte: do {
+next_pte:
+		/* Seeking to the next pte only makes sense for THP */
+ if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+ return not_found(pvmw);
+ do {
pvmw->address += PAGE_SIZE;
- if (pvmw->address >=
+ if (pvmw->address >= pvmw->vma->vm_end ||
+ pvmw->address >=
__vma_address(pvmw->page, pvmw->vma) +
hpage_nr_pages(pvmw->page) * PAGE_SIZE)
return not_found(pvmw);
goto out;
}
__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
- mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+ mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
out:
unlock_page_memcg(page);
}
* pte lock(a spinlock) is held, which implies preemption disabled.
*/
__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
- mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+ mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-/*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
- * workqueue, aiding in getting memory freed.
- */
-static struct workqueue_struct *lru_add_drain_wq;
-
-static int __init lru_init(void)
-{
- lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
-
- if (WARN(!lru_add_drain_wq,
- "Failed to create workqueue lru_add_drain_wq"))
- return -ENOMEM;
-
- return 0;
-}
-early_initcall(lru_init);
-
void lru_add_drain_all(void)
{
static DEFINE_MUTEX(lock);
static struct cpumask has_work;
int cpu;
+ /*
+ * Make sure nobody triggers this path before mm_percpu_wq is fully
+ * initialized.
+ */
+ if (WARN_ON(!mm_percpu_wq))
+ return;
+
mutex_lock(&lock);
get_online_cpus();
cpumask_clear(&has_work);
pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
need_activate_page_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
- queue_work_on(cpu, lru_add_drain_wq, work);
+ queue_work_on(cpu, mm_percpu_wq, work);
cpumask_set_cpu(cpu, &has_work);
}
}
struct page *page = map[i];
if (page)
__free_page(page);
+ if (!(i % SWAP_CLUSTER_MAX))
+ cond_resched();
}
vfree(map);
}
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
-static struct workqueue_struct *vmstat_wq;
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
* to occur in the future. Keep on running the
* update worker thread.
*/
- queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+ queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
}
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
if (!delayed_work_pending(dw) && need_update(cpu))
- queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+ queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
}
put_online_cpus();
INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
vmstat_update);
- vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
schedule_delayed_work(&shepherd,
round_jiffies_relative(sysctl_stat_interval));
}
#endif
-static int __init setup_vmstat(void)
+struct workqueue_struct *mm_percpu_wq;
+
+void __init init_mm_internals(void)
{
-#ifdef CONFIG_SMP
- int ret;
+ int ret __maybe_unused;
+
+ mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+#ifdef CONFIG_SMP
ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
NULL, vmstat_cpu_dead);
if (ret < 0)
proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
- return 0;
}
-module_init(setup_vmstat)
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
- ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key);
+ ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
spin_lock(&zhdr->page_lock);
}
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+ return spin_trylock(&zhdr->page_lock);
+}
+
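/*
 * Editorial note: the allocation path below previously dropped
 * pool->lock and only then took the page lock, leaving a window in
 * which the header could be freed out from under it. Taking the lock
 * via trylock while still holding pool->lock closes that window
 * without ever blocking under the pool lock; busy headers are skipped.
 */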
/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
spin_lock(&pool->lock);
zhdr = list_first_entry_or_null(&pool->unbuddied[i],
struct z3fold_header, buddy);
- if (!zhdr) {
+ if (!zhdr || !z3fold_page_trylock(zhdr)) {
spin_unlock(&pool->lock);
continue;
}
spin_unlock(&pool->lock);
page = virt_to_page(zhdr);
- z3fold_page_lock(zhdr);
if (zhdr->first_chunks == 0) {
if (zhdr->middle_chunks != 0 &&
chunks >= zhdr->start_middle)
struct zspage {
struct {
unsigned int fullness:FULLNESS_BITS;
- unsigned int class:CLASS_BITS;
+ unsigned int class:CLASS_BITS + 1;
unsigned int isolated:ISOLATED_BITS;
unsigned int magic:MAGIC_VAL_BITS;
};
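/*
 * Editorial note (hedged): on configurations with a larger PAGE_SIZE
 * the number of size classes exceeds what CLASS_BITS can encode, so
 * the class field overflowed into its neighbours in this packed
 * bitfield; widening it by one bit keeps fullness/isolated/magic
 * intact.
 */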
return err;
}
+static void br_dev_uninit(struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ br_multicast_uninit_stats(br);
+ br_vlan_flush(br);
+ free_percpu(br->stats);
+}
+
static int br_dev_open(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
.ndo_open = br_dev_open,
.ndo_stop = br_dev_stop,
.ndo_init = br_dev_init,
+ .ndo_uninit = br_dev_uninit,
.ndo_start_xmit = br_dev_xmit,
.ndo_get_stats64 = br_get_stats64,
.ndo_set_mac_address = br_set_mac_address,
.ndo_features_check = passthru_features_check,
};
-static void br_dev_free(struct net_device *dev)
-{
- struct net_bridge *br = netdev_priv(dev);
-
- free_percpu(br->stats);
- free_netdev(dev);
-}
-
static struct device_type br_type = {
.name = "bridge",
};
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
- dev->destructor = br_dev_free;
+ dev->destructor = free_netdev;
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
br_fdb_delete_by_port(br, NULL, 0, 1);
- br_vlan_flush(br);
br_multicast_dev_del(br);
cancel_delayed_work_sync(&br->gc_work);
out:
spin_unlock_bh(&br->multicast_lock);
-
- free_percpu(br->mcast_stats);
}
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
return 0;
}
+void br_multicast_uninit_stats(struct net_bridge *br)
+{
+ free_percpu(br->mcast_stats);
+}
+
static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
spin_unlock_bh(&br->lock);
}
- err = br_changelink(dev, tb, data);
+ err = register_netdevice(dev);
if (err)
return err;
- return register_netdevice(dev);
+ err = br_changelink(dev, tb, data);
+ if (err)
+ unregister_netdevice(dev);
+ return err;
}
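/*
 * Editorial note: newlink is made transactional. Registration happens
 * first because br_changelink() may rely on a fully registered netdev,
 * and a changelink failure afterwards is rolled back with
 * unregister_netdevice(), so no half-configured bridge is left behind.
 */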
static size_t br_get_size(const struct net_device *brdev)
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
const struct sk_buff *skb, u8 type, u8 dir);
int br_multicast_init_stats(struct net_bridge *br);
+void br_multicast_uninit_stats(struct net_bridge *br);
void br_multicast_get_stats(const struct net_bridge *br,
const struct net_bridge_port *p,
struct br_mcast_stats *dest);
return 0;
}
+static inline void br_multicast_uninit_stats(struct net_bridge *br)
+{
+}
+
static inline int br_multicast_igmp_type(const struct sk_buff *skb)
{
return 0;
struct iov_iter *to, int len)
{
int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int i, copy = start - offset, start_off = offset, n;
struct sk_buff *frag_iter;
trace_skb_copy_datagram_iovec(skb, len);
if (copy > 0) {
if (copy > len)
copy = len;
- if (copy_to_iter(skb->data + offset, copy, to) != copy)
+ n = copy_to_iter(skb->data + offset, copy, to);
+ offset += n;
+ if (n != copy)
goto short_copy;
if ((len -= copy) == 0)
return 0;
- offset += copy;
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (copy_page_to_iter(skb_frag_page(frag),
+ n = copy_page_to_iter(skb_frag_page(frag),
frag->page_offset + offset -
- start, copy, to) != copy)
+ start, copy, to);
+ offset += n;
+ if (n != copy)
goto short_copy;
if (!(len -= copy))
return 0;
- offset += copy;
}
start = end;
}
*/
fault:
+ iov_iter_revert(to, offset - start_off);
return -EFAULT;
short_copy:
__wsum *csump)
{
int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int i, copy = start - offset, start_off = offset;
struct sk_buff *frag_iter;
int pos = 0;
int n;
if (copy > len)
copy = len;
n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+ offset += n;
if (n != copy)
goto fault;
if ((len -= copy) == 0)
return 0;
- offset += copy;
pos = copy;
}
offset - start, copy,
&csum2, to);
kunmap(page);
+ offset += n;
if (n != copy)
goto fault;
*csump = csum_block_add(*csump, csum2, pos);
if (!(len -= copy))
return 0;
- offset += copy;
pos += copy;
}
start = end;
return 0;
fault:
+ iov_iter_revert(to, offset - start_off);
return -EFAULT;
}
}
return 0;
csum_error:
+ iov_iter_revert(&msg->msg_iter, chunk);
return -EINVAL;
fault:
return -EFAULT;
return err;
}
-EXPORT_SYMBOL(dev_change_xdp_fd);
/**
* dev_new_index - allocate an ifindex
unsigned char ar_tip[4];
} *arp_eth, _arp_eth;
const struct arphdr *arp;
- struct arphdr *_arp;
+ struct arphdr _arp;
arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
hlen, &_arp);
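/*
 * Editorial note: _arp is the on-stack backing buffer that
 * __skb_header_pointer() copies into when the header is not in the
 * linear area, so it must be a struct arphdr, not a pointer: with the
 * old declaration, sizeof(_arp) was the size of a pointer and &_arp
 * pointed at the pointer itself.
 */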
if (skb)
skb = skb_clone(skb, GFP_ATOMIC);
write_unlock(&neigh->lock);
- neigh->ops->solicit(neigh, skb);
+ if (neigh->ops->solicit)
+ neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
kfree_skb(skb);
}
#include <net/tcp.h>
static siphash_key_t net_secret __read_mostly;
+static siphash_key_t ts_secret __read_mostly;
static __always_inline void net_secret_init(void)
{
+ net_get_random_once(&ts_secret, sizeof(ts_secret));
net_get_random_once(&net_secret, sizeof(net_secret));
}
#endif
#endif
#if IS_ENABLED(CONFIG_IPV6)
+static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+{
+ const struct {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ } __aligned(SIPHASH_ALIGNMENT) combined = {
+ .saddr = *(struct in6_addr *)saddr,
+ .daddr = *(struct in6_addr *)daddr,
+ };
+
+ if (sysctl_tcp_timestamps != 1)
+ return 0;
+
+ return siphash(&combined, offsetofend(typeof(combined), daddr),
+ &ts_secret);
+}
+
u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport, u32 *tsoff)
{
net_secret_init();
hash = siphash(&combined, offsetofend(typeof(combined), dport),
&net_secret);
- *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+ *tsoff = secure_tcpv6_ts_off(saddr, daddr);
return seq_scale(hash);
}
EXPORT_SYMBOL(secure_tcpv6_sequence_number);
#endif
#ifdef CONFIG_INET
+static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+{
+ if (sysctl_tcp_timestamps != 1)
+ return 0;
+
+ return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+ &ts_secret);
+}
/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
* but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
(__force u32)sport << 16 | (__force u32)dport,
&net_secret);
- *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+ *tsoff = secure_tcp_ts_off(saddr, daddr);
return seq_scale(hash);
}
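/*
 * Editorial note: the timestamp offset used to be the top 32 bits of
 * the very siphash whose low bits seed the initial sequence number, so
 * every SYN exposed half of a net_secret-keyed hash via the TS option.
 * Keying tsoff with the separate ts_secret keeps it stable per address
 * pair (as PAWS needs) while revealing nothing about the ISN hash.
 */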
.data = &sysctl_net_busy_poll,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
{
.procname = "busy_read",
.data = &sysctl_net_busy_read,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
#endif
#ifdef CONFIG_NET_SCHED
while ((d = next)) {
next = d->next;
dev = d->dev;
- if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+ if (d != ic_dev && !netdev_uses_dsa(dev)) {
pr_debug("IP-Config: Downing %s\n", dev->name);
dev_change_flags(dev, d->flags);
}
clusterip_config_put(cipinfo->config);
- nf_ct_netns_get(par->net, par->family);
+ nf_ct_netns_put(par->net, par->family);
}
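/*
 * Editorial note (hedged): this is the xt_CLUSTERIP destroy path, so
 * it must drop the conntrack netns reference taken at checkentry time;
 * calling nf_ct_netns_get() here instead leaked a reference on every
 * rule removal.
 */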
#ifdef CONFIG_COMPAT
.timeout = 180,
};
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
- .me = THIS_MODULE,
- .help = help,
- .expect_policy = &snmp_exp_policy,
- .name = "snmp",
- .tuple.src.l3num = AF_INET,
- .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
- .tuple.dst.protonum = IPPROTO_UDP,
-};
-
static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
.me = THIS_MODULE,
.help = help,
static int __init nf_nat_snmp_basic_init(void)
{
- int ret = 0;
-
BUG_ON(nf_nat_snmp_hook != NULL);
RCU_INIT_POINTER(nf_nat_snmp_hook, help);
- ret = nf_conntrack_helper_register(&snmp_trap_helper);
- if (ret < 0) {
- nf_conntrack_helper_unregister(&snmp_helper);
- return ret;
- }
- return ret;
+ return nf_conntrack_helper_register(&snmp_trap_helper);
}
static void __exit nf_nat_snmp_basic_fini(void)
{
RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+ synchronize_rcu();
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
+
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+ write_lock_bh(&ping_table.lock);
if (sk_hashed(sk)) {
- write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&ping_table.lock);
}
+ write_unlock_bh(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);
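/*
 * Editorial note: taking ping_table.lock unconditionally, rather than
 * only after sk_hashed() has returned true, closes a race where two
 * concurrent unhash paths could both pass the check and unlink the
 * node twice, a use-after-free. The check now runs under the lock.
 */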
skb_reset_network_header(skb);
/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
- ip_hdr(skb)->protocol = IPPROTO_ICMP;
+ ip_hdr(skb)->protocol = IPPROTO_UDP;
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
+ tcp_saved_syn_free(tp);
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
#define REXMIT_LOST 1 /* retransmit packets marked lost */
#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
-static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
+ unsigned int len)
{
static bool __once __read_mostly;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
- pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
- dev ? dev->name : "Unknown driver");
+ if (!dev || len >= dev->mtu)
+ pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+ dev ? dev->name : "Unknown driver");
rcu_read_unlock();
}
}
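/*
 * Editorial note: two guards keep this warning quiet on healthy
 * drivers. The caller only trips it when len exceeds rcv_mss by more
 * than MAX_TCP_OPTION_SPACE (40 bytes), since individual segments may
 * legitimately carry fewer TCP options than the one that set rcv_mss;
 * and the aggregate must also reach the device MTU (or the device be
 * unknown) before a broken GRO implementation is a plausible cause.
 */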
if (len >= icsk->icsk_ack.rcv_mss) {
icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
tcp_sk(sk)->advmss);
- if (unlikely(icsk->icsk_ack.rcv_mss != len))
- tcp_gro_dev_warn(sk, skb);
+ /* Account for possibly-removed options */
+ if (unlikely(len > icsk->icsk_ack.rcv_mss +
+ MAX_TCP_OPTION_SPACE))
+ tcp_gro_dev_warn(sk, skb, len);
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
const int ts)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (metric > tp->reordering) {
- int mib_idx;
+ int mib_idx;
+ if (metric > tp->reordering) {
tp->reordering = min(sysctl_tcp_max_reordering, metric);
- /* This exciting event is worth to be remembered. 8) */
- if (ts)
- mib_idx = LINUX_MIB_TCPTSREORDER;
- else if (tcp_is_reno(tp))
- mib_idx = LINUX_MIB_TCPRENOREORDER;
- else if (tcp_is_fack(tp))
- mib_idx = LINUX_MIB_TCPFACKREORDER;
- else
- mib_idx = LINUX_MIB_TCPSACKREORDER;
-
- NET_INC_STATS(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
}
tp->rack.reord = 1;
+
+	/* This exciting event is worth remembering. 8) */
+ if (ts)
+ mib_idx = LINUX_MIB_TCPTSREORDER;
+ else if (tcp_is_reno(tp))
+ mib_idx = LINUX_MIB_TCPRENOREORDER;
+ else if (tcp_is_fack(tp))
+ mib_idx = LINUX_MIB_TCPFACKREORDER;
+ else
+ mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+ NET_INC_STATS(sock_net(sk), mib_idx);
}
/* This must be called before lost_out is incremented */
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct sk_buff *skb;
+ bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
bool is_reneg; /* is receiver reneging on SACKs? */
bool mark_lost;
tp->high_seq = tp->snd_nxt;
tcp_ecn_queue_cwr(tp);
- /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO
- * if a previous recovery is underway, otherwise it may incorrectly
- * call a timeout spurious if some previously retransmitted packets
- * are s/acked (sec 3.2). We do not apply that retriction since
- * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS
- * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
- * on PTMU discovery to avoid sending new data.
+ /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+ * loss recovery is underway except recurring timeout(s) on
+	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing.
+ *
+ * In theory F-RTO can be used repeatedly during loss recovery.
+ * In practice this interacts badly with broken middle-boxes that
+ * falsely raise the receive window, which results in repeated
+ * timeouts and stop-and-go behavior.
*/
- tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size;
+ tp->frto = sysctl_tcp_frto &&
+ (new_recovery || icsk->icsk_retransmits) &&
+ !inet_csk(sk)->icsk_mtup.probe_size;
}
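/*
 * Editorial note: the condition now reads "first timeout of a fresh
 * loss episode, or a recurring timeout on the same SND.UNA", which is
 * what RFC 5682 sec 3.1 step 1 permits. Arming F-RTO mid-recovery
 * interacted badly with middleboxes that falsely open the receive
 * window, producing repeated spurious RTOs and stop-and-go transfers.
 */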
/* If ACK arrived pointing to a remembered SACK, it means that our
{
struct sk_buff *skb;
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
+
/* NOTE: No TCP options attached and we never retransmit this. */
skb = alloc_skb(MAX_TCP_HEADER, priority);
if (!skb) {
/* Send it off. */
if (tcp_transmit_skb(sk, skb, 0, priority))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
-
- TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
}
/* Send a crossed SYN-ACK during socket establishment.
/* Account for retransmits that are lost again */
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
+ tcp_skb_pcount(skb));
}
}
INIT_LIST_HEAD(&del_list);
list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
struct rt6_info *rt = NULL;
+ bool keep;
addrconf_del_dad_work(ifa);
+ keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+ !addr_is_local(&ifa->addr);
+ if (!keep)
+ list_move(&ifa->if_list, &del_list);
+
write_unlock_bh(&idev->lock);
spin_lock_bh(&ifa->lock);
- if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
- !addr_is_local(&ifa->addr)) {
+ if (keep) {
/* set state to skip the notifier below */
state = INET6_IFADDR_STATE_DEAD;
ifa->state = 0;
} else {
state = ifa->state;
ifa->state = INET6_IFADDR_STATE_DEAD;
-
- list_move(&ifa->if_list, &del_list);
}
spin_unlock_bh(&ifa->lock);
struct kcm_attach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_attach_ioctl(sock, &info);
struct kcm_unattach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_unattach_ioctl(sock, &info);
struct socket *newsock = NULL;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_clone(sock, &info, &newsock);
}
EXPORT_SYMBOL_GPL(l2tp_session_find);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+
+ if (!tunnel) {
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ session_list = l2tp_session_id_hash_2(pn, session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, session_list, global_hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+ }
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, session_list, hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return session;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
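/*
 * Editorial usage sketch (assumed caller, not patch code): each
 * successful get must be balanced once the pointer is dropped.
 */
static void use_session(struct net *net, u32 session_id)
{
	struct l2tp_session *session;

	session = l2tp_session_get(net, NULL, session_id, true);
	if (!session)
		return;

	/* ... use session ... */

	if (session->deref)
		session->deref(session);	/* undoes do_ref */
	l2tp_session_dec_refcount(session);	/* undoes the get */
}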
+
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref)
{
int hash;
struct l2tp_session *session;
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
read_unlock_bh(&tunnel->hlist_lock);
return session;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
*/
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref)
{
struct l2tp_net *pn = l2tp_pernet(net);
int hash;
for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
rcu_read_unlock_bh();
+
return session;
}
}
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session)
+{
+ struct l2tp_session *session_walk;
+ struct hlist_head *g_head;
+ struct hlist_head *head;
+ struct l2tp_net *pn;
+
+ head = l2tp_session_id_hash(tunnel, session->session_id);
+
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session_walk, head, hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist;
+
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+ session->session_id);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_for_each_entry(session_walk, g_head, global_hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist_glob;
+
+ hlist_add_head_rcu(&session->global_hlist, g_head);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ }
+
+ hlist_add_head(&session->hlist, head);
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return 0;
+
+exist_glob:
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return -EEXIST;
+}
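/*
 * Editorial note: folding the duplicate-ID check and the insertion into
 * one critical section (tunnel->hlist_lock, plus the pernet lock for
 * the L2TPv3 global hash) is the point of this helper. The callers used
 * to check for an existing session and insert later, so two racing
 * creators could both pass the check; now the loser gets -EEXIST.
 */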
/* Lookup a tunnel by id
*/
* a data (not control) frame before coming here. Fields up to the
* session-id have already been parsed and ptr points to the data
* after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after the skb is processed.
*/
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
unsigned char *ptr, unsigned char *optr, u16 hdrflags,
int offset;
u32 ns, nr;
- /* The ref count is increased since we now hold a pointer to
- * the session. Take care to decrement the refcnt when exiting
- * this function from now on...
- */
- l2tp_session_inc_refcount(session);
- if (session->ref)
- (*session->ref)(session);
-
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
/* Try to dequeue as many skbs from reorder_q as we can. */
l2tp_recv_dequeue(session);
- l2tp_session_dec_refcount(session);
-
return;
discard:
if (session->deref)
(*session->deref)(session);
-
- l2tp_session_dec_refcount(session);
}
EXPORT_SYMBOL(l2tp_recv_common);
}
/* Find the session context */
- session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+ session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
if (!session || !session->recv_skb) {
+ if (session) {
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ }
+
/* Not found? Pass to userspace to deal with */
l2tp_info(tunnel, L2TP_MSG_DATA,
"%s: no session found (%u/%u). Passing up.\n",
}
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct l2tp_session *session;
+ int err;
session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
if (session != NULL) {
l2tp_session_set_header_len(session, tunnel->version);
+ err = l2tp_session_add_to_tunnel(tunnel, session);
+ if (err) {
+ kfree(session);
+
+ return ERR_PTR(err);
+ }
+
/* Bump the reference count. The session context is deleted
* only when this drops to zero.
*/
/* Ensure tunnel socket isn't deleted */
sock_hold(tunnel->sock);
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- l2tp_session_id_hash(tunnel, session_id));
- write_unlock_bh(&tunnel->hlist_lock);
-
- /* And to the global session list if L2TPv3 */
- if (tunnel->version != L2TP_HDR_VER_2) {
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_add_head_rcu(&session->global_hlist,
- l2tp_session_id_hash_2(pn, session_id));
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
- }
-
/* Ignore management session in session count value */
if (session->session_id != 0)
atomic_inc(&l2tp_session_count);
+
+ return session;
}
- return session;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
return tunnel;
}
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref);
struct l2tp_session *l2tp_session_find(struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref);
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
}
/* Show the tunnel or session context */
- if (pd->session == NULL)
+ if (!pd->session) {
l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
l2tp_dfs_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
goto out;
}
- session = l2tp_session_find(net, tunnel, session_id);
- if (session) {
- rc = -EEXIST;
- goto out;
- }
-
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
- if (!session) {
- rc = -ENOMEM;
+ if (IS_ERR(session)) {
+ rc = PTR_ERR(session);
goto out;
}
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(net, NULL, session_id, true);
+ if (!session)
goto discard;
tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;
/* Point to L2TP header */
optr = ptr = skb->data;
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
return sk_receive_skb(sk, skb, 1);
+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(net, NULL, session_id, true);
+ if (!session)
goto discard;
tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;
/* Point to L2TP header */
optr = ptr = skb->data;
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
+
return 0;
pass_up:
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
return sk_receive_skb(sk, skb, 1);
+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+ bool do_ref)
{
u32 tunnel_id;
u32 session_id;
if (info->attrs[L2TP_ATTR_IFNAME]) {
ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
- session = l2tp_session_find_by_ifname(net, ifname);
+ session = l2tp_session_get_by_ifname(net, ifname, do_ref);
} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel)
- session = l2tp_session_find(net, tunnel, session_id);
+ session = l2tp_session_get(net, tunnel, session_id,
+ do_ref);
}
return session;
session_id, peer_session_id, &cfg);
if (ret >= 0) {
- session = l2tp_session_find(net, tunnel, session_id);
- if (session)
+ session = l2tp_session_get(net, tunnel, session_id, false);
+ if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
+ l2tp_session_dec_refcount(session);
+ }
}
out:
struct l2tp_session *session;
u16 pw_type;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, true);
if (session == NULL) {
ret = -ENODEV;
goto out;
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
int ret = 0;
struct l2tp_session *session;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
goto out;
ret = l2tp_session_notify(&l2tp_nl_family, info,
session, L2TP_CMD_SESSION_MODIFY);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
struct sk_buff *msg;
int ret;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
- goto out;
+ goto err;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err_ref;
}
ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
0, session, L2TP_CMD_SESSION_GET);
if (ret < 0)
- goto err_out;
+ goto err_ref_msg;
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
-err_out:
- nlmsg_free(msg);
+ l2tp_session_dec_refcount(session);
-out:
+ return ret;
+
+err_ref_msg:
+ nlmsg_free(msg);
+err_ref:
+ l2tp_session_dec_refcount(session);
+err:
return ret;
}
goto out;
}
- session = l2tp_session_find_nth(tunnel, si);
+ session = l2tp_session_get_nth(tunnel, si, false);
if (session == NULL) {
ti++;
tunnel = NULL;
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- session, L2TP_CMD_SESSION_GET) < 0)
+ session, L2TP_CMD_SESSION_GET) < 0) {
+ l2tp_session_dec_refcount(session);
break;
+ }
+ l2tp_session_dec_refcount(session);
si++;
}
static void pppol2tp_session_destruct(struct sock *sk)
{
struct l2tp_session *session = sk->sk_user_data;
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
if (session) {
sk->sk_user_data = NULL;
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
l2tp_session_queue_purge(session);
sock_put(sk);
}
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-
release_sock(sk);
/* This will delete the session context via
int error = 0;
u32 tunnel_id, peer_tunnel_id;
u32 session_id, peer_session_id;
+ bool drop_refcnt = false;
int ver = 2;
int fd;
if (tunnel->peer_tunnel_id == 0)
tunnel->peer_tunnel_id = peer_tunnel_id;
- /* Create session if it doesn't already exist. We handle the
- * case where a session was previously created by the netlink
- * interface by checking that the session doesn't already have
- * a socket and its tunnel socket are what we expect. If any
- * of those checks fail, return EEXIST to the caller.
- */
- session = l2tp_session_find(sock_net(sk), tunnel, session_id);
- if (session == NULL) {
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
+ session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+ if (session) {
+ drop_refcnt = true;
+ ps = l2tp_session_priv(session);
+
+ /* Using a pre-existing session is fine as long as it hasn't
+ * been connected yet.
*/
- cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ if (ps->sock) {
+ error = -EEXIST;
+ goto end;
+ }
- /* Allocate and initialize a new session context. */
- session = l2tp_session_create(sizeof(struct pppol2tp_session),
- tunnel, session_id,
- peer_session_id, &cfg);
- if (session == NULL) {
- error = -ENOMEM;
+ /* consistency checks */
+ if (ps->tunnel_sock != tunnel->sock) {
+ error = -EEXIST;
goto end;
}
} else {
- ps = l2tp_session_priv(session);
- error = -EEXIST;
- if (ps->sock != NULL)
- goto end;
+ /* Default MTU must allow space for UDP/L2TP/PPP headers */
+ cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ cfg.mru = cfg.mtu;
- /* consistency checks */
- if (ps->tunnel_sock != tunnel->sock)
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, &cfg);
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto end;
+ }
}
/* Associate session with its PPPoL2TP socket */
session->name);
end:
+ if (drop_refcnt)
+ l2tp_session_dec_refcount(session);
release_sock(sk);
return error;
if (tunnel->sock == NULL)
goto out;
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = l2tp_session_find(net, tunnel, session_id);
- if (session != NULL)
- goto out;
-
/* Default MTU values. */
if (cfg->mtu == 0)
cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
cfg->mru = cfg->mtu;
/* Allocate and initialize a new session context. */
- error = -ENOMEM;
session = l2tp_session_create(sizeof(struct pppol2tp_session),
tunnel, session_id,
peer_session_id, cfg);
- if (session == NULL)
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto out;
+ }
ps = l2tp_session_priv(session);
ps->tunnel_sock = tunnel->sock;
if (stats.session_id != 0) {
/* resend to session ioctl handler */
struct l2tp_session *session =
- l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
+ l2tp_session_get(sock_net(sk), tunnel,
+ stats.session_id, true);
+
+ if (session) {
+ err = pppol2tp_session_ioctl(session, cmd,
+ arg);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ } else {
err = -EBADR;
+ }
break;
}
#ifdef CONFIG_XFRM
} else
err = pppol2tp_session_setsockopt(sk, session, optname, val);
- err = 0;
-
end_put_sess:
sock_put(sk);
end:
err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
sock_put(ps->tunnel_sock);
- } else
+ if (err)
+ goto end_put_sess;
+ } else {
err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+ if (err)
+ goto end_put_sess;
+ }
err = -EFAULT;
if (put_user(len, optlen))
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
/* Show the tunnel or session context.
*/
- if (pd->session == NULL)
+ if (!pd->session) {
pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
pppol2tp_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
ieee80211_recalc_ps(local);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ local->ops->wake_tx_queue) {
/* XXX: for AP_VLAN, actually track AP queues */
netif_tx_start_all_queues(dev);
} else if (dev) {
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
hlist_del_rcu(&exp->hnode);
net->ct.expect_count--;
- hlist_del(&exp->lnode);
+ hlist_del_rcu(&exp->lnode);
master_help->expecting[exp->class]--;
nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
/* two references : one for hash insert, one for the timer */
atomic_add(2, &exp->use);
- hlist_add_head(&exp->lnode, &master_help->expectations);
+ hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
master_help->expecting[exp->class]++;
hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
- BUG_ON(t == NULL);
+ if (!t) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
len = off + t->len + var_alloc_len;
alloc_size = t->alloc_size + var_alloc_len;
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
- BUG_ON(t == NULL);
+ if (!t) {
+ rcu_read_unlock();
+ return NULL;
+ }
newoff = ALIGN(old->len, t->align);
newlen = newoff + t->len + var_alloc_len;
RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
- rcu_barrier(); /* Wait for completion of call_rcu()'s */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
{
struct nf_conntrack_helper *h;
+ rcu_read_lock();
+
h = __nf_conntrack_helper_find(name, l3num, protonum);
#ifdef CONFIG_MODULES
if (h == NULL) {
- if (request_module("nfct-helper-%s", name) == 0)
+ rcu_read_unlock();
+ if (request_module("nfct-helper-%s", name) == 0) {
+ rcu_read_lock();
h = __nf_conntrack_helper_find(name, l3num, protonum);
+ } else {
+ return h;
+ }
}
#endif
if (h != NULL && !try_module_get(h->me))
h = NULL;
+ rcu_read_unlock();
+
return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
+/* Caller must hold the RCU read lock */
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_name(const char *name)
{
struct nf_ct_helper_expectfn *cur;
bool found = false;
- rcu_read_lock();
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
if (!strcmp(cur->name, name)) {
found = true;
break;
}
}
- rcu_read_unlock();
return found ? cur : NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
+/* Caller must hold the RCU read lock */
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
{
struct nf_ct_helper_expectfn *cur;
bool found = false;
- rcu_read_lock();
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
if (cur->expectfn == symbol) {
found = true;
break;
}
}
- rcu_read_unlock();
return found ? cur : NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
* treat the second attempt as a no-op instead of returning
* an error.
*/
- if (help && help->helper &&
- !strcmp(help->helper->name, helpname))
- return 0;
- else
- return -EBUSY;
+ err = -EBUSY;
+ if (help) {
+ rcu_read_lock();
+ helper = rcu_dereference(help->helper);
+ if (helper && !strcmp(helper->name, helpname))
+ err = 0;
+ rcu_read_unlock();
+ }
+
+ return err;
}
if (!strcmp(helpname, "")) {
err = 0;
if (test_bit(IPS_EXPECTED_BIT, &ct->status))
- events = IPCT_RELATED;
+ events = 1 << IPCT_RELATED;
else
- events = IPCT_NEW;
+ events = 1 << IPCT_NEW;
if (cda[CTA_LABELS] &&
ctnetlink_attach_labels(ct, cda) == 0)
last = (struct nf_conntrack_expect *)cb->args[1];
for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
- hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
- hnode) {
+ hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
+ hnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
rcu_read_lock();
last = (struct nf_conntrack_expect *)cb->args[1];
restart:
- hlist_for_each_entry(exp, &help->expectations, lnode) {
+ hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
if (cb->args[1]) {
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
+	/* No expectations are linked to this conntrack entry. */
+ if (!nfct_help(ct)) {
+ nf_ct_put(ct);
+ return 0;
+ }
+
c.data = ct;
err = netlink_dump_start(ctnl, skb, nlh, &c);
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
+ rcu_read_lock();
if (cda[CTA_EXPECT_HELP_NAME]) {
const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
helper = __nf_conntrack_helper_find(helpname, u3,
nf_ct_protonum(ct));
if (helper == NULL) {
+ rcu_read_unlock();
#ifdef CONFIG_MODULES
if (request_module("nfct-helper-%s", helpname) < 0) {
err = -EOPNOTSUPP;
goto err_ct;
}
+ rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname, u3,
nf_ct_protonum(ct));
if (helper) {
err = -EAGAIN;
- goto err_ct;
+ goto err_rcu;
}
+ rcu_read_unlock();
#endif
err = -EOPNOTSUPP;
goto err_ct;
exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
if (IS_ERR(exp)) {
err = PTR_ERR(exp);
- goto err_ct;
+ goto err_rcu;
}
err = nf_ct_expect_related_report(exp, portid, report);
nf_ct_expect_put(exp);
+err_rcu:
+ rcu_read_unlock();
err_ct:
nf_ct_put(ct);
return err;
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
+ synchronize_rcu();
}
module_init(ctnetlink_init);
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
+ synchronize_rcu();
+
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
if (idev != NULL) {
+ read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
newdst = ifa->addr;
addr = true;
break;
}
+ read_unlock_bh(&idev->lock);
}
rcu_read_unlock();
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
+struct nfnl_cthelper {
+ struct list_head list;
+ struct nf_conntrack_helper helper;
+};
+
+static LIST_HEAD(nfnl_cthelper_list);
+
static int
nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
int i, ret;
struct nf_conntrack_expect_policy *expect_policy;
struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+ unsigned int class_max;
ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set);
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
- helper->expect_class_max =
- ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
-
- if (helper->expect_class_max != 0 &&
- helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+ class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+ if (class_max == 0)
+ return -EINVAL;
+ if (class_max > NF_CT_MAX_EXPECT_CLASSES)
return -EOVERFLOW;
expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
- helper->expect_class_max, GFP_KERNEL);
+ class_max, GFP_KERNEL);
if (expect_policy == NULL)
return -ENOMEM;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < class_max; i++) {
if (!tb[NFCTH_POLICY_SET+i])
goto err;
if (ret < 0)
goto err;
}
+
+ helper->expect_class_max = class_max - 1;
helper->expect_policy = expect_policy;
return 0;
err:
struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_helper *helper;
+ struct nfnl_cthelper *nfcth;
int ret;
if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
return -EINVAL;
- helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
- if (helper == NULL)
+ nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
+ if (nfcth == NULL)
return -ENOMEM;
+ helper = &nfcth->helper;
ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
if (ret < 0)
- goto err;
+ goto err1;
strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
ret = nf_conntrack_helper_register(helper);
if (ret < 0)
- goto err;
+ goto err2;
+ list_add_tail(&nfcth->list, &nfnl_cthelper_list);
return 0;
-err:
- kfree(helper);
+err2:
+ kfree(helper->expect_policy);
+err1:
+ kfree(nfcth);
return ret;
}
+static int
+nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
+ struct nf_conntrack_expect_policy *new_policy,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_POLICY_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
+ nfnl_cthelper_expect_pol);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFCTH_POLICY_NAME] ||
+ !tb[NFCTH_POLICY_EXPECT_MAX] ||
+ !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+ return -EINVAL;
+
+ if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
+ return -EBUSY;
+
+ new_policy->max_expected =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+ new_policy->timeout =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+ return 0;
+}
+
+static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
+ struct nf_conntrack_helper *helper)
+{
+ struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
+ struct nf_conntrack_expect_policy *policy;
+ int i, err;
+
+ /* Check first that all policy attributes are well-formed, so we don't
+ * leave things in an inconsistent state on error.
+ */
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
+
+ if (!tb[NFCTH_POLICY_SET + i])
+ return -EINVAL;
+
+ err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
+ &new_policy[i],
+ tb[NFCTH_POLICY_SET + i]);
+ if (err < 0)
+ return err;
+ }
+ /* Now we can safely update them. */
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
+ policy = (struct nf_conntrack_expect_policy *)
+ &helper->expect_policy[i];
+ policy->max_expected = new_policy[i].max_expected;
+ policy->timeout = new_policy[i].timeout;
+ }
+
+ return 0;
+}
+
+static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
+ unsigned int class_max;
+ int err;
+
+ err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+ nfnl_cthelper_expect_policy_set);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFCTH_POLICY_SET_NUM])
+ return -EINVAL;
+
+ class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+ if (helper->expect_class_max + 1 != class_max)
+ return -EBUSY;
+
+ return nfnl_cthelper_update_policy_all(tb, helper);
+}
+
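Taken together, the hunks above change what expect_class_max means on the kernel side; a short sketch of the resulting invariant, condensed from the parsing code above and the dump path further down:

	/* netlink NFCTH_POLICY_SET_NUM still carries the number of classes */
	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));

	/* while helper->expect_class_max now stores the highest valid index */
	helper->expect_class_max = class_max - 1;

	/* so every walk over the policies runs to expect_class_max + 1 */
	for (i = 0; i < helper->expect_class_max + 1; i++)
		/* ... */;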
static int
nfnl_cthelper_update(const struct nlattr * const tb[],
struct nf_conntrack_helper *helper)
return -EBUSY;
if (tb[NFCTH_POLICY]) {
- ret = nfnl_cthelper_parse_expect_policy(helper,
- tb[NFCTH_POLICY]);
+ ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
if (ret < 0)
return ret;
}
const char *helper_name;
struct nf_conntrack_helper *cur, *helper = NULL;
struct nf_conntrack_tuple tuple;
- int ret = 0, i;
+ struct nfnl_cthelper *nlcth;
+ int ret = 0;
if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
return -EINVAL;
if (ret < 0)
return ret;
- rcu_read_lock();
- for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
- hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+ list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+ continue;
- if (strncmp(cur->name, helper_name,
- NF_CT_HELPER_NAME_LEN) != 0)
- continue;
+ if ((tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
- if ((tuple.src.l3num != cur->tuple.src.l3num ||
- tuple.dst.protonum != cur->tuple.dst.protonum))
- continue;
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
- if (nlh->nlmsg_flags & NLM_F_EXCL) {
- ret = -EEXIST;
- goto err;
- }
- helper = cur;
- break;
- }
+ helper = cur;
+ break;
}
- rcu_read_unlock();
if (helper == NULL)
ret = nfnl_cthelper_create(tb, &tuple);
ret = nfnl_cthelper_update(tb, helper);
return ret;
-err:
- rcu_read_unlock();
- return ret;
}
static int
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
- htonl(helper->expect_class_max)))
+ htonl(helper->expect_class_max + 1)))
goto nla_put_failure;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
nest_parms2 = nla_nest_start(skb,
(NFCTH_POLICY_SET+i) | NLA_F_NESTED);
if (nest_parms2 == NULL)
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const tb[])
{
- int ret = -ENOENT, i;
+ int ret = -ENOENT;
struct nf_conntrack_helper *cur;
struct sk_buff *skb2;
char *helper_name = NULL;
struct nf_conntrack_tuple tuple;
+ struct nfnl_cthelper *nlcth;
bool tuple_set = false;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
tuple_set = true;
}
- for (i = 0; i < nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+ list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
+ if (helper_name &&
+ strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+ continue;
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
- if (helper_name && strncmp(cur->name, helper_name,
- NF_CT_HELPER_NAME_LEN) != 0) {
- continue;
- }
- if (tuple_set &&
- (tuple.src.l3num != cur->tuple.src.l3num ||
- tuple.dst.protonum != cur->tuple.dst.protonum))
- continue;
-
- skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL) {
- ret = -ENOMEM;
- break;
- }
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
- ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
- nlh->nlmsg_seq,
- NFNL_MSG_TYPE(nlh->nlmsg_type),
- NFNL_MSG_CTHELPER_NEW, cur);
- if (ret <= 0) {
- kfree_skb(skb2);
- break;
- }
+ ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ NFNL_MSG_CTHELPER_NEW, cur);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ break;
+ }
- ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret > 0)
- ret = 0;
+ ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
- /* this avoids a loop in nfnetlink. */
- return ret == -EAGAIN ? -ENOBUFS : ret;
- }
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
}
return ret;
}
{
char *helper_name = NULL;
struct nf_conntrack_helper *cur;
- struct hlist_node *tmp;
struct nf_conntrack_tuple tuple;
bool tuple_set = false, found = false;
- int i, j = 0, ret;
+ struct nfnl_cthelper *nlcth, *n;
+ int j = 0, ret;
if (tb[NFCTH_NAME])
helper_name = nla_data(tb[NFCTH_NAME]);
tuple_set = true;
}
- for (i = 0; i < nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
- hnode) {
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
+ j++;
- j++;
+ if (helper_name &&
+ strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+ continue;
- if (helper_name && strncmp(cur->name, helper_name,
- NF_CT_HELPER_NAME_LEN) != 0) {
- continue;
- }
- if (tuple_set &&
- (tuple.src.l3num != cur->tuple.src.l3num ||
- tuple.dst.protonum != cur->tuple.dst.protonum))
- continue;
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
- found = true;
- nf_conntrack_helper_unregister(cur);
- }
+ found = true;
+ nf_conntrack_helper_unregister(cur);
+ kfree(cur->expect_policy);
+
+ list_del(&nlcth->list);
+ kfree(nlcth);
}
+
	/* Make sure we return success if we flush and there are no helpers */
return (found || j == 0) ? 0 : -ENOENT;
}
static void __exit nfnl_cthelper_exit(void)
{
struct nf_conntrack_helper *cur;
- struct hlist_node *tmp;
- int i;
+ struct nfnl_cthelper *nlcth, *n;
nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
- for (i=0; i<nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
- hnode) {
- /* skip non-userspace conntrack helpers. */
- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
- continue;
+ list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+ cur = &nlcth->helper;
- nf_conntrack_helper_unregister(cur);
- }
+ nf_conntrack_helper_unregister(cur);
+ kfree(cur->expect_policy);
+ kfree(nlcth);
}
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+ synchronize_rcu();
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- rcu_barrier();
}
module_init(cttimeout_init);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb) {
skb_tx_error(entskb);
- return NULL;
+ goto nlmsg_failure;
}
nlh = nlmsg_put(skb, 0, 0,
if (!nlh) {
skb_tx_error(entskb);
kfree_skb(skb);
- return NULL;
+ goto nlmsg_failure;
}
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = entry->state.pf;
}
nlh->nlmsg_len = skb->len;
+ if (seclen)
+ security_release_secctx(secdata, seclen);
return skb;
nla_put_failure:
skb_tx_error(entskb);
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+ if (seclen)
+ security_release_secctx(secdata, seclen);
return NULL;
}
enum nft_registers sreg:8;
enum nft_registers dreg:8;
u8 len;
+ bool autogen_seed:1;
u32 modulus;
u32 seed;
u32 offset;
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
- if (tb[NFTA_HASH_SEED])
+ if (tb[NFTA_HASH_SEED]) {
priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
- else
+ } else {
+ priv->autogen_seed = true;
get_random_bytes(&priv->seed, sizeof(priv->seed));
+ }
return nft_validate_register_load(priv->sreg, len) &&
nft_validate_register_store(ctx, priv->dreg, NULL,
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
+ if (!priv->autogen_seed &&
+ nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
goto nla_put_failure;
if (priv->offset != 0)
if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
tcp_hdrlen = tcph->doff * 4;
- if (len < tcp_hdrlen)
+ if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
if (len > tcp_hdrlen)
return 0;
+ /* tcph->doff has 4 bits, do not wrap it to 0 */
+ if (tcp_hdrlen >= 15 * 4)
+ return 0;
+
/*
* MSS Option not found ?! add it..
*/
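The two added checks close both ends of the doff range: doff is a 4-bit word count, so a well-formed TCP header spans 20 to 60 bytes. A worked sketch of the wraparound the second check prevents (TCPOLEN_MSS is the standard 4-byte MSS option length):

	/* doff == 15 -> tcp_hdrlen == 60: the header is already at its
	 * maximum. Appending the 4-byte MSS option would need doff = 16,
	 * which does not fit in 4 bits and would wrap to 0.
	 */
	if (tcp_hdrlen >= 15 * 4)
		return 0;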
rcu_read_lock();
indev = __in6_dev_get(skb->dev);
- if (indev)
+ if (indev) {
+ read_lock_bh(&indev->lock);
list_for_each_entry(ifa, &indev->addr_list, if_list) {
if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
continue;
laddr = &ifa->addr;
break;
}
+ read_unlock_bh(&indev->lock);
+ }
rcu_read_unlock();
return laddr ? laddr : daddr;
*/
if (nf_ct_is_confirmed(ct))
nf_ct_delete(ct, 0, 0);
- else
- nf_conntrack_put(&ct->ct_general);
+
+ nf_conntrack_put(&ct->ct_general);
nf_ct_set(skb, NULL, 0);
return false;
}
/* Link layer. */
clear_vlan(key);
- if (key->mac_proto == MAC_PROTO_NONE) {
+ if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
if (unlikely(eth_type_vlan(skb->protocol)))
return -EINVAL;
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
- return key_extract(skb, key);
+ int res;
+
+ res = key_extract(skb, key);
+ if (!res)
+ key->mac_proto &= ~SW_FLOW_KEY_INVALID;
+
+ return res;
}
static int key_extract_mac_proto(struct sk_buff *skb)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
po->tp_reserve = val;
return 0;
}
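A hedged note on why INT_MAX is the bound here: tp_reserve is later added to tp_hdrlen and compared against the frame size (see the ring-setup hunk below), and an unsigned value near UINT_MAX makes that sum wrap. A minimal illustration of the failure mode being blocked:

	/* without the cap: val = 0xfffffff0 is accepted, then later
	 *   po->tp_hdrlen + po->tp_reserve
	 * wraps to a small number, so the "frame too small" check passes
	 * when it should not.
	 */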
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
if (po->tp_version >= TPACKET_V3 &&
- (int)(req->tp_block_size -
- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+ req->tp_block_size <=
+ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
+ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
}
}
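Both ring checks above guard 32-bit arithmetic. Promoting the BLK_PLUS_PRIV() operand to u64 keeps a tp_sizeof_priv near UINT_MAX from wrapping inside the macro, and the total ring size is validated by division instead of multiplication; a sketch, assuming u32 request fields:

	/* old check, defeatable: with tp_sizeof_priv close to UINT_MAX,
	 * BLK_PLUS_PRIV() wraps and the (int) cast of the difference can
	 * come out positive even though the priv area cannot fit.
	 */

	/* block_size * block_nr can exceed u32, so test without multiplying */
	if (req->tp_block_size > UINT_MAX / req->tp_block_nr)
		goto out;	/* total ring size would overflow, reject */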
#ifdef CONFIG_NET_SCHED
- if (dev->qdisc)
+ if (dev->qdisc != &noop_qdisc)
qdisc_hash_add(dev->qdisc);
#endif
}
if (!sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
+ if (sctp_stream_new(asoc, gfp))
+ goto fail_init;
+
/* Assume that peer would support both address types unless we are
* told otherwise.
*/
/* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
- goto fail_init;
+ goto stream_free;
asoc->active_key_id = ep->active_key_id;
asoc->prsctp_enable = ep->prsctp_enable;
return asoc;
+stream_free:
+ sctp_stream_free(asoc->stream);
fail_init:
sock_put(asoc->base.sk);
sctp_endpoint_put(asoc->ep);
/* Update the association's pmtu and frag_point by going through all the
* transports. This routine is called when a transport's PMTU has changed.
*/
-void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
struct sctp_transport *t;
__u32 pmtu = 0;
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(sk, t,
- SCTP_TRUNC4(dst_mtu(t->dst)));
+ sctp_transport_update_pmtu(
+ t, SCTP_TRUNC4(dst_mtu(t->dst)));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
if (t->param_flags & SPP_PMTUD_ENABLE) {
/* Update transports view of the MTU */
- sctp_transport_update_pmtu(sk, t, pmtu);
+ sctp_transport_update_pmtu(t, pmtu);
/* Update association pmtu. */
- sctp_assoc_sync_pmtu(sk, asoc);
+ sctp_assoc_sync_pmtu(asoc);
}
/* Retransmit with the new pmtu setting.
{
struct sctp_transport *tp = packet->transport;
struct sctp_association *asoc = tp->asoc;
+ struct sock *sk;
pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
-
packet->vtag = vtag;
- if (asoc && tp->dst) {
- struct sock *sk = asoc->base.sk;
-
- rcu_read_lock();
- if (__sk_dst_get(sk) != tp->dst) {
- dst_hold(tp->dst);
- sk_setup_caps(sk, tp->dst);
- }
-
- if (sk_can_gso(sk)) {
- struct net_device *dev = tp->dst->dev;
+ /* do the following jobs only once per flush schedule */
+ if (!sctp_packet_empty(packet))
+ return;
- packet->max_size = dev->gso_max_size;
- } else {
- packet->max_size = asoc->pathmtu;
- }
- rcu_read_unlock();
+ /* set packet max_size with pathmtu */
+ packet->max_size = tp->pathmtu;
+ if (!asoc)
+ return;
- } else {
- packet->max_size = tp->pathmtu;
+ /* update dst or transport pathmtu if in need */
+ sk = asoc->base.sk;
+ if (!sctp_transport_dst_check(tp)) {
+ sctp_transport_route(tp, NULL, sctp_sk(sk));
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ } else if (!sctp_transport_pmtu_check(tp)) {
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
}
- if (ecn_capable && sctp_packet_empty(packet)) {
- struct sctp_chunk *chunk;
+ /* If there is a prepend chunk, stick it on the list before
+ * any other chunks get appended.
+ */
+ if (ecn_capable) {
+ struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
- /* If there a is a prepend chunk stick it on the list before
- * any other chunks get appended.
- */
- chunk = sctp_get_ecne_prepend(asoc);
if (chunk)
sctp_packet_append_chunk(packet, chunk);
}
+
+ if (!tp->dst)
+ return;
+
+ /* set packet max_size with gso_max_size if gso is enabled */
+ rcu_read_lock();
+ if (__sk_dst_get(sk) != tp->dst) {
+ dst_hold(tp->dst);
+ sk_setup_caps(sk, tp->dst);
+ }
+ packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
+ : asoc->pathmtu;
+ rcu_read_unlock();
}
/* Initialize the packet structure. */
sh->vtag = htonl(packet->vtag);
sh->checksum = 0;
- /* update dst if in need */
- if (!sctp_transport_dst_check(tp)) {
- sctp_transport_route(tp, NULL, sctp_sk(sk));
- if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
- sctp_assoc_sync_pmtu(sk, asoc);
- }
+ /* drop packet if no dst */
dst = dst_clone(tp->dst);
if (!dst) {
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
*/
if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
- !chunk->msg->force_delay)
+ !asoc->force_delay)
/* Nothing unacked */
return SCTP_XMIT_OK;
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
- if (chunk->sinfo.sinfo_stream >=
- asoc->c.sinit_num_ostreams) {
+ if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
/* Mark as failed send. */
sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
sctp_seq_dump_remote_addrs(seq, assoc);
seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
"%8d %8d %8d %8d",
- assoc->hbinterval, assoc->c.sinit_max_instreams,
- assoc->c.sinit_num_ostreams, assoc->max_retrans,
+ assoc->hbinterval, assoc->stream->incnt,
+ assoc->stream->outcnt, assoc->max_retrans,
assoc->init_retries, assoc->shutdown_retries,
assoc->rtx_data_chunks,
atomic_read(&sk->sk_wmem_alloc),
* association.
*/
if (!asoc->temp) {
- int error;
-
- asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
- asoc->c.sinit_num_ostreams, gfp);
- if (!asoc->stream)
+ if (sctp_stream_init(asoc, gfp))
goto clean_up;
- error = sctp_assoc_set_id(asoc, gfp);
- if (error)
+ if (sctp_assoc_set_id(asoc, gfp))
goto clean_up;
}
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ if (ntohs(skip->stream) >= asoc->stream->incnt)
goto discard_noforce;
}
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ if (ntohs(skip->stream) >= asoc->stream->incnt)
goto gen_shutdown;
}
* and discard the DATA chunk.
*/
sid = ntohs(data_hdr->stream);
- if (sid >= asoc->c.sinit_max_instreams) {
+ if (sid >= asoc->stream->incnt) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
}
if (asoc->pmtu_pending)
- sctp_assoc_pending_pmtu(sk, asoc);
+ sctp_assoc_pending_pmtu(asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
}
/* Check for invalid stream. */
- if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+ if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
err = -EINVAL;
goto out_free;
}
err = PTR_ERR(datamsg);
goto out_free;
}
- datamsg->force_delay = !!(msg->msg_flags & MSG_MORE);
+ asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
- sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+ sctp_assoc_sync_pmtu(asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
} else {
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
- sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+ sctp_assoc_sync_pmtu(asoc);
}
} else if (asoc) {
asoc->param_flags =
info->sctpi_rwnd = asoc->a_rwnd;
info->sctpi_unackdata = asoc->unack_data;
info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- info->sctpi_instrms = asoc->c.sinit_max_instreams;
- info->sctpi_outstrms = asoc->c.sinit_num_ostreams;
+ info->sctpi_instrms = asoc->stream->incnt;
+ info->sctpi_outstrms = asoc->stream->outcnt;
list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
info->sctpi_inqueue++;
list_for_each(pos, &asoc->outqueue.out_chunk_list)
status.sstat_unackdata = asoc->unack_data;
status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- status.sstat_instrms = asoc->c.sinit_max_instreams;
- status.sstat_outstrms = asoc->c.sinit_num_ostreams;
+ status.sstat_instrms = asoc->stream->incnt;
+ status.sstat_outstrms = asoc->stream->outcnt;
status.sstat_fragmentation_point = asoc->frag_point;
status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
if (sock->state != SS_UNCONNECTED)
goto out;
+ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+ goto out;
+
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp)
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
{
struct sctp_stream *stream;
int i;
stream = kzalloc(sizeof(*stream), gfp);
if (!stream)
- return NULL;
+ return -ENOMEM;
- stream->outcnt = outcnt;
+ stream->outcnt = asoc->c.sinit_num_ostreams;
stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
if (!stream->out) {
kfree(stream);
- return NULL;
+ return -ENOMEM;
}
for (i = 0; i < stream->outcnt; i++)
stream->out[i].state = SCTP_STREAM_OPEN;
- stream->incnt = incnt;
+ asoc->stream = stream;
+
+ return 0;
+}
+
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
+{
+ struct sctp_stream *stream = asoc->stream;
+ int i;
+
+ /* Initial stream->out size may be very big, so free it and alloc
+ * a new one with new outcnt to save memory.
+ */
+ kfree(stream->out);
+ stream->outcnt = asoc->c.sinit_num_ostreams;
+ stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
+ if (!stream->out)
+ goto nomem;
+
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_OPEN;
+
+ stream->incnt = asoc->c.sinit_max_instreams;
stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
if (!stream->in) {
kfree(stream->out);
- kfree(stream);
- return NULL;
+ goto nomem;
}
- return stream;
+ return 0;
+
+nomem:
+ asoc->stream = NULL;
+ kfree(stream);
+
+ return -ENOMEM;
}
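The refactor above splits stream setup into two phases; a condensed sketch of the call order (the enclosing function names come from the surrounding kernel code and are given for orientation only):

	/* 1. association creation (sctp_association_init):
	 *      sctp_stream_new()  - allocate asoc->stream and stream->out,
	 *                           sized from the local sinit_num_ostreams
	 * 2. INIT/INIT-ACK processing (sctp_process_init, the "!asoc->temp"
	 *    hunk above):
	 *      sctp_stream_init() - reallocate stream->out with the
	 *                           negotiated outcnt, then allocate
	 *                           stream->in from sinit_max_instreams
	 */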
void sctp_stream_free(struct sctp_stream *stream)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
- struct dst_entry *dst;
+ struct dst_entry *dst = sctp_transport_dst_check(t);
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
- __func__, pmtu,
- SCTP_DEFAULT_MINSEGMENT);
+ __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment size and disable
* pmtu discovery on this transport.
*/
t->pathmtu = pmtu;
}
- dst = sctp_transport_dst_check(t);
- if (!dst)
- t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
-
if (dst) {
- dst->ops->update_pmtu(dst, sk, NULL, pmtu);
-
+ dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
dst = sctp_transport_dst_check(t);
- if (!dst)
- t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
}
+
+ if (!dst)
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
}
/* Caches the dst entry and source address for a transport's destination
xprt = &svsk->sk_xprt;
svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
+ set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
serv->sv_bc_xprt = xprt;
xprt = &cma_xprt->sc_xprt;
svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
+ set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
serv->sv_bc_xprt = xprt;
dprintk("svcrdma: %s(%p)\n", __func__, xprt);
/* Age scan results with time spent in suspend */
cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
- if (rdev->ops->resume) {
- rtnl_lock();
- if (rdev->wiphy.registered)
- ret = rdev_resume(rdev);
- rtnl_unlock();
- }
+ rtnl_lock();
+ if (rdev->wiphy.registered && rdev->ops->resume)
+ ret = rdev_resume(rdev);
+ rtnl_unlock();
return ret;
}
up = nla_data(rp);
ulen = xfrm_replay_state_esn_len(up);
- if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+ /* Check the overall length and the internal bitmap length to avoid
+ * potential overflow. */
+ if (nla_len(rp) < ulen ||
+ xfrm_replay_state_esn_len(replay_esn) != ulen ||
+ replay_esn->bmp_len != up->bmp_len)
+ return -EINVAL;
+
+ if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
return -EINVAL;
return 0;
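For context, the ESN state length is derived from the bitmap size, so the added checks pin down the one field userspace could previously skew; a sketch assuming the usual xfrm definitions:

	/* xfrm_replay_state_esn ends in a flexible __u32 bitmap, so
	 *   len == sizeof(struct xfrm_replay_state_esn)
	 *          + replay_esn->bmp_len * sizeof(__u32)
	 * Matching ulen alone lets a crafted bmp_len slip through, hence
	 * the explicit bmp_len equality and replay_window bound above.
	 */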
if (stx->stx_mask & STATX_BTIME)
print_time(" Birth: ", &stx->stx_btime);
- if (stx->stx_attributes) {
- unsigned char bits;
+ if (stx->stx_attributes_mask) {
+ unsigned char bits, mbits;
int loop, byte;
static char attr_representation[64 + 1] =
printf("Attributes: %016llx (", stx->stx_attributes);
for (byte = 64 - 8; byte >= 0; byte -= 8) {
bits = stx->stx_attributes >> byte;
+ mbits = stx->stx_attributes_mask >> byte;
for (loop = 7; loop >= 0; loop--) {
int bit = byte + loop;
- if (bits & 0x80)
+ if (!(mbits & 0x80))
+ putchar('.'); /* Not supported */
+ else if (bits & 0x80)
putchar(attr_representation[63 - bit]);
else
- putchar('-');
+ putchar('-'); /* Not set */
bits <<= 1;
+ mbits <<= 1;
}
if (byte)
putchar(' ');
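With the mask consulted, each attribute column now has three states instead of two; summarizing the output convention established above:

	/* per attribute bit:
	 *   '.'    - not supported by the filesystem (mask bit clear)
	 *   '-'    - supported but not set
	 *   letter - supported and set (from attr_representation)
	 */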
# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
+# cc-if-fullversion
+# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
+cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
+
# cc-ldoption
# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
cc-ldoption = $(call try-run,\
# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
# and locates generated .h files
# FIXME: Replace both with specific CFLAGS* statements in the makefiles
-__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \
+__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
$(call flags,_c_flags)
__a_flags = $(call flags,_a_flags)
__cpp_flags = $(call flags,_cpp_flags)
current = menu;
display_tree_part();
gtk_widget_set_sensitive(back_btn, TRUE);
- } else if ((col == COL_OPTION)) {
+ } else if (col == COL_OPTION) {
toggle_sym_value(menu);
gtk_tree_view_expand_row(view, path, TRUE);
}
/* NOTE: overflow flag is not cleared */
spin_unlock_irqrestore(&f->lock, flags);
+ /* close the old pool and wait until all users are gone */
+ snd_seq_pool_mark_closing(oldpool);
+ snd_use_lock_sync(&f->use_lock);
+
/* release cells in old pool */
for (cell = oldhead; cell; cell = next) {
next = cell->next;
ALC292_FIXUP_DISABLE_AAMIX,
ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
ALC293_FIXUP_LENOVO_SPK_NOISE,
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE
},
+ [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE
+ },
[ALC275_FIXUP_DELL_XPS] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
.type = HDA_FIXUP_FUNC,
.v.func = alc298_fixup_speaker_volume,
.chained = true,
- .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
},
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
.type = HDA_FIXUP_PINS,
}
#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
-#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8)
+#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8)
static struct {
int rate;
pin->mst_capable = false;
/* if not MST, default is port[0] */
hport = &pin->ports[0];
- goto out;
} else {
for (i = 0; i < pin->num_ports; i++) {
pin->mst_capable = true;
if (pin->ports[i].id == pipe) {
hport = &pin->ports[i];
- goto out;
+ break;
}
}
}
+
+ if (hport)
+ hdac_hdmi_present_sense(pin, hport);
}
-out:
- if (pin && hport)
- hdac_hdmi_present_sense(pin, hport);
}
static struct i915_audio_component_audio_ops aops = {
struct hdac_hdmi_pin *pin, *pin_next;
struct hdac_hdmi_cvt *cvt, *cvt_next;
struct hdac_hdmi_pcm *pcm, *pcm_next;
- struct hdac_hdmi_port *port;
+ struct hdac_hdmi_port *port, *port_next;
int i;
snd_soc_unregister_codec(&edev->hdac.dev);
if (list_empty(&pcm->port_list))
continue;
- list_for_each_entry(port, &pcm->port_list, head)
- port = NULL;
+ list_for_each_entry_safe(port, port_next,
+ &pcm->port_list, head)
+ list_del(&port->head);
list_del(&pcm->head);
kfree(pcm);
static void rt5665_jd_check_handler(struct work_struct *work)
{
struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
- calibrate_work.work);
+ jd_check_work.work);
if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) {
/* jack out */
static const SOC_ENUM_SINGLE_DECL(
rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
- RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
+ RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux =
SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum);
{"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc},
{"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
{"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc},
+ {"I2S1 ASRC", NULL, "CLKDET"},
+ {"I2S2 ASRC", NULL, "CLKDET"},
+ {"I2S3 ASRC", NULL, "CLKDET"},
/*Vref*/
{"Mic Det Power", NULL, "Vref2"},
{"Mono MIX", "MONOVOL Switch", "MONOVOL"},
{"Mono Amp", NULL, "Mono MIX"},
{"Mono Amp", NULL, "Vref2"},
+ {"Mono Amp", NULL, "Vref3"},
{"Mono Amp", NULL, "CLKDET SYS"},
{"Mono Amp", NULL, "CLKDET MONO"},
{"Mono Playback", "Switch", "Mono Amp"},
/* Enhance performance*/
regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK,
- RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09);
+ RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_12);
INIT_DELAYED_WORK(&rt5665->jack_detect_work,
rt5665_jack_detect_handler);
#define RT5665_HP_DRIVER_MASK (0x3 << 2)
#define RT5665_HP_DRIVER_1X (0x0 << 2)
#define RT5665_HP_DRIVER_3X (0x1 << 2)
-#define RT5665_HP_DRIVER_5X (0x2 << 2)
+#define RT5665_HP_DRIVER_5X (0x3 << 2)
#define RT5665_LDO1_DVO_MASK (0x3)
#define RT5665_LDO1_DVO_09 (0x0)
#define RT5665_LDO1_DVO_10 (0x1)
mutex_lock(&ctl->dsp->pwr_lock);
- memcpy(ctl->cache, p, ctl->len);
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ ret = -EPERM;
+ else
+ memcpy(ctl->cache, p, ctl->len);
ctl->set = 1;
if (ctl->enabled && ctl->dsp->running)
ctl->set = 1;
if (ctl->enabled && ctl->dsp->running)
ret = wm_coeff_write_control(ctl, ctl->cache, size);
+ else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ ret = -EPERM;
}
mutex_unlock(&ctl->dsp->pwr_lock);
mutex_lock(&ctl->dsp->pwr_lock);
- if (ctl->enabled)
+ if (ctl->enabled && ctl->dsp->running)
ret = wm_coeff_write_acked_control(ctl, val);
else
ret = -EPERM;
clk = devm_get_clk_from_child(dev, node, NULL);
if (!IS_ERR(clk)) {
simple_dai->sysclk = clk_get_rate(clk);
+ simple_dai->clk = clk;
} else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
simple_dai->sysclk = val;
} else {
if (bc->set_params != SKL_PARAM_INIT)
continue;
- mconfig->formats_config.caps = (u32 *)&bc->params;
+ mconfig->formats_config.caps = (u32 *)bc->params;
mconfig->formats_config.caps_size = bc->size;
break;
config SND_SOC_MT2701_CS42448
tristate "ASoc Audio driver for MT2701 with CS42448 codec"
- depends on SND_SOC_MT2701
+ depends on SND_SOC_MT2701 && I2C
select SND_SOC_CS42XX8_I2C
select SND_SOC_BT_SCO
help
struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
struct device *dev = rsnd_priv_to_dev(priv);
u32 data;
+ u32 path[] = {
+ [1] = 1 << 0,
+ [5] = 1 << 8,
+ [6] = 1 << 12,
+ [9] = 1 << 15,
+ };
if (!mix && !dvc)
return 0;
+ if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
+ return -ENXIO;
+
if (mix) {
struct rsnd_dai *rdai;
struct rsnd_mod *src;
struct rsnd_dai_stream *tio;
int i;
- u32 path[] = {
- [0] = 0,
- [1] = 1 << 0,
- [2] = 0,
- [3] = 0,
- [4] = 0,
- [5] = 1 << 8
- };
/*
* it is assumed that the integrator has a good understanding about
} else {
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
- u32 path[] = {
- [0] = 0x30000,
- [1] = 0x30001,
- [2] = 0x40000,
- [3] = 0x10000,
- [4] = 0x20000,
- [5] = 0x40100
+ u8 cmd_case[] = {
+ [0] = 0x3,
+ [1] = 0x3,
+ [2] = 0x4,
+ [3] = 0x1,
+ [4] = 0x2,
+ [5] = 0x4,
+ [6] = 0x1,
+ [9] = 0x2,
};
- data = path[rsnd_mod_id(src)];
+ data = path[rsnd_mod_id(src)] |
+ cmd_case[rsnd_mod_id(src)] << 16;
}
dev_dbg(dev, "ctu/mix path = 0x%08x", data);
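The split into a path-bit table and a 4-bit cmd_case table preserves the old packed values while adding entries for ids 6 and 9; a worked check against values present in both the old and new tables:

	/* id 1: old path[1] == 0x30001
	 *   new: path[1] == 1 << 0 == 0x00001, cmd_case[1] == 0x3
	 *   data = 0x00001 | (0x3 << 16) == 0x30001  (unchanged)
	 * id 5: old path[5] == 0x40100
	 *   new: path[5] == 1 << 8 == 0x00100, cmd_case[5] == 0x4
	 *   data = 0x00100 | (0x4 << 16) == 0x40100  (unchanged)
	 */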
return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}
+static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
+{
+ struct rsnd_mod *mod = rsnd_mod_get(dma);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+ void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
+ u32 val = ioread32(addr);
+
+ val &= ~mask;
+ val |= (data & mask);
+
+ iowrite32(val, addr);
+}
+
static int rsnd_dmapp_stop(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
int i;
- rsnd_dmapp_write(dma, 0, PDMACHCR);
+ rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
for (i = 0; i < 1024; i++) {
- if (0 == rsnd_dmapp_read(dma, PDMACHCR))
+ if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
return 0;
udelay(1);
}
mask1 = (1 << 4) | (1 << 20); /* mask sync bit */
mask2 = (1 << 4); /* mask sync bit */
val1 = val2 = 0;
- if (rsnd_ssi_is_pin_sharing(io)) {
+ if (id == 8) {
+ /*
+ * SSI8 shares its pin with SSI7, nothing to do.
+ */
+ } else if (rsnd_ssi_is_pin_sharing(io)) {
int shift = -1;
switch (id) {
{
struct snd_soc_platform *platform = rtd->platform;
- return platform->driver->pcm_new(rtd);
+ if (platform->driver->pcm_new)
+ return platform->driver->pcm_new(rtd);
+ else
+ return 0;
}
static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_soc_platform *platform = rtd->platform;
- platform->driver->pcm_free(pcm);
+ if (platform->driver->pcm_free)
+ platform->driver->pcm_free(pcm);
}
/**
struct uniperif *reader = priv->dai_data.uni;
int ret;
+ reader->substream = substream;
+
if (!UNIPERIF_TYPE_IS_TDM(reader))
return 0;
/* Stop the reader */
uni_reader_stop(reader);
}
+ reader->substream = NULL;
}
static const struct snd_soc_dai_ops uni_reader_dai_ops = {
return 0;
}
-static const struct snd_kcontrol_new sun8i_output_left_mixer_controls[] = {
- SOC_DAPM_SINGLE("LSlot 0", SUN8I_DAC_MXR_SRC,
- SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 1, 0),
- SOC_DAPM_SINGLE("LSlot 1", SUN8I_DAC_MXR_SRC,
- SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 1, 0),
- SOC_DAPM_SINGLE("DACL", SUN8I_DAC_MXR_SRC,
- SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 1, 0),
- SOC_DAPM_SINGLE("ADCL", SUN8I_DAC_MXR_SRC,
- SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 1, 0),
-};
-
-static const struct snd_kcontrol_new sun8i_output_right_mixer_controls[] = {
- SOC_DAPM_SINGLE("RSlot 0", SUN8I_DAC_MXR_SRC,
+static const struct snd_kcontrol_new sun8i_dac_mixer_controls[] = {
+ SOC_DAPM_DOUBLE("AIF1 Slot 0 Digital DAC Playback Switch",
+ SUN8I_DAC_MXR_SRC,
+ SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L,
SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0),
- SOC_DAPM_SINGLE("RSlot 1", SUN8I_DAC_MXR_SRC,
+ SOC_DAPM_DOUBLE("AIF1 Slot 1 Digital DAC Playback Switch",
+ SUN8I_DAC_MXR_SRC,
+ SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L,
SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0),
- SOC_DAPM_SINGLE("DACR", SUN8I_DAC_MXR_SRC,
+ SOC_DAPM_DOUBLE("AIF2 Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
+ SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL,
SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0),
- SOC_DAPM_SINGLE("ADCR", SUN8I_DAC_MXR_SRC,
+ SOC_DAPM_DOUBLE("ADC Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
+ SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL,
SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0),
};
SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA,
0, NULL, 0),
- /* Analog DAC */
- SND_SOC_DAPM_DAC("Digital Left DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL,
- SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0),
- SND_SOC_DAPM_DAC("Digital Right DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL,
- SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
+ /* Analog DAC AIF */
+ SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Left", "Playback", 0,
+ SUN8I_AIF1_DACDAT_CTRL,
+ SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0),
+ SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Right", "Playback", 0,
+ SUN8I_AIF1_DACDAT_CTRL,
+ SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
/* DAC Mixers */
- SND_SOC_DAPM_MIXER("Left DAC Mixer", SND_SOC_NOPM, 0, 0,
- sun8i_output_left_mixer_controls,
- ARRAY_SIZE(sun8i_output_left_mixer_controls)),
- SND_SOC_DAPM_MIXER("Right DAC Mixer", SND_SOC_NOPM, 0, 0,
- sun8i_output_right_mixer_controls,
- ARRAY_SIZE(sun8i_output_right_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
+ sun8i_dac_mixer_controls,
+ ARRAY_SIZE(sun8i_dac_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
+ sun8i_dac_mixer_controls,
+ ARRAY_SIZE(sun8i_dac_mixer_controls)),
/* Clocks */
SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA,
SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL,
SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0),
-
- SND_SOC_DAPM_OUTPUT("HP"),
};
static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
{ "DAC", NULL, "MODCLK DAC" },
/* DAC Routes */
- { "Digital Left DAC", NULL, "DAC" },
- { "Digital Right DAC", NULL, "DAC" },
+ { "AIF1 Slot 0 Right", NULL, "DAC" },
+ { "AIF1 Slot 0 Left", NULL, "DAC" },
/* DAC Mixer Routes */
- { "Left DAC Mixer", "LSlot 0", "Digital Left DAC"},
- { "Right DAC Mixer", "RSlot 0", "Digital Right DAC"},
-
- /* End of route : HP out */
- { "HP", NULL, "Left DAC Mixer" },
- { "HP", NULL, "Right DAC Mixer" },
+ { "Left Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
+ "AIF1 Slot 0 Left"},
+ { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
+ "AIF1 Slot 0 Right"},
};
static struct snd_soc_dai_ops sun8i_codec_dai_ops = {
.off = OFF, \
.imm = 0 })
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
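The new macro is exercised by the "spill with xadd" and "illegal alu op, 5" tests below; a minimal standalone fragment showing the intended use, with registers chosen purely for illustration:

	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	/* *(u64 *)(fp - 8) += r1, atomically */
	BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_1, 0),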
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
.name = "powerpc",
.init = powerpc__annotate_init,
},
+ {
+ .name = "s390",
+ .objdump = {
+ .comment_char = '#',
+ },
+ },
};
static void ins__delete(struct ins_operands *ops)
*/
case 0x2C: /* Westmere EP - Gulftown */
cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+ break;
case 0x2A: /* SNB */
case 0x2D: /* SNB Xeon */
case 0x3A: /* IVB */
\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters.
\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
+\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
\fBPkgWatt\fP Watts consumed by the whole package.
\fBCorWatt\fP Watts consumed by the core part of the package.
* it is possible for mperf's non-halted cycles + idle states
* to exceed the TSC's total cycles: show c1 = 0% in that case.
*/
- if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+ if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
old->c1 = 0;
else {
/* normal case, derive c1 */
if (fp == NULL)
fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
- else
+ else {
rewind(fp);
+ fflush(fp);
+ }
retval = fscanf(fp, "%d", &gfx_cur_mhz);
if (retval != 1)
return 0;
fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
- "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
+ "(high %d guar %d eff %d low %d)\n",
cpu, msr,
(unsigned int)HWP_HIGHEST_PERF(msr),
(unsigned int)HWP_GUARANTEED_PERF(msr),
return 0;
fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
- "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
+ "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
cpu, msr,
(unsigned int)(((msr) >> 0) & 0xff),
(unsigned int)(((msr) >> 8) & 0xff),
return 0;
fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
- "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
+ "(min %d max %d des %d epp 0x%x window 0x%x)\n",
cpu, msr,
(unsigned int)(((msr) >> 0) & 0xff),
(unsigned int)(((msr) >> 8) & 0xff),
case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
- do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
BIC_PRESENT(BIC_PKG__);
BIC_PRESENT(BIC_RAM__);
if (rapl_joules) {
BIC_PRESENT(BIC_Pkg_J);
BIC_PRESENT(BIC_Cor_J);
BIC_PRESENT(BIC_RAM_J);
+ BIC_PRESENT(BIC_GFX_J);
} else {
BIC_PRESENT(BIC_PkgWatt);
BIC_PRESENT(BIC_CorWatt);
BIC_PRESENT(BIC_RAMWatt);
+ BIC_PRESENT(BIC_GFXWatt);
}
break;
case INTEL_FAM6_HASWELL_X: /* HSX */
int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
unsigned long long msr;
- unsigned int dts;
+ unsigned int dts, dts2;
int cpu;
if (!(do_dts || do_ptm))
fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
cpu, msr, tcc_activation_temp - dts);
-#ifdef THERM_DEBUG
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
return 0;
dts2 = (msr >> 8) & 0x7F;
fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
}
- if (do_dts) {
+ if (do_dts && debug) {
unsigned int resolution;
if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
cpu, msr, tcc_activation_temp - dts, resolution);
-#ifdef THERM_DEBUG
if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
return 0;
dts2 = (msr >> 8) & 0x7F;
fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
}
return 0;
}
void print_version() {
- fprintf(outf, "turbostat version 17.02.24"
+ fprintf(outf, "turbostat version 17.04.12"
" - Len Brown <lenb@kernel.org>\n");
}
LIBDIR := ../../../lib
BPFDIR := $(LIBDIR)/bpf
+APIDIR := ../../../include/uapi
+GENDIR := ../../../../include/generated
+GENHDR := $(GENDIR)/autoconf.h
-CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR)
+ifneq ($(wildcard $(GENHDR)),)
+ GENFLAGS := -DHAVE_GENHDR
+endif
+
+CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS)
LDLIBS += -lcap
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
#include <bpf/bpf.h>
+#ifdef HAVE_GENHDR
+# include "autoconf.h"
+#else
+# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
+# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
+# endif
+#endif
+
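A short note on what this gating buys, condensed from the do_test_single() changes further down:

	/* When built in-tree, autoconf.h supplies
	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS; out of tree it is
	 * approximated from the compiler's target macros above. On targets
	 * with efficient unaligned access, a verifier rejection for
	 * alignment is a test failure; on strict-alignment targets, tests
	 * flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may be rejected with
	 * "Unknown alignment." and still count as OK.
	 */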
#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
#define MAX_INSNS 512
#define MAX_FIXUPS 8
+#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
+
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
REJECT
} result, result_unpriv;
enum bpf_prog_type prog_type;
+ uint8_t flags;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
+ {
+ "direct packet access: test15 (spill with xadd)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 4096),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
{
"helper access to packet: test1, valid packet_ptr range",
.insns = {
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a variable",
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a signed variable",
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a constant",
.errstr = "R0 min value is outside of the array range",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a variable",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with no floor check",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
.errstr = "invalid access to map value, value_size=48 off=44 size=8",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"multiple registers share map_lookup_elem result",
.result = REJECT,
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"constant register |= constant should keep constant type",
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
+ {
+ "overlapping checks for direct packet access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+ },
{
"invalid access of tc_classid for LWT_IN",
.insns = {
.result_unpriv = REJECT,
},
{
- "map element value (adjusted) is preserved across register spilling",
+ "map element value or null is marked on register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value store of cleared call register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R1 !read_ok",
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value with unaligned store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "map element value with unaligned load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "map element value illegal alu op, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_3, 4096),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
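+	/*
+	 * The five "illegal alu op" tests above rely on the verifier only
+	 * tracking pointer arithmetic for BPF_ADD/BPF_SUB: an AND, a 32-bit
+	 * ADD, a DIV, a byte swap, or an XADD on a spilled pointer each
+	 * demote the map value pointer to an unknown scalar ('inv'), so the
+	 * store through it is rejected.
+	 */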
+ {
+ "map element value is preserved across register spilling",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result = ACCEPT,
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid range check",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
}
};
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
+ int fd_prog, expected_ret, reject_from_alignment;
struct bpf_insn *prog = test->insns;
int prog_len = probe_filter_length(prog);
int prog_type = test->prog_type;
int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
- int fd_prog, expected_ret;
const char *expected_err;
do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
+
+ reject_from_alignment = fd_prog < 0 &&
+ (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
+ strstr(bpf_vlog, "Unknown alignment.");
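+	/*
+	 * A build that defines CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS should
+	 * never see an alignment-based rejection, so treat one as a hard
+	 * failure there; other builds tolerate it and only annotate the OK
+	 * message below.
+	 */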
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (reject_from_alignment) {
+ printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
+ strerror(errno));
+ goto fail_log;
+ }
+#endif
if (expected_ret == ACCEPT) {
- if (fd_prog < 0) {
+ if (fd_prog < 0 && !reject_from_alignment) {
printf("FAIL\nFailed to load prog '%s'!\n",
strerror(errno));
goto fail_log;
printf("FAIL\nUnexpected success to load!\n");
goto fail_log;
}
- if (!strstr(bpf_vlog, expected_err)) {
+ if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
printf("FAIL\nUnexpected error message!\n");
goto fail_log;
}
}
(*passes)++;
- printf("OK\n");
+ printf("OK%s\n", reject_from_alignment ?
+ " (NOTE: reject due to unknown alignment)" : "");
close_fds:
close(fd_prog);
close(fd_f1);
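The F_NEEDS_EFFICIENT_UNALIGNED_ACCESS flag plus the reject_from_alignment
logic above let a single test table serve every architecture: where
unaligned access is slow or trapped, a verifier rejection for unknown
alignment is tolerated and merely noted, while builds defining
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS (presumably supplied by the
selftest build on such architectures) treat the same rejection as a hard
failure. A minimal sketch of how a new table entry would opt in, using
the struct bpf_test fields visible above (the name and instructions are
illustrative only):

	{
		"example: doubleword load at a +3 offset into a map value",
		.insns = {
			/* ... map lookup, bounds check, misaligned BPF_LDX ... */
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,	/* only where unaligned access is cheap */
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},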
all: $(SUB_DIRS)
$(SUB_DIRS):
- BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
+ BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
include ../lib.mk
override define RUN_TESTS
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
done;
endef
override define INSTALL_RULE
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
done;
endef
override define EMIT_TESTS
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
done;
endef
clean:
@for TARGET in $(SUB_DIRS); do \
- BUILD_TARGET=$$OUTPUT/$$TARGET; \
+ BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
done;
rm -f tags
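The Makefile fix works because recipe lines are expanded twice: $$OUTPUT
reaches the shell as the environment variable $OUTPUT, which is typically
unset, so BUILD_TARGET silently collapsed to /<subdir>. $(OUTPUT) is
expanded by make itself before the shell ever runs, and therefore always
names the configured output directory.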
return IRQ_HANDLED;
}
+/**
+ * kvm_vgic_init_cpu_hardware - initialize the GIC virtualization extensions
+ *
+ * For the calling CPU, initialize the GIC virtualization extensions (VE)
+ * hardware.
+ */
+void kvm_vgic_init_cpu_hardware(void)
+{
+ BUG_ON(preemptible());
+
+ /*
+ * We want to make sure the list registers start out clear so that we
+	 * only have to program the used registers.

+ */
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_init_lrs();
+ else
+ kvm_call_hyp(__vgic_v3_init_lrs);
+}
+
/**
* kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
* according to the host GIC model. Accordingly calls either
val = vmcr.ctlr;
break;
case GIC_CPU_PRIMASK:
- val = vmcr.pmr;
+ /*
+ * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+	 * PMR field as GICH_VMCR.VMPriMask rather than
+	 * GICC_PMR.Priority, so we expose the upper five bits of the
+	 * priority mask to userspace in the lower bits of the
+	 * unsigned long.
+ */
+ val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
+ GICV_PMR_PRIORITY_SHIFT;
break;
case GIC_CPU_BINPOINT:
val = vmcr.bpr;
vmcr.ctlr = val;
break;
case GIC_CPU_PRIMASK:
- vmcr.pmr = val;
+ /*
+ * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+	 * PMR field as GICH_VMCR.VMPriMask rather than
+	 * GICC_PMR.Priority, so we expose the upper five bits of the
+	 * priority mask to userspace in the lower bits of the
+	 * unsigned long.
+ */
+ vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
+ GICV_PMR_PRIORITY_MASK;
break;
case GIC_CPU_BINPOINT:
vmcr.bpr = val;
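As a worked example of the two conversions above (a hedged sketch,
assuming the usual definitions GICV_PMR_PRIORITY_SHIFT == 3 and
GICV_PMR_PRIORITY_MASK == 0xf8, worth checking against
include/linux/irqchip/arm-gic.h), the kernel-internal GICC_PMR.Priority
representation 0xf8 maps to the userspace-visible GICH_VMCR.VMPriMask
value 0x1f and back:

	#include <stdio.h>

	#define GICV_PMR_PRIORITY_SHIFT	3
	#define GICV_PMR_PRIORITY_MASK	(0x1f << GICV_PMR_PRIORITY_SHIFT)

	int main(void)
	{
		unsigned int pmr = 0xf8;	/* GICC_PMR.Priority format */
		/* pack into the five-bit GICH_VMCR.VMPriMask encoding */
		unsigned int vmpri = (pmr & GICV_PMR_PRIORITY_MASK) >>
				     GICV_PMR_PRIORITY_SHIFT;	/* 0x1f */
		/* unpack back to GICC_PMR.Priority format */
		unsigned int back = (vmpri << GICV_PMR_PRIORITY_SHIFT) &
				    GICV_PMR_PRIORITY_MASK;	/* 0xf8 */
		printf("pmr=%#x vmpri=%#x round-trip=%#x\n", pmr, vmpri, back);
		return 0;
	}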
return (unsigned long *)val;
}
+static inline void vgic_v2_write_lr(int lr, u32 val)
+{
+ void __iomem *base = kvm_vgic_global_state.vctrl_base;
+
+ writel_relaxed(val, base + GICH_LR0 + (lr * 4));
+}
+
+void vgic_v2_init_lrs(void)
+{
+ int i;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
+ vgic_v2_write_lr(i, 0);
+}
+
void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
GICH_VMCR_ALIAS_BINPOINT_MASK;
vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
GICH_VMCR_BINPOINT_MASK;
- vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
- GICH_VMCR_PRIMASK_MASK;
+ vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
+ GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}
GICH_VMCR_ALIAS_BINPOINT_SHIFT;
vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
GICH_VMCR_BINPOINT_SHIFT;
- vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
- GICH_VMCR_PRIMASK_SHIFT;
+ vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
+ GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
void vgic_v2_enable(struct kvm_vcpu *vcpu)
return irq->pending_latch || irq->line_level;
}
+/*
+ * This struct provides an intermediate representation of the fields contained
+ * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
+ * state to userspace can generate either GICv2 or GICv3 CPU interface
+ * registers regardless of the GIC model backing them in hardware.
+ */
struct vgic_vmcr {
u32 ctlr;
u32 abpr;
u32 bpr;
- u32 pmr;
+ u32 pmr; /* Priority mask field in the GICC_PMR and
+ * ICC_PMR_EL1 priority field format */
/* Below member variable are valid only for GICv3 */
u32 grpen0;
u32 grpen1;
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type);
+void vgic_v2_init_lrs(void);
+
static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
if (irq->intid < VGIC_MIN_LPI)
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
- kvm->buses[bus_idx]->ioeventfd_count--;
+ if (kvm->buses[bus_idx])
+ kvm->buses[bus_idx]->ioeventfd_count--;
ioeventfd_release(p);
ret = 0;
break;
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
- for (i = 0; i < KVM_NR_BUSES; i++)
- kvm_io_bus_destroy(kvm->buses[i]);
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ if (kvm->buses[i])
+ kvm_io_bus_destroy(kvm->buses[i]);
+ kvm->buses[i] = NULL;
+ }
kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
* changes) is disallowed above, so any other attribute changes getting
* here can be skipped.
*/
- if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+ if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
r = kvm_iommu_map_pages(kvm, &new);
return r;
}
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
/* First try the device referenced by cookie. */
if ((cookie >= 0) && (cookie < bus->dev_count) &&
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
+ if (!bus)
+ return -ENOMEM;
+
/* exclude ioeventfd which is limited by maximum fd */
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
}
/* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- int i, r;
+ int i;
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
- r = -ENOENT;
+ if (!bus)
+ return;
+
for (i = 0; i < bus->dev_count; i++)
if (bus->range[i].dev == dev) {
- r = 0;
break;
}
- if (r)
- return r;
+ if (i == bus->dev_count)
+ return;
new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
- if (!new_bus)
- return -ENOMEM;
+ if (!new_bus) {
+ pr_err("kvm: failed to shrink bus, removing it completely\n");
+ goto broken;
+ }
memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
new_bus->dev_count--;
memcpy(new_bus->range + i, bus->range + i + 1,
(new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+broken:
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
- return r;
}
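The goto broken path is what motivates all of the NULL checks added
earlier in this series: if kmalloc() fails while shrinking the range
array, new_bus is still NULL when it is published with
rcu_assign_pointer(), so the whole bus disappears, every later
srcu_dereference() of kvm->buses[] must tolerate a NULL pointer, and the
VM destruction path clears the slots for the same reason.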
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
srcu_idx = srcu_read_lock(&kvm->srcu);
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+ if (!bus)
+ goto out_unlock;
dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
if (dev_idx < 0)