]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/intel_runtime_pm.c
drm/i915/cnl: Add power wells for CNL
[karo-tx-linux.git] / drivers / gpu / drm / i915 / intel_runtime_pm.c
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53                                     int power_well_id);
54
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
57
58 const char *
59 intel_display_power_domain_str(enum intel_display_power_domain domain)
60 {
61         switch (domain) {
62         case POWER_DOMAIN_PIPE_A:
63                 return "PIPE_A";
64         case POWER_DOMAIN_PIPE_B:
65                 return "PIPE_B";
66         case POWER_DOMAIN_PIPE_C:
67                 return "PIPE_C";
68         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
69                 return "PIPE_A_PANEL_FITTER";
70         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
71                 return "PIPE_B_PANEL_FITTER";
72         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
73                 return "PIPE_C_PANEL_FITTER";
74         case POWER_DOMAIN_TRANSCODER_A:
75                 return "TRANSCODER_A";
76         case POWER_DOMAIN_TRANSCODER_B:
77                 return "TRANSCODER_B";
78         case POWER_DOMAIN_TRANSCODER_C:
79                 return "TRANSCODER_C";
80         case POWER_DOMAIN_TRANSCODER_EDP:
81                 return "TRANSCODER_EDP";
82         case POWER_DOMAIN_TRANSCODER_DSI_A:
83                 return "TRANSCODER_DSI_A";
84         case POWER_DOMAIN_TRANSCODER_DSI_C:
85                 return "TRANSCODER_DSI_C";
86         case POWER_DOMAIN_PORT_DDI_A_LANES:
87                 return "PORT_DDI_A_LANES";
88         case POWER_DOMAIN_PORT_DDI_B_LANES:
89                 return "PORT_DDI_B_LANES";
90         case POWER_DOMAIN_PORT_DDI_C_LANES:
91                 return "PORT_DDI_C_LANES";
92         case POWER_DOMAIN_PORT_DDI_D_LANES:
93                 return "PORT_DDI_D_LANES";
94         case POWER_DOMAIN_PORT_DDI_E_LANES:
95                 return "PORT_DDI_E_LANES";
96         case POWER_DOMAIN_PORT_DDI_A_IO:
97                 return "PORT_DDI_A_IO";
98         case POWER_DOMAIN_PORT_DDI_B_IO:
99                 return "PORT_DDI_B_IO";
100         case POWER_DOMAIN_PORT_DDI_C_IO:
101                 return "PORT_DDI_C_IO";
102         case POWER_DOMAIN_PORT_DDI_D_IO:
103                 return "PORT_DDI_D_IO";
104         case POWER_DOMAIN_PORT_DDI_E_IO:
105                 return "PORT_DDI_E_IO";
106         case POWER_DOMAIN_PORT_DSI:
107                 return "PORT_DSI";
108         case POWER_DOMAIN_PORT_CRT:
109                 return "PORT_CRT";
110         case POWER_DOMAIN_PORT_OTHER:
111                 return "PORT_OTHER";
112         case POWER_DOMAIN_VGA:
113                 return "VGA";
114         case POWER_DOMAIN_AUDIO:
115                 return "AUDIO";
116         case POWER_DOMAIN_PLLS:
117                 return "PLLS";
118         case POWER_DOMAIN_AUX_A:
119                 return "AUX_A";
120         case POWER_DOMAIN_AUX_B:
121                 return "AUX_B";
122         case POWER_DOMAIN_AUX_C:
123                 return "AUX_C";
124         case POWER_DOMAIN_AUX_D:
125                 return "AUX_D";
126         case POWER_DOMAIN_GMBUS:
127                 return "GMBUS";
128         case POWER_DOMAIN_INIT:
129                 return "INIT";
130         case POWER_DOMAIN_MODESET:
131                 return "MODESET";
132         default:
133                 MISSING_CASE(domain);
134                 return "?";
135         }
136 }
137
138 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
139                                     struct i915_power_well *power_well)
140 {
141         DRM_DEBUG_KMS("enabling %s\n", power_well->name);
142         power_well->ops->enable(dev_priv, power_well);
143         power_well->hw_enabled = true;
144 }
145
146 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
147                                      struct i915_power_well *power_well)
148 {
149         DRM_DEBUG_KMS("disabling %s\n", power_well->name);
150         power_well->hw_enabled = false;
151         power_well->ops->disable(dev_priv, power_well);
152 }
153
154 static void intel_power_well_get(struct drm_i915_private *dev_priv,
155                                  struct i915_power_well *power_well)
156 {
157         if (!power_well->count++)
158                 intel_power_well_enable(dev_priv, power_well);
159 }
160
161 static void intel_power_well_put(struct drm_i915_private *dev_priv,
162                                  struct i915_power_well *power_well)
163 {
164         WARN(!power_well->count, "Use count on power well %s is already zero",
165              power_well->name);
166
167         if (!--power_well->count)
168                 intel_power_well_disable(dev_priv, power_well);
169 }
170
171 /*
172  * We should only use the power well if we explicitly asked the hardware to
173  * enable it, so check if it's enabled and also check if we've requested it to
174  * be enabled.
175  */
176 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
177                                    struct i915_power_well *power_well)
178 {
179         return I915_READ(HSW_PWR_WELL_DRIVER) ==
180                      (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
181 }
182
183 /**
184  * __intel_display_power_is_enabled - unlocked check for a power domain
185  * @dev_priv: i915 device instance
186  * @domain: power domain to check
187  *
188  * This is the unlocked version of intel_display_power_is_enabled() and should
189  * only be used from error capture and recovery code where deadlocks are
190  * possible.
191  *
192  * Returns:
193  * True when the power domain is enabled, false otherwise.
194  */
195 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
196                                       enum intel_display_power_domain domain)
197 {
198         struct i915_power_well *power_well;
199         bool is_enabled;
200
201         if (dev_priv->pm.suspended)
202                 return false;
203
204         is_enabled = true;
205
206         for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
207                 if (power_well->always_on)
208                         continue;
209
210                 if (!power_well->hw_enabled) {
211                         is_enabled = false;
212                         break;
213                 }
214         }
215
216         return is_enabled;
217 }
218
219 /**
220  * intel_display_power_is_enabled - check for a power domain
221  * @dev_priv: i915 device instance
222  * @domain: power domain to check
223  *
224  * This function can be used to check the hw power domain state. It is mostly
225  * used in hardware state readout functions. Everywhere else code should rely
226  * upon explicit power domain reference counting to ensure that the hardware
227  * block is powered up before accessing it.
228  *
229  * Callers must hold the relevant modesetting locks to ensure that concurrent
230  * threads can't disable the power well while the caller tries to read a few
231  * registers.
232  *
233  * Returns:
234  * True when the power domain is enabled, false otherwise.
235  */
236 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
237                                     enum intel_display_power_domain domain)
238 {
239         struct i915_power_domains *power_domains;
240         bool ret;
241
242         power_domains = &dev_priv->power_domains;
243
244         mutex_lock(&power_domains->lock);
245         ret = __intel_display_power_is_enabled(dev_priv, domain);
246         mutex_unlock(&power_domains->lock);
247
248         return ret;
249 }
250
251 /**
252  * intel_display_set_init_power - set the initial power domain state
253  * @dev_priv: i915 device instance
254  * @enable: whether to enable or disable the initial power domain state
255  *
256  * For simplicity our driver load/unload and system suspend/resume code assumes
257  * that all power domains are always enabled. This functions controls the state
258  * of this little hack. While the initial power domain state is enabled runtime
259  * pm is effectively disabled.
260  */
261 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
262                                   bool enable)
263 {
264         if (dev_priv->power_domains.init_power_on == enable)
265                 return;
266
267         if (enable)
268                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
269         else
270                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
271
272         dev_priv->power_domains.init_power_on = enable;
273 }
274
275 /*
276  * Starting with Haswell, we have a "Power Down Well" that can be turned off
277  * when not needed anymore. We have 4 registers that can request the power well
278  * to be enabled, and it will only be disabled if none of the registers is
279  * requesting it to be enabled.
280  */
281 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
282 {
283         struct pci_dev *pdev = dev_priv->drm.pdev;
284
285         /*
286          * After we re-enable the power well, if we touch VGA register 0x3d5
287          * we'll get unclaimed register interrupts. This stops after we write
288          * anything to the VGA MSR register. The vgacon module uses this
289          * register all the time, so if we unbind our driver and, as a
290          * consequence, bind vgacon, we'll get stuck in an infinite loop at
291          * console_unlock(). So make here we touch the VGA MSR register, making
292          * sure vgacon can keep working normally without triggering interrupts
293          * and error messages.
294          */
295         vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
296         outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
297         vga_put(pdev, VGA_RSRC_LEGACY_IO);
298
299         if (IS_BROADWELL(dev_priv))
300                 gen8_irq_power_well_post_enable(dev_priv,
301                                                 1 << PIPE_C | 1 << PIPE_B);
302 }
303
304 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
305 {
306         if (IS_BROADWELL(dev_priv))
307                 gen8_irq_power_well_pre_disable(dev_priv,
308                                                 1 << PIPE_C | 1 << PIPE_B);
309 }
310
311 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
312                                        struct i915_power_well *power_well)
313 {
314         struct pci_dev *pdev = dev_priv->drm.pdev;
315
316         /*
317          * After we re-enable the power well, if we touch VGA register 0x3d5
318          * we'll get unclaimed register interrupts. This stops after we write
319          * anything to the VGA MSR register. The vgacon module uses this
320          * register all the time, so if we unbind our driver and, as a
321          * consequence, bind vgacon, we'll get stuck in an infinite loop at
322          * console_unlock(). So make here we touch the VGA MSR register, making
323          * sure vgacon can keep working normally without triggering interrupts
324          * and error messages.
325          */
326         if (power_well->id == SKL_DISP_PW_2) {
327                 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
328                 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
329                 vga_put(pdev, VGA_RSRC_LEGACY_IO);
330
331                 gen8_irq_power_well_post_enable(dev_priv,
332                                                 1 << PIPE_C | 1 << PIPE_B);
333         }
334 }
335
336 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
337                                        struct i915_power_well *power_well)
338 {
339         if (power_well->id == SKL_DISP_PW_2)
340                 gen8_irq_power_well_pre_disable(dev_priv,
341                                                 1 << PIPE_C | 1 << PIPE_B);
342 }
343
344 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
345                                struct i915_power_well *power_well, bool enable)
346 {
347         bool is_enabled, enable_requested;
348         uint32_t tmp;
349
350         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
351         is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
352         enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
353
354         if (enable) {
355                 if (!enable_requested)
356                         I915_WRITE(HSW_PWR_WELL_DRIVER,
357                                    HSW_PWR_WELL_ENABLE_REQUEST);
358
359                 if (!is_enabled) {
360                         DRM_DEBUG_KMS("Enabling power well\n");
361                         if (intel_wait_for_register(dev_priv,
362                                                     HSW_PWR_WELL_DRIVER,
363                                                     HSW_PWR_WELL_STATE_ENABLED,
364                                                     HSW_PWR_WELL_STATE_ENABLED,
365                                                     20))
366                                 DRM_ERROR("Timeout enabling power well\n");
367                         hsw_power_well_post_enable(dev_priv);
368                 }
369
370         } else {
371                 if (enable_requested) {
372                         hsw_power_well_pre_disable(dev_priv);
373                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
374                         POSTING_READ(HSW_PWR_WELL_DRIVER);
375                         DRM_DEBUG_KMS("Requesting to disable the power well\n");
376                 }
377         }
378 }
379
/* Power domain masks backed by each SKL display power well. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Power domain masks backed by each BXT display power well. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Power domain masks backed by each GLK display power well. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Power domain masks backed by each CNL display power well. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

546 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
547 {
548         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
549                   "DC9 already programmed to be enabled.\n");
550         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
551                   "DC5 still not disabled to enable DC9.\n");
552         WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
553         WARN_ONCE(intel_irqs_enabled(dev_priv),
554                   "Interrupts not disabled yet.\n");
555
556          /*
557           * TODO: check for the following to verify the conditions to enter DC9
558           * state are satisfied:
559           * 1] Check relevant display engine registers to verify if mode set
560           * disable sequence was followed.
561           * 2] Check if display uninitialize sequence is initialized.
562           */
563 }
564
565 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
566 {
567         WARN_ONCE(intel_irqs_enabled(dev_priv),
568                   "Interrupts not disabled yet.\n");
569         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
570                   "DC5 still not disabled.\n");
571
572          /*
573           * TODO: check for the following to verify DC9 state was indeed
574           * entered before programming to disable it:
575           * 1] Check relevant display engine registers to verify if mode
576           *  set disable sequence was followed.
577           * 2] Check if display uninitialize sequence is initialized.
578           */
579 }
580
581 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
582                                 u32 state)
583 {
584         int rewrites = 0;
585         int rereads = 0;
586         u32 v;
587
588         I915_WRITE(DC_STATE_EN, state);
589
590         /* It has been observed that disabling the dc6 state sometimes
591          * doesn't stick and dmc keeps returning old value. Make sure
592          * the write really sticks enough times and also force rewrite until
593          * we are confident that state is exactly what we want.
594          */
595         do  {
596                 v = I915_READ(DC_STATE_EN);
597
598                 if (v != state) {
599                         I915_WRITE(DC_STATE_EN, state);
600                         rewrites++;
601                         rereads = 0;
602                 } else if (rereads++ > 5) {
603                         break;
604                 }
605
606         } while (rewrites < 100);
607
608         if (v != state)
609                 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
610                           state, v);
611
612         /* Most of the times we need one retry, avoid spam */
613         if (rewrites > 1)
614                 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
615                               state, rewrites);
616 }
617
618 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
619 {
620         u32 mask;
621
622         mask = DC_STATE_EN_UPTO_DC5;
623         if (IS_GEN9_LP(dev_priv))
624                 mask |= DC_STATE_EN_DC9;
625         else
626                 mask |= DC_STATE_EN_UPTO_DC6;
627
628         return mask;
629 }
630
631 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
632 {
633         u32 val;
634
635         val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
636
637         DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
638                       dev_priv->csr.dc_state, val);
639         dev_priv->csr.dc_state = val;
640 }
641
642 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
643 {
644         uint32_t val;
645         uint32_t mask;
646
647         if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
648                 state &= dev_priv->csr.allowed_dc_mask;
649
650         val = I915_READ(DC_STATE_EN);
651         mask = gen9_dc_mask(dev_priv);
652         DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
653                       val & mask, state);
654
655         /* Check if DMC is ignoring our DC state requests */
656         if ((val & mask) != dev_priv->csr.dc_state)
657                 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
658                           dev_priv->csr.dc_state, val & mask);
659
660         val &= ~mask;
661         val |= state;
662
663         gen9_write_dc_state(dev_priv, val);
664
665         dev_priv->csr.dc_state = val & mask;
666 }
667
668 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
669 {
670         assert_can_enable_dc9(dev_priv);
671
672         DRM_DEBUG_KMS("Enabling DC9\n");
673
674         intel_power_sequencer_reset(dev_priv);
675         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
676 }
677
678 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
679 {
680         assert_can_disable_dc9(dev_priv);
681
682         DRM_DEBUG_KMS("Disabling DC9\n");
683
684         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
685
686         intel_pps_unlock_regs_wa(dev_priv);
687 }
688
689 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
690 {
691         WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
692                   "CSR program storage start is NULL\n");
693         WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
694         WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
695 }
696
697 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
698 {
699         bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
700                                         SKL_DISP_PW_2);
701
702         WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
703
704         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
705                   "DC5 already programmed to be enabled.\n");
706         assert_rpm_wakelock_held(dev_priv);
707
708         assert_csr_loaded(dev_priv);
709 }
710
/*
 * gen9_enable_dc5 - request entry into the DC5 power saving state,
 * after checking all preconditions via assert_can_enable_dc5().
 */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
719
/*
 * Verify the preconditions for entering DC6: the utility pin (used for
 * the backlight here) must be disabled, DC6 must not already be
 * requested, and the CSR/DMC firmware must be loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
729
730 void skl_enable_dc6(struct drm_i915_private *dev_priv)
731 {
732         assert_can_enable_dc6(dev_priv);
733
734         DRM_DEBUG_KMS("Enabling DC6\n");
735
736         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
737
738 }
739
/* skl_disable_dc6 - clear all DC state requests, exiting DC5/DC6. */
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
746
/*
 * Clear any leftover power well request bits in the KVMR, BIOS and DEBUG
 * request registers for the given well, warning about requests that no
 * agent is expected to have set. Called when the driver disables a well,
 * so stale third-party requests cannot keep it powered on.
 */
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	/* A KVMR request is never expected; clear it if present. */
	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	/* Check BIOS and DEBUG requests together; bail if neither is set. */
	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    (IS_GEN9_BC(dev_priv) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	/* Clear the request bit in both registers that may carry it. */
	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
785
/*
 * skl_set_power_well - enable or disable a SKL-style display power well
 * @dev_priv: i915 device
 * @power_well: the well to change
 * @enable: true to power on, false to power off
 *
 * Checks the fuse distribution status preconditions for PG1/PG2, flips
 * the driver request bit, waits for the state bit to follow, and on
 * gen9 sanitizes stale auxiliary request bits when disabling.
 */
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	/* Per-well preconditions: fuse distribution must be done upstream. */
	switch (power_well->id) {
	case SKL_DISP_PW_1:
		/* PG1 depends on PG0's fuse distribution having completed. */
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		/* PG2 depends on PG1 being distributed. */
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_MISC_IO:
	case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
	case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
	case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
	case CNL_DISP_PW_AUX_D:
		/* No fuse precondition for these wells. */
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->id);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->id);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->id);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			/*
			 * NOTE(review): the backslash line continuation below
			 * embeds the second line's leading whitespace into the
			 * emitted warning text — consider reflowing the string.
			 */
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, \
				when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			/* Fuse distribution must be rechecked after power-on. */
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	/* Wait (1ms) for the state bit to reflect the new request. */
	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->id == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->id == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}
886
887 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
888                                    struct i915_power_well *power_well)
889 {
890         /* Take over the request bit if set by BIOS. */
891         if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
892                 if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
893                       HSW_PWR_WELL_ENABLE_REQUEST))
894                         I915_WRITE(HSW_PWR_WELL_DRIVER,
895                                    HSW_PWR_WELL_ENABLE_REQUEST);
896                 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
897         }
898 }
899
/* Power well ops callback: power on via the shared HSW helper. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
905
/* Power well ops callback: power off via the shared HSW helper. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
911
912 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
913                                         struct i915_power_well *power_well)
914 {
915         uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
916                 SKL_POWER_WELL_STATE(power_well->id);
917
918         return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
919 }
920
921 static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
922                                 struct i915_power_well *power_well)
923 {
924         uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
925         uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);
926
927         /* Take over the request bit if set by BIOS. */
928         if (bios_req & mask) {
929                 uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);
930
931                 if (!(drv_req & mask))
932                         I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
933                 I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
934         }
935 }
936
/* Power well ops callback: power on via the shared SKL helper. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
942
/* Power well ops callback: power off via the shared SKL helper. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
948
/* Enabling a BXT DPIO common lane well initializes its DDI PHY. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}
954
/* Disabling a BXT DPIO common lane well uninitializes its DDI PHY. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}
960
/* A BXT DPIO common lane well is enabled iff its DDI PHY reports enabled. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
966
967 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
968 {
969         struct i915_power_well *power_well;
970
971         power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
972         if (power_well->count > 0)
973                 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
974
975         power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
976         if (power_well->count > 0)
977                 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
978
979         if (IS_GEMINILAKE(dev_priv)) {
980                 power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
981                 if (power_well->count > 0)
982                         bxt_ddi_phy_verify_state(dev_priv, power_well->data);
983         }
984 }
985
986 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
987                                            struct i915_power_well *power_well)
988 {
989         return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
990 }
991
992 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
993 {
994         u32 tmp = I915_READ(DBUF_CTL);
995
996         WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
997              (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
998              "Unexpected DBuf power power state (0x%08x)\n", tmp);
999 }
1000
/*
 * Enabling the "DC off" well exits any DC state, then sanity-checks the
 * state the DMC firmware must not have clobbered: the cdclk config, the
 * DBuf power state and, on BXT/GLK, the DDI PHY state.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* The cdclk read back from hardware must match our cached state. */
	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}
1016
1017 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1018                                            struct i915_power_well *power_well)
1019 {
1020         if (!dev_priv->csr.dmc_payload)
1021                 return;
1022
1023         if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
1024                 skl_enable_dc6(dev_priv);
1025         else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
1026                 gen9_enable_dc5(dev_priv);
1027 }
1028
/* No-op sync_hw callback for wells with no hardware state to take over. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
1033
/* No-op enable/disable callback for the always-on power well. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1038
/* The always-on power well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
1044
/*
 * vlv_set_power_well - power a VLV well on or off through the Punit
 * @dev_priv: i915 device
 * @power_well: the well to change
 * @enable: true to power on, false to power gate
 *
 * Writes the requested state into PUNIT_REG_PWRGT_CTRL under the rps
 * hw_lock and polls PUNIT_REG_PWRGT_STATUS (up to 100ms) until the
 * Punit reports the state change, skipping the write if the well is
 * already in the requested state.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the Punit status matches the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	/* Read-modify-write only this well's bits in the control register. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
1080
/* Power well ops callback: power on via the shared VLV Punit helper. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
1086
/* Power well ops callback: power gate via the shared VLV Punit helper. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1092
/*
 * Query whether a VLV power well is on by reading the Punit status
 * register under the rps hw_lock, warning if the hardware reports a
 * state the driver never programs or a control/status mismatch.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
1128
/*
 * Program the VLV display clock gating, arbiter and rawclk registers to
 * their initial values when the display power well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk must be known before programming its frequency register. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	/* RAWCLK_FREQ_VLV takes the frequency in MHz (rawclk_freq is kHz). */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1155
/*
 * Re-initialize everything that depends on the VLV/CHV display power
 * well after it has been powered on: CRI/ref clocks, clock gating,
 * display IRQs and — outside of driver init/resume — HPD, CRT, VGA and
 * the PPS register workaround.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1204
/*
 * Tear down software state that depends on the VLV/CHV display power
 * well before it is powered off: disable and drain display IRQs, reset
 * the power sequencer and re-arm HPD polling (unless suspending).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1220
/* Power on the DISP2D well, then bring up the display-side state. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1230
/* Tear down the display-side state, then power off the DISP2D well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1240
/*
 * Power on the VLV DPIO common lane well and de-assert the common lane
 * reset, following the sequence from the VLV DPIO driver/vbios notes.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b. The other bits such as sfr settings / modesel may all
	 *      be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1264
/*
 * Assert the DPIO common lane reset and power off the common lane well;
 * all pipe PLLs must already be disabled at this point.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1280
1281 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1282
1283 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1284                                                  int power_well_id)
1285 {
1286         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1287         int i;
1288
1289         for (i = 0; i < power_domains->power_well_count; i++) {
1290                 struct i915_power_well *power_well;
1291
1292                 power_well = &power_domains->power_wells[i];
1293                 if (power_well->id == power_well_id)
1294                         return power_well;
1295         }
1296
1297         return NULL;
1298 }
1299
1300 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1301
/*
 * Cross-check the CHV DPIO PHY hardware status against the expected
 * status derived from dev_priv->chv_phy_control: compute which
 * powergood, CL LDO and spline LDO bits should be set given the current
 * power well state and lane overrides, then poll DISPLAY_PHY_STATUS
 * until it matches (the PHY can take a while during calibration).
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Lane pairs 0/1 (0x3) and 2/3 (0xc) map to spline LDOs 0 and 1. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	/* Ignore the status bits whose asserts are still disabled. */
	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}
1407
1408 #undef BITS_SET
1409
/*
 * Power on a CHV DPIO common lane well (BC -> PHY0, D -> PHY1): enable
 * the well via the Punit, wait for phypwrgood, program the dynamic
 * power-down bits through sideband, then de-assert the common lane
 * reset and verify the resulting PHY status.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* The pipe here selects the sideband port for DPIO access. */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/* De-assert the common lane reset for this PHY. */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1473
/*
 * Power off a CHV DPIO common lane well: with the affected pipe PLLs
 * confirmed disabled, assert the common lane reset, gate the well via
 * the Punit, re-arm the PHY state asserts and verify the PHY status.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	/* Assert the common lane reset before cutting the power. */
	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1504
/*
 * Cross-check the lane power down status bits read back from the PHY
 * against what the current override enable and lane @mask imply.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                                     enum dpio_channel ch, bool override, unsigned int mask)
{
        /* vlv_dpio_read() selects the PHY via a pipe: PHY0 -> pipe A, PHY1 -> pipe C */
        enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
        u32 reg, val, expected, actual;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[phy])
                return;

        if (ch == DPIO_CH0)
                reg = _CHV_CMN_DW0_CH0;
        else
                reg = _CHV_CMN_DW6_CH1;

        mutex_lock(&dev_priv->sb_lock);
        val = vlv_dpio_read(dev_priv, pipe, reg);
        mutex_unlock(&dev_priv->sb_lock);

        /*
         * This assumes !override is only used when the port is disabled.
         * All lanes should power down even without the override when
         * the port is disabled.
         */
        if (!override || mask == 0xf) {
                expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
                /*
                 * If CH1 common lane is not active anymore
                 * (eg. for pipe B DPLL) the entire channel will
                 * shut down, which causes the common lane registers
                 * to read as 0. That means we can't actually check
                 * the lane power down status bits, but as the entire
                 * register reads as 0 it's a good indication that the
                 * channel is indeed entirely powered down.
                 */
                if (ch == DPIO_CH1 && val == 0)
                        expected = 0;
        } else if (mask != 0x0) {
                /* Some (but not all) lanes overridden on: only "any" should be set. */
                expected = DPIO_ANYDL_POWERDOWN;
        } else {
                /* All lanes overridden on: nothing should be powered down. */
                expected = 0;
        }

        /* The two channels carry the status bits at different shifts. */
        if (ch == DPIO_CH0)
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
        else
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
        actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

        WARN(actual != expected,
             "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
             !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
             !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
             reg, val);
}
1566
1567 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1568                           enum dpio_channel ch, bool override)
1569 {
1570         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1571         bool was_override;
1572
1573         mutex_lock(&power_domains->lock);
1574
1575         was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1576
1577         if (override == was_override)
1578                 goto out;
1579
1580         if (override)
1581                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1582         else
1583                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1584
1585         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1586
1587         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1588                       phy, ch, dev_priv->chv_phy_control);
1589
1590         assert_chv_phy_status(dev_priv);
1591
1592 out:
1593         mutex_unlock(&power_domains->lock);
1594
1595         return was_override;
1596 }
1597
1598 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1599                              bool override, unsigned int mask)
1600 {
1601         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1602         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1603         enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1604         enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1605
1606         mutex_lock(&power_domains->lock);
1607
1608         dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1609         dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1610
1611         if (override)
1612                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1613         else
1614                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1615
1616         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1617
1618         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1619                       phy, ch, mask, dev_priv->chv_phy_control);
1620
1621         assert_chv_phy_status(dev_priv);
1622
1623         assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1624
1625         mutex_unlock(&power_domains->lock);
1626 }
1627
/*
 * Query the punit for the current state of the CHV pipe power well.
 * Returns true when the well reports the power-on state.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        enum pipe pipe = power_well->id;
        bool enabled;
        u32 state, ctrl;

        mutex_lock(&dev_priv->rps.hw_lock);

        /* Status bits for this pipe as currently reported by the punit. */
        state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
        enabled = state == DP_SSS_PWR_ON(pipe);

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
        WARN_ON(ctrl << 16 != state);

        mutex_unlock(&dev_priv->rps.hw_lock);

        return enabled;
}
1656
/*
 * Ask the punit to power the CHV pipe well on or off and wait for the
 * status bits to reflect the requested state.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well,
                                    bool enable)
{
        enum pipe pipe = power_well->id;
        u32 state;
        u32 ctrl;

        /* Target value for the punit status bits of this pipe. */
        state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

        mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

        /* Already in the requested state - nothing to do. */
        if (COND)
                goto out;

        /* Read-modify-write only this pipe's control bits. */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        ctrl &= ~DP_SSC_MASK(pipe);
        ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

        /* Poll (up to 100ms) until the status matches the request. */
        if (wait_for(COND, 100))
                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                          state,
                          vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
        mutex_unlock(&dev_priv->rps.hw_lock);
}
1690
/* Enable the CHV pipe A power well and re-initialize display state. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
{
        /* Only the pipe A well exists on CHV (see chv_power_wells[]). */
        WARN_ON_ONCE(power_well->id != PIPE_A);

        chv_set_pipe_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}
1700
/* Tear down display state, then disable the CHV pipe A power well. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        /* Only the pipe A well exists on CHV (see chv_power_wells[]). */
        WARN_ON_ONCE(power_well->id != PIPE_A);

        /* Deinit must happen while the well is still powered. */
        vlv_display_power_well_deinit(dev_priv);

        chv_set_pipe_power_well(dev_priv, power_well, false);
}
1710
/*
 * Grab a reference on every power well that services @domain and bump
 * the domain's use count. Caller must hold power_domains->lock (both
 * callers below take it around this).
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;

        for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
                intel_power_well_get(dev_priv, power_well);

        power_domains->domain_use_count[domain]++;
}
1723
1724 /**
1725  * intel_display_power_get - grab a power domain reference
1726  * @dev_priv: i915 device instance
1727  * @domain: power domain to reference
1728  *
1729  * This function grabs a power domain reference for @domain and ensures that the
1730  * power domain and all its parents are powered up. Therefore users should only
1731  * grab a reference to the innermost power domain they need.
1732  *
1733  * Any power domain reference obtained by this function must have a symmetric
1734  * call to intel_display_power_put() to release the reference again.
1735  */
1736 void intel_display_power_get(struct drm_i915_private *dev_priv,
1737                              enum intel_display_power_domain domain)
1738 {
1739         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1740
1741         intel_runtime_pm_get(dev_priv);
1742
1743         mutex_lock(&power_domains->lock);
1744
1745         __intel_display_power_get_domain(dev_priv, domain);
1746
1747         mutex_unlock(&power_domains->lock);
1748 }
1749
1750 /**
1751  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1752  * @dev_priv: i915 device instance
1753  * @domain: power domain to reference
1754  *
1755  * This function grabs a power domain reference for @domain and ensures that the
1756  * power domain and all its parents are powered up. Therefore users should only
1757  * grab a reference to the innermost power domain they need.
1758  *
1759  * Any power domain reference obtained by this function must have a symmetric
1760  * call to intel_display_power_put() to release the reference again.
1761  */
1762 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1763                                         enum intel_display_power_domain domain)
1764 {
1765         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1766         bool is_enabled;
1767
1768         if (!intel_runtime_pm_get_if_in_use(dev_priv))
1769                 return false;
1770
1771         mutex_lock(&power_domains->lock);
1772
1773         if (__intel_display_power_is_enabled(dev_priv, domain)) {
1774                 __intel_display_power_get_domain(dev_priv, domain);
1775                 is_enabled = true;
1776         } else {
1777                 is_enabled = false;
1778         }
1779
1780         mutex_unlock(&power_domains->lock);
1781
1782         if (!is_enabled)
1783                 intel_runtime_pm_put(dev_priv);
1784
1785         return is_enabled;
1786 }
1787
1788 /**
1789  * intel_display_power_put - release a power domain reference
1790  * @dev_priv: i915 device instance
1791  * @domain: power domain to reference
1792  *
1793  * This function drops the power domain reference obtained by
1794  * intel_display_power_get() and might power down the corresponding hardware
1795  * block right away if this is the last reference.
1796  */
1797 void intel_display_power_put(struct drm_i915_private *dev_priv,
1798                              enum intel_display_power_domain domain)
1799 {
1800         struct i915_power_domains *power_domains;
1801         struct i915_power_well *power_well;
1802
1803         power_domains = &dev_priv->power_domains;
1804
1805         mutex_lock(&power_domains->lock);
1806
1807         WARN(!power_domains->domain_use_count[domain],
1808              "Use count on domain %s is already zero\n",
1809              intel_display_power_domain_str(domain));
1810         power_domains->domain_use_count[domain]--;
1811
1812         for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1813                 intel_power_well_put(dev_priv, power_well);
1814
1815         mutex_unlock(&power_domains->lock);
1816
1817         intel_runtime_pm_put(dev_priv);
1818 }
1819
/* Domains served by the single HSW "display" power well. */
#define HSW_DISPLAY_POWER_DOMAINS (                     \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Domains served by the BDW "display" well - like HSW but no pipe A fitter. */
#define BDW_DISPLAY_POWER_DOMAINS (                     \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
1852
/* Domains served by the VLV disp2d power well. */
#define VLV_DISPLAY_POWER_DOMAINS (             \
        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
        BIT_ULL(POWER_DOMAIN_AUDIO) |           \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_GMBUS) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Domains served by the VLV DPIO common lane (BC) well. */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Per-TX-lane-pair wells; see vlv_power_wells[] for how these are combined. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_INIT))
1898
/* Domains served by the CHV pipe A ("display") power well. */
#define CHV_DISPLAY_POWER_DOMAINS (             \
        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
        BIT_ULL(POWER_DOMAIN_AUDIO) |           \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_AUX_D) |           \
        BIT_ULL(POWER_DOMAIN_GMBUS) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Domains served by the CHV DPIO common lane BC well (PHY0). */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Domains served by the CHV DPIO common lane D well (PHY1). */
#define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_D) |           \
        BIT_ULL(POWER_DOMAIN_INIT))
1932
/* Ops for wells that are always on - enable/disable are no-ops. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = i9xx_always_on_power_well_noop,
        .disable = i9xx_always_on_power_well_noop,
        .is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe A power well (punit controlled). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = chv_pipe_power_well_enable,
        .disable = chv_pipe_power_well_disable,
        .is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for the CHV DPIO common lane wells (PHY reset + punit). */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = chv_dpio_cmn_power_well_enable,
        .disable = chv_dpio_cmn_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};
1953
/* Platforms with no controllable display power wells get just this one. */
static struct i915_power_well i9xx_always_on_power_well[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
};
1962
/* Ops for the HSW/BDW display power well. */
static const struct i915_power_well_ops hsw_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
        .enable = hsw_power_well_enable,
        .disable = hsw_power_well_disable,
        .is_enabled = hsw_power_well_enabled,
};

/* Ops for the gen9+ (SKL-style) power wells. */
static const struct i915_power_well_ops skl_power_well_ops = {
        .sync_hw = skl_power_well_sync_hw,
        .enable = skl_power_well_enable,
        .disable = skl_power_well_disable,
        .is_enabled = skl_power_well_enabled,
};

/* Ops for the virtual "DC off" well controlling DC state entry. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = gen9_dc_off_power_well_enable,
        .disable = gen9_dc_off_power_well_disable,
        .is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for the BXT/GLK DPIO common lane (PHY) wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = bxt_dpio_cmn_power_well_enable,
        .disable = bxt_dpio_cmn_power_well_disable,
        .is_enabled = bxt_dpio_cmn_power_well_enabled,
};
1990
/* HSW: a single controllable display well plus the always-on well. */
static struct i915_power_well hsw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                .domains = HSW_DISPLAY_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
        },
};
2004
/* BDW: same layout as HSW, with the BDW domain mask. */
static struct i915_power_well bdw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                .domains = BDW_DISPLAY_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
        },
};
2018
/* Ops for the VLV disp2d well (punit controlled, extra display init/deinit). */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = vlv_display_power_well_enable,
        .disable = vlv_display_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = vlv_dpio_cmn_power_well_enable,
        .disable = vlv_dpio_cmn_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

/* Ops for plain punit controlled wells (the TX lane wells). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = vlv_power_well_enable,
        .disable = vlv_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};
2039
/* VLV power wells. */
static struct i915_power_well vlv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = PUNIT_POWER_WELL_ALWAYS_ON,
        },
        {
                .name = "display",
                .domains = VLV_DISPLAY_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DISP2D,
                .ops = &vlv_display_power_well_ops,
        },
        /*
         * All four TX lane wells list the identical union of the B/C
         * lane domain masks, so any of those domains keeps all four
         * wells powered - they are toggled together.
         */
        {
                .name = "dpio-tx-b-01",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
        },
        {
                .name = "dpio-tx-b-23",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
        },
        {
                .name = "dpio-tx-c-01",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
        },
        {
                .name = "dpio-tx-c-23",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
        },
        {
                .name = "dpio-common",
                .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
                .ops = &vlv_dpio_cmn_power_well_ops,
        },
};
2097
/* CHV power wells. */
static struct i915_power_well chv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                /*
                 * Pipe A power well is the new disp2d well. Pipe B and C
                 * power wells don't actually exist. Pipe A power well is
                 * required for any pipe to work.
                 */
                .domains = CHV_DISPLAY_POWER_DOMAINS,
                .id = PIPE_A,
                .ops = &chv_pipe_power_well_ops,
        },
        {
                .name = "dpio-common-bc",
                .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
                .ops = &chv_dpio_cmn_power_well_ops,
        },
        {
                .name = "dpio-common-d",
                .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DPIO_CMN_D,
                .ops = &chv_dpio_cmn_power_well_ops,
        },
};
2129
2130 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2131                                     int power_well_id)
2132 {
2133         struct i915_power_well *power_well;
2134         bool ret;
2135
2136         power_well = lookup_power_well(dev_priv, power_well_id);
2137         ret = power_well->ops->is_enabled(dev_priv, power_well);
2138
2139         return ret;
2140 }
2141
/*
 * SKL power wells. PW1 and MISC IO carry empty domain masks because
 * they are handled by the DMC firmware.
 */
static struct i915_power_well skl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = SKL_DISP_PW_ALWAYS_ON,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .domains = 0,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_1,
        },
        {
                .name = "MISC IO power well",
                /* Handled by the DMC firmware */
                .domains = 0,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_MISC_IO,
        },
        {
                .name = "DC off",
                .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = SKL_DISP_PW_DC_OFF,
        },
        {
                .name = "power well 2",
                .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_2,
        },
        {
                .name = "DDI A/E IO power well",
                .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_A_E,
        },
        {
                .name = "DDI B IO power well",
                .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_B,
        },
        {
                .name = "DDI C IO power well",
                .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_C,
        },
        {
                .name = "DDI D IO power well",
                .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_D,
        },
};
2201
/* BXT power wells. */
static struct i915_power_well bxt_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware (empty domain mask, as on SKL/GLK) */
                .domains = 0,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_1,
        },
        {
                .name = "DC off",
                .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = SKL_DISP_PW_DC_OFF,
        },
        {
                .name = "power well 2",
                .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_2,
        },
        {
                .name = "dpio-common-a",
                .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DPIO_CMN_A,
                .data = DPIO_PHY1,
        },
        {
                .name = "dpio-common-bc",
                .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DPIO_CMN_BC,
                .data = DPIO_PHY0,
        },
};
2242
/* Geminilake display power wells, in enabling order (disabling walks the list in reverse). */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		/* .data selects the DPIO PHY this common lane well controls */
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.data = DPIO_PHY1,
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.data = DPIO_PHY0,
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		.data = DPIO_PHY2,
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
2327
/* Cannonlake display power wells, in enabling order (disabling walks the list in reverse). */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2403
/*
 * Normalize the disable_power_well module option to a strict 0/1 value.
 * Negative values mean "auto" and default to 1 (power well support on).
 * @dev_priv is currently unused but kept for platform-specific policy.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well < 0)
		return 1;

	return !!disable_power_well;
}
2413
/*
 * Compute the mask of allowed display DC (display C) states for this
 * platform, honoring the enable_dc module option. Returns a bitmask of
 * DC_STATE_EN_* flags; 0 means no DC states are allowed.
 */
static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_GEN9_BC(dev_priv)) {
		/* SKL/KBL/CFL: up to DC6 supported. */
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		/* BXT/GLK: up to DC5 supported. */
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	/* With power well support disabled, don't allow any DC states. */
	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		/* -1 means "auto": the deepest state the platform supports. */
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		/* Valid option value, but deeper than this platform allows. */
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	/* DC6 implies DC5 being allowed as well. */
	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
2462
/*
 * Point @power_domains at a platform's power well table and record its
 * length. Must be passed an actual array (ARRAY_SIZE requirement).
 */
#define set_power_wells(power_domains, __power_wells) ({                \
	(power_domains)->power_wells = (__power_wells);                 \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
})
2467
2468 /**
2469  * intel_power_domains_init - initializes the power domain structures
2470  * @dev_priv: i915 device instance
2471  *
2472  * Initializes the power domain structures for @dev_priv depending upon the
2473  * supported platform.
2474  */
2475 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2476 {
2477         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2478
2479         i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2480                                                      i915.disable_power_well);
2481         dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2482                                                             i915.enable_dc);
2483
2484         BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2485
2486         mutex_init(&power_domains->lock);
2487
2488         /*
2489          * The enabling order will be from lower to higher indexed wells,
2490          * the disabling order is reversed.
2491          */
2492         if (IS_HASWELL(dev_priv)) {
2493                 set_power_wells(power_domains, hsw_power_wells);
2494         } else if (IS_BROADWELL(dev_priv)) {
2495                 set_power_wells(power_domains, bdw_power_wells);
2496         } else if (IS_GEN9_BC(dev_priv)) {
2497                 set_power_wells(power_domains, skl_power_wells);
2498         } else if (IS_CANNONLAKE(dev_priv)) {
2499                 set_power_wells(power_domains, cnl_power_wells);
2500         } else if (IS_BROXTON(dev_priv)) {
2501                 set_power_wells(power_domains, bxt_power_wells);
2502         } else if (IS_GEMINILAKE(dev_priv)) {
2503                 set_power_wells(power_domains, glk_power_wells);
2504         } else if (IS_CHERRYVIEW(dev_priv)) {
2505                 set_power_wells(power_domains, chv_power_wells);
2506         } else if (IS_VALLEYVIEW(dev_priv)) {
2507                 set_power_wells(power_domains, vlv_power_wells);
2508         } else {
2509                 set_power_wells(power_domains, i9xx_always_on_power_well);
2510         }
2511
2512         return 0;
2513 }
2514
2515 /**
2516  * intel_power_domains_fini - finalizes the power domain structures
2517  * @dev_priv: i915 device instance
2518  *
2519  * Finalizes the power domain structures for @dev_priv depending upon the
2520  * supported platform. This function also disables runtime pm and ensures that
2521  * the device stays powered up so that the driver can be reloaded.
2522  */
2523 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2524 {
2525         struct device *kdev = &dev_priv->drm.pdev->dev;
2526
2527         /*
2528          * The i915.ko module is still not prepared to be loaded when
2529          * the power well is not enabled, so just enable it in case
2530          * we're going to unload/reload.
2531          * The following also reacquires the RPM reference the core passed
2532          * to the driver during loading, which is dropped in
2533          * intel_runtime_pm_enable(). We have to hand back the control of the
2534          * device to the core with this reference held.
2535          */
2536         intel_display_set_init_power(dev_priv, true);
2537
2538         /* Remove the refcount we took to keep power well support disabled. */
2539         if (!i915.disable_power_well)
2540                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2541
2542         /*
2543          * Remove the refcount we took in intel_runtime_pm_enable() in case
2544          * the platform doesn't support runtime PM.
2545          */
2546         if (!HAS_RUNTIME_PM(dev_priv))
2547                 pm_runtime_put(kdev);
2548 }
2549
/*
 * Run each power well's sync_hw hook and cache its current HW enabled
 * status in power_well->hw_enabled, under the power domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
2563
2564 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2565 {
2566         I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2567         POSTING_READ(DBUF_CTL);
2568
2569         udelay(10);
2570
2571         if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2572                 DRM_ERROR("DBuf power enable timeout\n");
2573 }
2574
2575 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2576 {
2577         I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2578         POSTING_READ(DBUF_CTL);
2579
2580         udelay(10);
2581
2582         if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2583                 DRM_ERROR("DBuf power disable timeout!\n");
2584 }
2585
/*
 * SKL display core init sequence: disable DC states, enable the PCH
 * reset handshake, bring up PG1 and Misc I/O, then cdclk and DBuf.
 * The step order follows the HW init sequence and must not be changed.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware payload on resume if we have one. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2617
/*
 * SKL display core uninit: mirror of skl_display_core_init(), tearing
 * down DBuf, cdclk and the PG1/Misc I/O wells in reverse order.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
2642
/*
 * BXT/GLK display core init sequence: disable DC states, clear the PCH
 * reset handshake (no PCH on these platforms), enable PG1, then cdclk
 * and DBuf. Step order follows the HW init sequence.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware payload on resume if we have one. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2677
/*
 * BXT/GLK display core uninit: mirror of bxt_display_core_init(),
 * tearing down DBuf, cdclk and PG1 in reverse order.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
2699
/*
 * Reconstruct the initial shadow value of DISPLAY_PHY_CONTROL from the
 * common lane power well state and the DPLL/PHY lane ready bits, since
 * the register itself must never be read (see workaround note below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		/* PHY0 lane status is reflected in the DPLL(PIPE_A) ready bits. */
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		/* 0xf == all four lanes ready, i.e. nothing to power down. */
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		/* PHY1 (port D) lane status comes from DPIO_PHY_STATUS. */
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
2786
/*
 * WA: toggle a display PHY side reset by power gating the common lane
 * well and letting the normal enable path ungate it again. Skipped when
 * the display looks already active.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
2814
2815 /**
2816  * intel_power_domains_init_hw - initialize hardware power domain state
2817  * @dev_priv: i915 device instance
2818  * @resume: Called from resume code paths or not
2819  *
2820  * This function initializes the hardware power domain state and enables all
2821  * power wells belonging to the INIT power domain. Power wells in other
2822  * domains (and not in the INIT domain) are referenced or disabled during the
2823  * modeset state HW readout. After that the reference count of each power well
2824  * must match its HW enabled state, see intel_power_domains_verify_state().
2825  */
2826 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2827 {
2828         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2829
2830         power_domains->initializing = true;
2831
2832         if (IS_GEN9_BC(dev_priv)) {
2833                 skl_display_core_init(dev_priv, resume);
2834         } else if (IS_GEN9_LP(dev_priv)) {
2835                 bxt_display_core_init(dev_priv, resume);
2836         } else if (IS_CHERRYVIEW(dev_priv)) {
2837                 mutex_lock(&power_domains->lock);
2838                 chv_phy_control_init(dev_priv);
2839                 mutex_unlock(&power_domains->lock);
2840         } else if (IS_VALLEYVIEW(dev_priv)) {
2841                 mutex_lock(&power_domains->lock);
2842                 vlv_cmnlane_wa(dev_priv);
2843                 mutex_unlock(&power_domains->lock);
2844         }
2845
2846         /* For now, we need the power well to be always enabled. */
2847         intel_display_set_init_power(dev_priv, true);
2848         /* Disable power support if the user asked so. */
2849         if (!i915.disable_power_well)
2850                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2851         intel_power_domains_sync_hw(dev_priv);
2852         power_domains->initializing = false;
2853 }
2854
2855 /**
2856  * intel_power_domains_suspend - suspend power domain state
2857  * @dev_priv: i915 device instance
2858  *
2859  * This function prepares the hardware power domain state before entering
2860  * system suspend. It must be paired with intel_power_domains_init_hw().
2861  */
2862 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2863 {
2864         /*
2865          * Even if power well support was disabled we still want to disable
2866          * power wells while we are system suspended.
2867          */
2868         if (!i915.disable_power_well)
2869                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2870
2871         if (IS_GEN9_BC(dev_priv))
2872                 skl_display_core_uninit(dev_priv);
2873         else if (IS_GEN9_LP(dev_priv))
2874                 bxt_display_core_uninit(dev_priv);
2875 }
2876
/*
 * Log every power well's refcount together with the use counts of all
 * power domains it serves, to help debug refcount mismatches.
 */
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well) {
		enum intel_display_power_domain domain;

		DRM_DEBUG_DRIVER("%-25s %d\n",
				 power_well->name, power_well->count);

		for_each_power_domain(domain, power_well->domains)
			DRM_DEBUG_DRIVER("  %-23s %d\n",
					 intel_display_power_domain_str(domain),
					 power_domains->domain_use_count[domain]);
	}
}
2894
2895 /**
2896  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2897  * @dev_priv: i915 device instance
2898  *
2899  * Verify if the reference count of each power well matches its HW enabled
2900  * state and the total refcount of the domains it belongs to. This must be
2901  * called after modeset HW state sanitization, which is responsible for
2902  * acquiring reference counts for any power wells in use and disabling the
2903  * ones left on by BIOS but not required by any active output.
2904  */
2905 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
2906 {
2907         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2908         struct i915_power_well *power_well;
2909         bool dump_domain_info;
2910
2911         mutex_lock(&power_domains->lock);
2912
2913         dump_domain_info = false;
2914         for_each_power_well(dev_priv, power_well) {
2915                 enum intel_display_power_domain domain;
2916                 int domains_count;
2917                 bool enabled;
2918
2919                 /*
2920                  * Power wells not belonging to any domain (like the MISC_IO
2921                  * and PW1 power wells) are under FW control, so ignore them,
2922                  * since their state can change asynchronously.
2923                  */
2924                 if (!power_well->domains)
2925                         continue;
2926
2927                 enabled = power_well->ops->is_enabled(dev_priv, power_well);
2928                 if ((power_well->count || power_well->always_on) != enabled)
2929                         DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
2930                                   power_well->name, power_well->count, enabled);
2931
2932                 domains_count = 0;
2933                 for_each_power_domain(domain, power_well->domains)
2934                         domains_count += power_domains->domain_use_count[domain];
2935
2936                 if (power_well->count != domains_count) {
2937                         DRM_ERROR("power well %s refcount/domain refcount mismatch "
2938                                   "(refcount %d/domains refcount %d)\n",
2939                                   power_well->name, power_well->count,
2940                                   domains_count);
2941                         dump_domain_info = true;
2942                 }
2943         }
2944
2945         if (dump_domain_info) {
2946                 static bool dumped;
2947
2948                 if (!dumped) {
2949                         intel_power_domains_dump_info(dev_priv);
2950                         dumped = true;
2951                 }
2952         }
2953
2954         mutex_unlock(&power_domains->lock);
2955 }
2956
2957 /**
2958  * intel_runtime_pm_get - grab a runtime pm reference
2959  * @dev_priv: i915 device instance
2960  *
2961  * This function grabs a device-level runtime pm reference (mostly used for GEM
2962  * code to ensure the GTT or GT is on) and ensures that it is powered up.
2963  *
2964  * Any runtime pm reference obtained by this function must have a symmetric
2965  * call to intel_runtime_pm_put() to release the reference again.
2966  */
2967 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2968 {
2969         struct pci_dev *pdev = dev_priv->drm.pdev;
2970         struct device *kdev = &pdev->dev;
2971         int ret;
2972
2973         ret = pm_runtime_get_sync(kdev);
2974         WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
2975
2976         atomic_inc(&dev_priv->pm.wakeref_count);
2977         assert_rpm_wakelock_held(dev_priv);
2978 }
2979
2980 /**
2981  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2982  * @dev_priv: i915 device instance
2983  *
2984  * This function grabs a device-level runtime pm reference if the device is
2985  * already in use and ensures that it is powered up.
2986  *
2987  * Any runtime pm reference obtained by this function must have a symmetric
2988  * call to intel_runtime_pm_put() to release the reference again.
2989  */
2990 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2991 {
2992         struct pci_dev *pdev = dev_priv->drm.pdev;
2993         struct device *kdev = &pdev->dev;
2994
2995         if (IS_ENABLED(CONFIG_PM)) {
2996                 int ret = pm_runtime_get_if_in_use(kdev);
2997
2998                 /*
2999                  * In cases runtime PM is disabled by the RPM core and we get
3000                  * an -EINVAL return value we are not supposed to call this
3001                  * function, since the power state is undefined. This applies
3002                  * atm to the late/early system suspend/resume handlers.
3003                  */
3004                 WARN_ONCE(ret < 0,
3005                           "pm_runtime_get_if_in_use() failed: %d\n", ret);
3006                 if (ret <= 0)
3007                         return false;
3008         }
3009
3010         atomic_inc(&dev_priv->pm.wakeref_count);
3011         assert_rpm_wakelock_held(dev_priv);
3012
3013         return true;
3014 }
3015
3016 /**
3017  * intel_runtime_pm_get_noresume - grab a runtime pm reference
3018  * @dev_priv: i915 device instance
3019  *
3020  * This function grabs a device-level runtime pm reference (mostly used for GEM
3021  * code to ensure the GTT or GT is on).
3022  *
3023  * It will _not_ power up the device but instead only check that it's powered
3024  * on.  Therefore it is only valid to call this functions from contexts where
3025  * the device is known to be powered up and where trying to power it up would
3026  * result in hilarity and deadlocks. That pretty much means only the system
3027  * suspend/resume code where this is used to grab runtime pm references for
3028  * delayed setup down in work items.
3029  *
3030  * Any runtime pm reference obtained by this function must have a symmetric
3031  * call to intel_runtime_pm_put() to release the reference again.
3032  */
3033 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3034 {
3035         struct pci_dev *pdev = dev_priv->drm.pdev;
3036         struct device *kdev = &pdev->dev;
3037
3038         assert_rpm_wakelock_held(dev_priv);
3039         pm_runtime_get_noresume(kdev);
3040
3041         atomic_inc(&dev_priv->pm.wakeref_count);
3042 }
3043
3044 /**
3045  * intel_runtime_pm_put - release a runtime pm reference
3046  * @dev_priv: i915 device instance
3047  *
3048  * This function drops the device-level runtime pm reference obtained by
3049  * intel_runtime_pm_get() and might power down the corresponding
3050  * hardware block right away if this is the last reference.
3051  */
3052 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3053 {
3054         struct pci_dev *pdev = dev_priv->drm.pdev;
3055         struct device *kdev = &pdev->dev;
3056
3057         assert_rpm_wakelock_held(dev_priv);
3058         atomic_dec(&dev_priv->pm.wakeref_count);
3059
3060         pm_runtime_mark_last_busy(kdev);
3061         pm_runtime_put_autosuspend(kdev);
3062 }
3063
3064 /**
3065  * intel_runtime_pm_enable - enable runtime pm
3066  * @dev_priv: i915 device instance
3067  *
3068  * This function enables runtime pm at the end of the driver load sequence.
3069  *
3070  * Note that this function does currently not enable runtime pm for the
3071  * subordinate display power domains. That is only done on the first modeset
3072  * using intel_display_set_init_power().
3073  */
3074 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3075 {
3076         struct pci_dev *pdev = dev_priv->drm.pdev;
3077         struct device *kdev = &pdev->dev;
3078
3079         pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3080         pm_runtime_mark_last_busy(kdev);
3081
3082         /*
3083          * Take a permanent reference to disable the RPM functionality and drop
3084          * it only when unloading the driver. Use the low level get/put helpers,
3085          * so the driver's own RPM reference tracking asserts also work on
3086          * platforms without RPM support.
3087          */
3088         if (!HAS_RUNTIME_PM(dev_priv)) {
3089                 int ret;
3090
3091                 pm_runtime_dont_use_autosuspend(kdev);
3092                 ret = pm_runtime_get_sync(kdev);
3093                 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3094         } else {
3095                 pm_runtime_use_autosuspend(kdev);
3096         }
3097
3098         /*
3099          * The core calls the driver load handler with an RPM reference held.
3100          * We drop that here and will reacquire it during unloading in
3101          * intel_power_domains_fini().
3102          */
3103         pm_runtime_put_autosuspend(kdev);
3104 }