]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/intel_dp.c
Merge branch 'drm-etnaviv-next' of git://git.pengutronix.de/git/lst/linux into drm...
[karo-tx-linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

/* Associates a DP link clock with the DPLL divider values that produce it. */
struct dp_link_dpll {
	int clock;		/* link clock in kHz (e.g. 162000, 270000) */
	struct dpll dpll;	/* divider values to program into the DPLL */
};
54
/* DPLL dividers per DP link rate for gen4 (G4x) ports */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL dividers per DP link rate for PCH (ILK+) ports */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL dividers per DP link rate for VLV ports */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates (kHz) per platform */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102  * @intel_dp: DP struct
103  *
104  * If a CPU or PCH DP output is attached to an eDP panel, this function
105  * will return true, and false otherwise.
106  */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118         return intel_dig_port->base.base.dev;
119 }
120
/* Return the intel_dp hanging off the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131                                       enum pipe pipe);
132 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
133
134 static int
135 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
136 {
137         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138
139         switch (max_link_bw) {
140         case DP_LINK_BW_1_62:
141         case DP_LINK_BW_2_7:
142         case DP_LINK_BW_5_4:
143                 break;
144         default:
145                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146                      max_link_bw);
147                 max_link_bw = DP_LINK_BW_1_62;
148                 break;
149         }
150         return max_link_bw;
151 }
152
153 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154 {
155         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156         u8 source_max, sink_max;
157
158         source_max = intel_dig_port->max_lanes;
159         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
160
161         return min(source_max, sink_max);
162 }
163
164 /*
165  * The units on the numbers in the next two are... bizarre.  Examples will
166  * make it clearer; this one parallels an example in the eDP spec.
167  *
168  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
169  *
170  *     270000 * 1 * 8 / 10 == 216000
171  *
172  * The actual data capacity of that configuration is 2.16Gbit/s, so the
173  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
174  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
175  * 119000.  At 18bpp that's 2142000 kilobits per second.
176  *
177  * Thus the strange-looking division by 10 in intel_dp_link_required, to
178  * get the result in decakilobits instead of kilobits.
179  */
180
/*
 * Bandwidth (in decakilobits/s, rounded up) needed to carry @pixel_clock
 * kHz at @bpp bits per pixel.  See the units discussion above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* round up to whole decakilobits */
	return (kilobits + 9) / 10;
}
186
/*
 * Data capacity (decakilobits/s) of a link: 8b/10b channel coding leaves
 * 8 payload bits for every 10 transmitted.  See the units discussion above.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return max_link_clock * max_lanes * 8 / 10;
}
192
193 static enum drm_mode_status
194 intel_dp_mode_valid(struct drm_connector *connector,
195                     struct drm_display_mode *mode)
196 {
197         struct intel_dp *intel_dp = intel_attached_dp(connector);
198         struct intel_connector *intel_connector = to_intel_connector(connector);
199         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
200         int target_clock = mode->clock;
201         int max_rate, mode_rate, max_lanes, max_link_clock;
202         int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
203
204         if (is_edp(intel_dp) && fixed_mode) {
205                 if (mode->hdisplay > fixed_mode->hdisplay)
206                         return MODE_PANEL;
207
208                 if (mode->vdisplay > fixed_mode->vdisplay)
209                         return MODE_PANEL;
210
211                 target_clock = fixed_mode->clock;
212         }
213
214         max_link_clock = intel_dp_max_link_rate(intel_dp);
215         max_lanes = intel_dp_max_lane_count(intel_dp);
216
217         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
218         mode_rate = intel_dp_link_required(target_clock, 18);
219
220         if (mode_rate > max_rate || target_clock > max_dotclk)
221                 return MODE_CLOCK_HIGH;
222
223         if (mode->clock < 10000)
224                 return MODE_CLOCK_LOW;
225
226         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
227                 return MODE_H_ILLEGAL;
228
229         return MODE_OK;
230 }
231
/*
 * Pack up to 4 bytes of an AUX message into one 32-bit data-register value,
 * big-endian (first byte in the most significant position).  Bytes beyond
 * the fourth are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t value = 0;
	int byte;

	if (src_bytes > 4)
		src_bytes = 4;
	for (byte = 0; byte < src_bytes; byte++)
		value |= (uint32_t)src[byte] << (24 - byte * 8);

	return value;
}
243
/*
 * Unpack a 32-bit AUX data-register value into up to 4 bytes, big-endian
 * (most significant byte first).  Writes at most 4 bytes to @dst.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int byte;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (byte = 0; byte < dst_bytes; byte++)
		dst[byte] = (uint8_t)(src >> (24 - byte * 8));
}
252
253 static void
254 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
255                                     struct intel_dp *intel_dp);
256 static void
257 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
258                                               struct intel_dp *intel_dp);
259
/*
 * Acquire pps_mutex for this port, taking the port's AUX power domain
 * reference first.  The power domain get/put must happen while NOT holding
 * pps_mutex (see the lock-ordering comment in vlv_power_sequencer_reset()),
 * which is why callers use pps_lock()/pps_unlock() rather than the mutex
 * directly.
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
277
/*
 * Release pps_mutex and then drop the AUX power domain reference taken by
 * pps_lock() — the exact reverse order, so the power domain put happens
 * while the mutex is no longer held.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
291
292 static void
293 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
294 {
295         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
296         struct drm_device *dev = intel_dig_port->base.base.dev;
297         struct drm_i915_private *dev_priv = dev->dev_private;
298         enum pipe pipe = intel_dp->pps_pipe;
299         bool pll_enabled, release_cl_override = false;
300         enum dpio_phy phy = DPIO_PHY(pipe);
301         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
302         uint32_t DP;
303
304         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
305                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
306                  pipe_name(pipe), port_name(intel_dig_port->port)))
307                 return;
308
309         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
310                       pipe_name(pipe), port_name(intel_dig_port->port));
311
312         /* Preserve the BIOS-computed detected bit. This is
313          * supposed to be read-only.
314          */
315         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
316         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
317         DP |= DP_PORT_WIDTH(1);
318         DP |= DP_LINK_TRAIN_PAT_1;
319
320         if (IS_CHERRYVIEW(dev))
321                 DP |= DP_PIPE_SELECT_CHV(pipe);
322         else if (pipe == PIPE_B)
323                 DP |= DP_PIPEB_SELECT;
324
325         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
326
327         /*
328          * The DPLL for the pipe must be enabled for this to work.
329          * So enable temporarily it if it's not already enabled.
330          */
331         if (!pll_enabled) {
332                 release_cl_override = IS_CHERRYVIEW(dev) &&
333                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
334
335                 if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
336                                      &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
337                         DRM_ERROR("Failed to force on pll for pipe %c!\n",
338                                   pipe_name(pipe));
339                         return;
340                 }
341         }
342
343         /*
344          * Similar magic as in intel_dp_enable_port().
345          * We _must_ do this port enable + disable trick
346          * to make this power seqeuencer lock onto the port.
347          * Otherwise even VDD force bit won't work.
348          */
349         I915_WRITE(intel_dp->output_reg, DP);
350         POSTING_READ(intel_dp->output_reg);
351
352         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
353         POSTING_READ(intel_dp->output_reg);
354
355         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
356         POSTING_READ(intel_dp->output_reg);
357
358         if (!pll_enabled) {
359                 vlv_force_pll_off(dev, pipe);
360
361                 if (release_cl_override)
362                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
363         }
364 }
365
/*
 * Return the pipe whose panel power sequencer serves this eDP port,
 * assigning a free one on first use: steal it from any other user,
 * initialize its registers, and kick it so it locks onto the port.
 *
 * Must be called with dev_priv->pps_mutex held.  eDP ports only.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Fast path: a sequencer is already assigned. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
428
/* Predicate used by vlv_initial_pps_pipe() to filter candidate PPS pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Is the panel power sequencer for @pipe reporting panel power on? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* Is VDD being forced on by the power sequencer for @pipe? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Matches any pipe; the "just the right port" fallback check. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
449
450 static enum pipe
451 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
452                      enum port port,
453                      vlv_pipe_check pipe_check)
454 {
455         enum pipe pipe;
456
457         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
458                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
459                         PANEL_PORT_SELECT_MASK;
460
461                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
462                         continue;
463
464                 if (!pipe_check(dev_priv, pipe))
465                         continue;
466
467                 return pipe;
468         }
469
470         return INVALID_PIPE;
471 }
472
/*
 * At init time, adopt whichever power sequencer the BIOS already pointed
 * at this eDP port.  Preference order: a sequencer actively powering the
 * panel, then one with VDD forced on, then any with a matching port
 * select.  If none match, pps_pipe stays INVALID_PIPE and
 * vlv_power_sequencer_pipe() will pick one later.
 *
 * Must be called with dev_priv->pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
509
/*
 * Invalidate every eDP port's cached power sequencer assignment so it is
 * re-picked on next use.  VLV/CHV only.  Deliberately runs without
 * pps_mutex — see the lock-ordering comment below.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
538
/*
 * Panel power control register for this panel: fixed instance on BXT and
 * PCH-split platforms, per-PPS-pipe on VLV/CHV (which may assign a pps
 * pipe via vlv_power_sequencer_pipe(), so pps_mutex must be held there).
 */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
551
/*
 * Panel power status register for this panel; same selection logic as
 * _pp_ctrl_reg() (fixed on BXT/PCH, per-PPS-pipe on VLV/CHV).
 */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
564
/*
 * Reboot notifier handler: on SYS_RESTART, power the panel off and wait
 * out the power-cycle delay so the panel's T12 timing is guaranteed across
 * the reboot.  Only applicable when panel PM state is not to be tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* only eDP panels, and only on restart */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
600
/*
 * Is the panel power sequencer reporting panel power on?
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* on VLV/CHV a port with no PPS pipe assigned can't have power */
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
614
/*
 * Is VDD currently being forced on for the panel?
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* on VLV/CHV a port with no PPS pipe assigned can't have VDD forced */
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
628
/*
 * Sanity check before an AUX transfer: warn if an eDP panel has neither
 * panel power nor forced VDD up, since AUX communication needs one of the
 * two.  No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
645
/*
 * Wait for the in-flight AUX transaction to finish — i.e. for SEND_BUSY to
 * clear in the channel control register — either by sleeping on the gmbus
 * wait queue (interrupt-driven) or by atomic polling, both with a ~10ms
 * timeout.  Returns the last value read from the control register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C evaluates to true when the AUX channel is no longer busy; it also
 * refreshes 'status' on every evaluation. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
669
/*
 * AUX clock divider for g4x: a single divider (index 0 only) derived from
 * hrawclk.  Returning 0 for index > 0 terminates the caller's retry loop
 * over dividers.
 */
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
684
/*
 * AUX clock divider for ILK+: single divider (index 0 only), derived from
 * cdclk for the CPU eDP port (PORT_A) and from the PCH rawclk otherwise.
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (intel_dig_port->port == PORT_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
703
/*
 * AUX clock divider for HSW: LPT-H PCH ports need two hardcoded divider
 * values as a workaround (tried in turn via index 0/1); everything else
 * falls back to the ILK divider calculation.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
720
/*
 * SKL doesn't need us to program the AUX clock divider (hardware derives
 * the clock from CDCLK automatically).  This vfunc only exists to plug
 * into the existing divider-loop code: a dummy non-zero value for index 0
 * makes the loop run exactly once, and 0 thereafter terminates it.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index == 0)
		return 1;

	return 0;
}
730
/*
 * Compose the AUX_CH_CTL value that starts a transaction on g4x..bdw:
 * busy/done/error bits (write-one-to-clear), optional completion
 * interrupt, timeout, message size, precharge count and the 2x bit-clock
 * divider supplied by the platform's get_aux_clock_divider().
 */
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* gen6 uses a shorter precharge count */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW port A uses a longer 600us timeout */
	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
760
/*
 * Compose the AUX_CH_CTL value for SKL+: no clock divider field (hence the
 * @unused parameter, kept for vfunc signature compatibility); fixed 1600us
 * timeout and 32-cycle sync/fast-wake pulse counts.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
776
/*
 * intel_dp_aux_ch - perform one raw DP AUX channel transaction
 * @intel_dp: port to transact on
 * @send: request bytes (AUX header plus optional payload)
 * @send_bytes: length of @send; must be <= 20 (5 x 32-bit data registers)
 * @recv: buffer receiving the reply bytes
 * @recv_size: capacity of @recv; must be <= 20
 *
 * Runs under the pps mutex with (for eDP) panel VDD forced on, and with
 * a zero pm_qos latency request to keep the CPU out of deep C-states
 * during the latency-sensitive transfer.
 *
 * Returns the number of reply bytes read back on success, or a negative
 * errno: -EBUSY (channel busy / bogus reply size), -E2BIG (caller buffer
 * limits exceeded), -EIO (receive error), -ETIMEDOUT (sink timeout).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the warning: only report when the stuck status
		 * value changes, to avoid spamming the log. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the platform's AUX clock dividers (a zero divider
	 * terminates); inner loop retries the transfer per the DP spec. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	/* Drop VDD only if we turned it on ourselves above. */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
948
949 #define BARE_ADDRESS_SIZE       3
950 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * intel_dp_aux_transfer - drm_dp_aux ->transfer() hook
 * @aux: the drm AUX adapter embedded in struct intel_dp
 * @msg: request/reply descriptor from the DRM DP helpers
 *
 * Packs @msg into the native AUX wire format (a 4-byte header, or a
 * 3-byte bare address for zero-size "address-only" transactions, plus
 * an optional write payload), runs it through intel_dp_aux_ch() and
 * decodes the reply into @msg->reply / @msg->buffer.
 *
 * Returns the payload size transferred, or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the AUX header: command, 20-bit address, length.
	 * Per the DP spec the length field holds (size - 1). */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size writes are sent as bare address-only frames. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* Reply code lives in the high nibble of byte 0. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* reply byte + payload */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1023
1024 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1025                                        enum port port)
1026 {
1027         switch (port) {
1028         case PORT_B:
1029         case PORT_C:
1030         case PORT_D:
1031                 return DP_AUX_CH_CTL(port);
1032         default:
1033                 MISSING_CASE(port);
1034                 return DP_AUX_CH_CTL(PORT_B);
1035         }
1036 }
1037
1038 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1039                                         enum port port, int index)
1040 {
1041         switch (port) {
1042         case PORT_B:
1043         case PORT_C:
1044         case PORT_D:
1045                 return DP_AUX_CH_DATA(port, index);
1046         default:
1047                 MISSING_CASE(port);
1048                 return DP_AUX_CH_DATA(PORT_B, index);
1049         }
1050 }
1051
1052 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1053                                        enum port port)
1054 {
1055         switch (port) {
1056         case PORT_A:
1057                 return DP_AUX_CH_CTL(port);
1058         case PORT_B:
1059         case PORT_C:
1060         case PORT_D:
1061                 return PCH_DP_AUX_CH_CTL(port);
1062         default:
1063                 MISSING_CASE(port);
1064                 return DP_AUX_CH_CTL(PORT_A);
1065         }
1066 }
1067
1068 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1069                                         enum port port, int index)
1070 {
1071         switch (port) {
1072         case PORT_A:
1073                 return DP_AUX_CH_DATA(port, index);
1074         case PORT_B:
1075         case PORT_C:
1076         case PORT_D:
1077                 return PCH_DP_AUX_CH_DATA(port, index);
1078         default:
1079                 MISSING_CASE(port);
1080                 return DP_AUX_CH_DATA(PORT_A, index);
1081         }
1082 }
1083
1084 /*
1085  * On SKL we don't have Aux for port E so we rely
1086  * on VBT to set a proper alternate aux channel.
1087  */
1088 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1089 {
1090         const struct ddi_vbt_port_info *info =
1091                 &dev_priv->vbt.ddi_port_info[PORT_E];
1092
1093         switch (info->alternate_aux_channel) {
1094         case DP_AUX_A:
1095                 return PORT_A;
1096         case DP_AUX_B:
1097                 return PORT_B;
1098         case DP_AUX_C:
1099                 return PORT_C;
1100         case DP_AUX_D:
1101                 return PORT_D;
1102         default:
1103                 MISSING_CASE(info->alternate_aux_channel);
1104                 return PORT_A;
1105         }
1106 }
1107
1108 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1109                                        enum port port)
1110 {
1111         if (port == PORT_E)
1112                 port = skl_porte_aux_port(dev_priv);
1113
1114         switch (port) {
1115         case PORT_A:
1116         case PORT_B:
1117         case PORT_C:
1118         case PORT_D:
1119                 return DP_AUX_CH_CTL(port);
1120         default:
1121                 MISSING_CASE(port);
1122                 return DP_AUX_CH_CTL(PORT_A);
1123         }
1124 }
1125
1126 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1127                                         enum port port, int index)
1128 {
1129         if (port == PORT_E)
1130                 port = skl_porte_aux_port(dev_priv);
1131
1132         switch (port) {
1133         case PORT_A:
1134         case PORT_B:
1135         case PORT_C:
1136         case PORT_D:
1137                 return DP_AUX_CH_DATA(port, index);
1138         default:
1139                 MISSING_CASE(port);
1140                 return DP_AUX_CH_DATA(PORT_A, index);
1141         }
1142 }
1143
1144 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1145                                          enum port port)
1146 {
1147         if (INTEL_INFO(dev_priv)->gen >= 9)
1148                 return skl_aux_ctl_reg(dev_priv, port);
1149         else if (HAS_PCH_SPLIT(dev_priv))
1150                 return ilk_aux_ctl_reg(dev_priv, port);
1151         else
1152                 return g4x_aux_ctl_reg(dev_priv, port);
1153 }
1154
1155 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1156                                           enum port port, int index)
1157 {
1158         if (INTEL_INFO(dev_priv)->gen >= 9)
1159                 return skl_aux_data_reg(dev_priv, port, index);
1160         else if (HAS_PCH_SPLIT(dev_priv))
1161                 return ilk_aux_data_reg(dev_priv, port, index);
1162         else
1163                 return g4x_aux_data_reg(dev_priv, port, index);
1164 }
1165
1166 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1167 {
1168         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1169         enum port port = dp_to_dig_port(intel_dp)->port;
1170         int i;
1171
1172         intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1173         for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1174                 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1175 }
1176
/* Release resources allocated by intel_dp_aux_init() — currently just
 * the kasprintf()'d AUX bus name. */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1182
1183 static int
1184 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1185 {
1186         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1187         enum port port = intel_dig_port->port;
1188         int ret;
1189
1190         intel_aux_reg_init(intel_dp);
1191
1192         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1193         if (!intel_dp->aux.name)
1194                 return -ENOMEM;
1195
1196         intel_dp->aux.dev = connector->base.kdev;
1197         intel_dp->aux.transfer = intel_dp_aux_transfer;
1198
1199         DRM_DEBUG_KMS("registering %s bus for %s\n",
1200                       intel_dp->aux.name,
1201                       connector->base.kdev->kobj.name);
1202
1203         ret = drm_dp_aux_register(&intel_dp->aux);
1204         if (ret < 0) {
1205                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1206                           intel_dp->aux.name, ret);
1207                 kfree(intel_dp->aux.name);
1208                 return ret;
1209         }
1210
1211         return 0;
1212 }
1213
1214 static int
1215 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1216 {
1217         if (intel_dp->num_sink_rates) {
1218                 *sink_rates = intel_dp->sink_rates;
1219                 return intel_dp->num_sink_rates;
1220         }
1221
1222         *sink_rates = default_rates;
1223
1224         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1225 }
1226
1227 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1228 {
1229         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1230         struct drm_device *dev = dig_port->base.base.dev;
1231
1232         /* WaDisableHBR2:skl */
1233         if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1234                 return false;
1235
1236         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1237             (INTEL_INFO(dev)->gen >= 9))
1238                 return true;
1239         else
1240                 return false;
1241 }
1242
1243 static int
1244 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1245 {
1246         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1247         struct drm_device *dev = dig_port->base.base.dev;
1248         int size;
1249
1250         if (IS_BROXTON(dev)) {
1251                 *source_rates = bxt_rates;
1252                 size = ARRAY_SIZE(bxt_rates);
1253         } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1254                 *source_rates = skl_rates;
1255                 size = ARRAY_SIZE(skl_rates);
1256         } else {
1257                 *source_rates = default_rates;
1258                 size = ARRAY_SIZE(default_rates);
1259         }
1260
1261         /* This depends on the fact that 5.4 is last value in the array */
1262         if (!intel_dp_source_supports_hbr2(intel_dp))
1263                 size--;
1264
1265         return size;
1266 }
1267
1268 static void
1269 intel_dp_set_clock(struct intel_encoder *encoder,
1270                    struct intel_crtc_state *pipe_config)
1271 {
1272         struct drm_device *dev = encoder->base.dev;
1273         const struct dp_link_dpll *divisor = NULL;
1274         int i, count = 0;
1275
1276         if (IS_G4X(dev)) {
1277                 divisor = gen4_dpll;
1278                 count = ARRAY_SIZE(gen4_dpll);
1279         } else if (HAS_PCH_SPLIT(dev)) {
1280                 divisor = pch_dpll;
1281                 count = ARRAY_SIZE(pch_dpll);
1282         } else if (IS_CHERRYVIEW(dev)) {
1283                 divisor = chv_dpll;
1284                 count = ARRAY_SIZE(chv_dpll);
1285         } else if (IS_VALLEYVIEW(dev)) {
1286                 divisor = vlv_dpll;
1287                 count = ARRAY_SIZE(vlv_dpll);
1288         }
1289
1290         if (divisor && count) {
1291                 for (i = 0; i < count; i++) {
1292                         if (pipe_config->port_clock == divisor[i].clock) {
1293                                 pipe_config->dpll = divisor[i].dpll;
1294                                 pipe_config->clock_set = true;
1295                                 break;
1296                         }
1297                 }
1298         }
1299 }
1300
1301 static int intersect_rates(const int *source_rates, int source_len,
1302                            const int *sink_rates, int sink_len,
1303                            int *common_rates)
1304 {
1305         int i = 0, j = 0, k = 0;
1306
1307         while (i < source_len && j < sink_len) {
1308                 if (source_rates[i] == sink_rates[j]) {
1309                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1310                                 return k;
1311                         common_rates[k] = source_rates[i];
1312                         ++k;
1313                         ++i;
1314                         ++j;
1315                 } else if (source_rates[i] < sink_rates[j]) {
1316                         ++i;
1317                 } else {
1318                         ++j;
1319                 }
1320         }
1321         return k;
1322 }
1323
/* Fill @common_rates with the rates both source and sink support;
 * returns the number of common rates. */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *snk_rates, *src_rates;
	int n_snk, n_src;

	n_snk = intel_dp_sink_rates(intel_dp, &snk_rates);
	n_src = intel_dp_source_rates(intel_dp, &src_rates);

	return intersect_rates(src_rates, n_src,
			       snk_rates, n_snk,
			       common_rates);
}
1337
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list ("162000, 270000, ...").  Output is silently truncated when
 * @len is too small; @str is always NUL-terminated.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on output error (r < 0) or truncation.  The explicit
		 * r < 0 test avoids relying on the signed int being promoted
		 * to a huge unsigned value in a comparison against the
		 * size_t @len.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1353
1354 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1355 {
1356         const int *source_rates, *sink_rates;
1357         int source_len, sink_len, common_len;
1358         int common_rates[DP_MAX_SUPPORTED_RATES];
1359         char str[128]; /* FIXME: too big for stack? */
1360
1361         if ((drm_debug & DRM_UT_KMS) == 0)
1362                 return;
1363
1364         source_len = intel_dp_source_rates(intel_dp, &source_rates);
1365         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1366         DRM_DEBUG_KMS("source rates: %s\n", str);
1367
1368         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1369         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1370         DRM_DEBUG_KMS("sink rates: %s\n", str);
1371
1372         common_len = intel_dp_common_rates(intel_dp, common_rates);
1373         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1374         DRM_DEBUG_KMS("common rates: %s\n", str);
1375 }
1376
1377 static int rate_to_index(int find, const int *rates)
1378 {
1379         int i = 0;
1380
1381         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1382                 if (find == rates[i])
1383                         break;
1384
1385         return i;
1386 }
1387
1388 int
1389 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1390 {
1391         int rates[DP_MAX_SUPPORTED_RATES] = {};
1392         int len;
1393
1394         len = intel_dp_common_rates(intel_dp, rates);
1395         if (WARN_ON(len <= 0))
1396                 return 162000;
1397
1398         return rates[rate_to_index(0, rates) - 1];
1399 }
1400
/* Translate @rate into an index into the sink's rate table (for the
 * DPCD rate-select field).  Callers are expected to pass a rate that
 * is actually in intel_dp->sink_rates; rate_to_index() returns
 * DP_MAX_SUPPORTED_RATES when it is not. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1405
1406 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1407                            uint8_t *link_bw, uint8_t *rate_select)
1408 {
1409         if (intel_dp->num_sink_rates) {
1410                 *link_bw = 0;
1411                 *rate_select =
1412                         intel_dp_rate_select(intel_dp, port_clock);
1413         } else {
1414                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1415                 *rate_select = 0;
1416         }
1417 }
1418
1419 bool
1420 intel_dp_compute_config(struct intel_encoder *encoder,
1421                         struct intel_crtc_state *pipe_config)
1422 {
1423         struct drm_device *dev = encoder->base.dev;
1424         struct drm_i915_private *dev_priv = dev->dev_private;
1425         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1426         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1427         enum port port = dp_to_dig_port(intel_dp)->port;
1428         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1429         struct intel_connector *intel_connector = intel_dp->attached_connector;
1430         int lane_count, clock;
1431         int min_lane_count = 1;
1432         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1433         /* Conveniently, the link BW constants become indices with a shift...*/
1434         int min_clock = 0;
1435         int max_clock;
1436         int bpp, mode_rate;
1437         int link_avail, link_clock;
1438         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1439         int common_len;
1440         uint8_t link_bw, rate_select;
1441
1442         common_len = intel_dp_common_rates(intel_dp, common_rates);
1443
1444         /* No common link rates between source and sink */
1445         WARN_ON(common_len <= 0);
1446
1447         max_clock = common_len - 1;
1448
1449         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1450                 pipe_config->has_pch_encoder = true;
1451
1452         pipe_config->has_dp_encoder = true;
1453         pipe_config->has_drrs = false;
1454         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1455
1456         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1457                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1458                                        adjusted_mode);
1459
1460                 if (INTEL_INFO(dev)->gen >= 9) {
1461                         int ret;
1462                         ret = skl_update_scaler_crtc(pipe_config);
1463                         if (ret)
1464                                 return ret;
1465                 }
1466
1467                 if (HAS_GMCH_DISPLAY(dev))
1468                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1469                                                  intel_connector->panel.fitting_mode);
1470                 else
1471                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1472                                                 intel_connector->panel.fitting_mode);
1473         }
1474
1475         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1476                 return false;
1477
1478         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1479                       "max bw %d pixel clock %iKHz\n",
1480                       max_lane_count, common_rates[max_clock],
1481                       adjusted_mode->crtc_clock);
1482
1483         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1484          * bpc in between. */
1485         bpp = pipe_config->pipe_bpp;
1486         if (is_edp(intel_dp)) {
1487
1488                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1489                 if (intel_connector->base.display_info.bpc == 0 &&
1490                         (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1491                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1492                                       dev_priv->vbt.edp.bpp);
1493                         bpp = dev_priv->vbt.edp.bpp;
1494                 }
1495
1496                 /*
1497                  * Use the maximum clock and number of lanes the eDP panel
1498                  * advertizes being capable of. The panels are generally
1499                  * designed to support only a single clock and lane
1500                  * configuration, and typically these values correspond to the
1501                  * native resolution of the panel.
1502                  */
1503                 min_lane_count = max_lane_count;
1504                 min_clock = max_clock;
1505         }
1506
1507         for (; bpp >= 6*3; bpp -= 2*3) {
1508                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1509                                                    bpp);
1510
1511                 for (clock = min_clock; clock <= max_clock; clock++) {
1512                         for (lane_count = min_lane_count;
1513                                 lane_count <= max_lane_count;
1514                                 lane_count <<= 1) {
1515
1516                                 link_clock = common_rates[clock];
1517                                 link_avail = intel_dp_max_data_rate(link_clock,
1518                                                                     lane_count);
1519
1520                                 if (mode_rate <= link_avail) {
1521                                         goto found;
1522                                 }
1523                         }
1524                 }
1525         }
1526
1527         return false;
1528
1529 found:
1530         if (intel_dp->color_range_auto) {
1531                 /*
1532                  * See:
1533                  * CEA-861-E - 5.1 Default Encoding Parameters
1534                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1535                  */
1536                 pipe_config->limited_color_range =
1537                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1538         } else {
1539                 pipe_config->limited_color_range =
1540                         intel_dp->limited_color_range;
1541         }
1542
1543         pipe_config->lane_count = lane_count;
1544
1545         pipe_config->pipe_bpp = bpp;
1546         pipe_config->port_clock = common_rates[clock];
1547
1548         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1549                               &link_bw, &rate_select);
1550
1551         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1552                       link_bw, rate_select, pipe_config->lane_count,
1553                       pipe_config->port_clock, bpp);
1554         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1555                       mode_rate, link_avail);
1556
1557         intel_link_compute_m_n(bpp, lane_count,
1558                                adjusted_mode->crtc_clock,
1559                                pipe_config->port_clock,
1560                                &pipe_config->dp_m_n);
1561
1562         if (intel_connector->panel.downclock_mode != NULL &&
1563                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1564                         pipe_config->has_drrs = true;
1565                         intel_link_compute_m_n(bpp, lane_count,
1566                                 intel_connector->panel.downclock_mode->clock,
1567                                 pipe_config->port_clock,
1568                                 &pipe_config->dp_m2_n2);
1569         }
1570
1571         /*
1572          * DPLL0 VCO may need to be adjusted to get the correct
1573          * clock for eDP. This will affect cdclk as well.
1574          */
1575         if (is_edp(intel_dp) &&
1576             (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
1577                 int vco;
1578
1579                 switch (pipe_config->port_clock / 2) {
1580                 case 108000:
1581                 case 216000:
1582                         vco = 8640000;
1583                         break;
1584                 default:
1585                         vco = 8100000;
1586                         break;
1587                 }
1588
1589                 to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
1590         }
1591
1592         if (!HAS_DDI(dev))
1593                 intel_dp_set_clock(encoder, pipe_config);
1594
1595         return true;
1596 }
1597
/* Cache the link parameters chosen during compute_config so later
 * link training / retraining uses the same rate and lane count. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1604
/*
 * Build the DP port register value for the upcoming modeset and cache it
 * in intel_dp->DP (also caching link rate/lane count via
 * intel_dp_set_link_params()).  Only TRANS_DP_CTL (CPT path) is actually
 * written to hardware here; the port register itself is programmed later
 * in the enable sequence.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: CPT-style training bits, pipe selected via bit 29 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/*
		 * CPT PCH: only the enhanced-framing bit is updated in
		 * TRANS_DP_CTL here; the rest of that register is handled
		 * elsewhere (see comment above).
		 */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH and CPU (non-CPT) register format */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		/* CHV has a dedicated 2-bit pipe select; older hw has one bit */
		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1686
/*
 * PP_STATUS mask/value pairs consumed by wait_panel_status(): wait until
 * (PP_STATUS &amp; *_MASK) == *_VALUE for the panel-on, panel-off and
 * power-cycle-complete states respectively.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1695
/*
 * Poll PP_STATUS every 10ms until (status &amp; mask) == value, giving up
 * after 5 seconds.  A timeout is logged with DRM_ERROR but is not fatal.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
1722
/* Block until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1728
/* Block until the power sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1734
/*
 * Ensure the panel power-cycle delay (t11_t12) has elapsed since the
 * panel was last powered off before allowing it to power up again, then
 * wait for the sequencer to reach the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1755
/* Honour the panel's power-on -> backlight-on delay (t8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1761
/* Honour the backlight-off delay (t9) after disabling the backlight. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1767
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* Broxton has no write-protect key in this register; everywhere
	 * else substitute the unlock key so subsequent writes stick. */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1787
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on (EDP_FORCE_VDD) so the AUX channel can be used
 * before full panel power-up.  Returns true if VDD was not already
 * requested, i.e. the caller now owns a matching vdd_off.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	/* Snapshot before we set want_panel_vdd below */
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Keep a pending deferred vdd-off from racing with this on */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* Already forced on in hardware: nothing more to program */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Reference is dropped in edp_panel_vdd_off_sync() */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1845
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 *
 * Locked wrapper around edp_panel_vdd_on(); warns if VDD was already
 * requested, since that indicates unbalanced on/off calls.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
1867
/*
 * Immediately clear the VDD force bit and drop the AUX power-domain
 * reference taken in edp_panel_vdd_on().  Records the power-off
 * timestamp if the panel itself is also off, so the next power-up can
 * honour the power-cycle delay.  Caller must hold pps_mutex and must
 * not still want VDD (WARNed below).
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1908
1909 static void edp_panel_vdd_work(struct work_struct *__work)
1910 {
1911         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1912                                                  struct intel_dp, panel_vdd_work);
1913
1914         pps_lock(intel_dp);
1915         if (!intel_dp->want_panel_vdd)
1916                 edp_panel_vdd_off_sync(intel_dp);
1917         pps_unlock(intel_dp);
1918 }
1919
1920 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1921 {
1922         unsigned long delay;
1923
1924         /*
1925          * Queue the timer to fire a long time from now (relative to the power
1926          * down delay) to keep the panel power up across a sequence of
1927          * operations.
1928          */
1929         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1930         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1931 }
1932
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: true = turn VDD off immediately; false = defer via delayed work
 * so a quickly-following vdd_on can keep it up.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1958
/*
 * Run the panel power-up sequence: honour the power-cycle delay, set
 * POWER_TARGET_ON, wait for the sequencer to report on/idle, and record
 * the power-on time for the later backlight-on delay.  Caller must hold
 * pps_mutex.  No-op (with WARN) if the panel is already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2006
/* Locked wrapper around edp_panel_on(); no-op for non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2016
2017
/*
 * Run the panel power-down sequence: clear panel power, reset and VDD
 * force bits in one write, wait for the sequencer to report off, and
 * drop the AUX power reference that VDD-on took.  Caller must hold
 * pps_mutex and must have VDD forced on (WARNed below).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2059
/* Locked wrapper around edp_panel_off(); no-op for non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2069
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Set EDP_BLC_ENABLE in PP_CONTROL under the PPS lock */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2099
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control enable bit */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2111
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Clear EDP_BLC_ENABLE in PP_CONTROL under the PPS lock */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time and honour the backlight-off delay */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2138
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of enable: PP control bit first, then PWM */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2150
2151 /*
2152  * Hook for controlling the panel power control backlight through the bl_power
2153  * sysfs attribute. Take care to handle multiple calls.
2154  */
2155 static void intel_edp_backlight_power(struct intel_connector *connector,
2156                                       bool enable)
2157 {
2158         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2159         bool is_enabled;
2160
2161         pps_lock(intel_dp);
2162         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2163         pps_unlock(intel_dp);
2164
2165         if (is_enabled == enable)
2166                 return;
2167
2168         DRM_DEBUG_KMS("panel power control backlight %s\n",
2169                       enable ? "enable" : "disable");
2170
2171         if (enable)
2172                 _intel_edp_backlight_on(intel_dp);
2173         else
2174                 _intel_edp_backlight_off(intel_dp);
2175 }
2176
/*
 * State assertion: WARN if the DP port enable bit does not match the
 * expected @state.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2189
/*
 * State assertion: WARN if the eDP PLL enable bit in DP_A does not
 * match the expected @state.
 */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2200
/*
 * Enable the CPU eDP PLL (DP_A register): program the frequency select
 * for the crtc's port clock, then set the enable bit in a second write.
 * Must be called with the pipe disabled and the port/PLL off (asserted
 * below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	/* Frequency select must settle before the enable bit is set */
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2240
/*
 * Disable the CPU eDP PLL.  Must be called with the pipe disabled and
 * the DP port off but the PLL still enabled (asserted below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2259
2260 /* If the sink supports it, try to set the power state appropriately */
2261 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2262 {
2263         int ret, i;
2264
2265         /* Should have a valid DPCD by this point */
2266         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2267                 return;
2268
2269         if (mode != DRM_MODE_DPMS_ON) {
2270                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2271                                          DP_SET_POWER_D3);
2272         } else {
2273                 /*
2274                  * When turning on, we need to retry for 1ms to give the sink
2275                  * time to wake up.
2276                  */
2277                 for (i = 0; i < 3; i++) {
2278                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2279                                                  DP_SET_POWER_D0);
2280                         if (ret == 1)
2281                                 break;
2282                         msleep(1);
2283                 }
2284         }
2285
2286         if (ret != 1)
2287                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2288                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2289 }
2290
/*
 * Read back whether this DP encoder is enabled in hardware and, if so,
 * which pipe drives it (*pipe).  Returns false if the port's power
 * domain is off, the port enable bit is clear, or (on CPT) no
 * transcoder maps to this port.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	/* Pipe select encoding differs per platform; mirror intel_dp_prepare() */
	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe<->port mapping lives in TRANS_DP_CTL */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2343
/*
 * Read back the DP encoder configuration from hardware into
 * @pipe_config: sync polarity flags, limited color range, lane count,
 * m/n values, port clock (for port A from the DP_A PLL frequency
 * select) and the derived dotclock.  Also clamps a bogus VBT eDP bpp to
 * what the BIOS actually programmed (see the comment below).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* Sync polarity lives in TRANS_DP_CTL on CPT, in the port reg otherwise */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A: derive the link clock from the DP_A PLL frequency select */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2426
/*
 * Encoder disable hook: shut down audio and (non-DDI) PSR, then run the
 * eDP power-down sequence — backlight off, sink to D3, panel off —
 * while holding VDD so the panel stays reachable during the switch.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2450
2451 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2452 {
2453         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2454         enum port port = dp_to_dig_port(intel_dp)->port;
2455
2456         intel_dp_link_down(intel_dp);
2457
2458         /* Only ilk+ has port A */
2459         if (port == PORT_A)
2460                 ironlake_edp_pll_off(intel_dp);
2461 }
2462
2463 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2464 {
2465         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2466
2467         intel_dp_link_down(intel_dp);
2468 }
2469
2470 static void chv_post_disable_dp(struct intel_encoder *encoder)
2471 {
2472         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473         struct drm_device *dev = encoder->base.dev;
2474         struct drm_i915_private *dev_priv = dev->dev_private;
2475
2476         intel_dp_link_down(intel_dp);
2477
2478         mutex_lock(&dev_priv->sb_lock);
2479
2480         /* Assert data lane reset */
2481         chv_data_lane_soft_reset(encoder, true);
2482
2483         mutex_unlock(&dev_priv->sb_lock);
2484 }
2485
/*
 * Select the source-side link training pattern.
 *
 * @DP is the cached DP port register image, updated in place on the
 * non-DDI paths (the caller writes it to the hardware); @dp_train_pat
 * carries a DP_TRAINING_PATTERN_* selector plus the
 * DP_LINK_SCRAMBLING_DISABLE flag. On DDI platforms the pattern lives
 * in DP_TP_CTL instead, which this function writes directly.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		/* DDI: program DP_TP_CTL directly, read-modify-write. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style port register field (gen7 port A uses it too). */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No pattern 3 on this hardware; fall back to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* g4x/VLV/CHV style port register field. */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV supports pattern 3 here. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2569
/*
 * Enable the DP port with training pattern 1 selected, using the
 * mandatory two-step register write (program first, then set
 * DP_PORT_EN in a second write).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2597
/*
 * Common DP enable path: set up the power sequencer (VLV/CHV), enable
 * the port, run the eDP panel power-on sequence, train the link and
 * finally enable audio.
 *
 * The port enable and panel power-on must happen under the pps lock;
 * the ordering of the steps is sequence-critical.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;

	/* The port must still be disabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* eDP panel power-on, bracketed by vdd on/off. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		/* CHV only waits for the lanes actually in use. */
		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
2643
2644 static void g4x_enable_dp(struct intel_encoder *encoder)
2645 {
2646         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2647
2648         intel_enable_dp(encoder);
2649         intel_edp_backlight_on(intel_dp);
2650 }
2651
2652 static void vlv_enable_dp(struct intel_encoder *encoder)
2653 {
2654         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2655
2656         intel_edp_backlight_on(intel_dp);
2657         intel_psr_enable(intel_dp);
2658 }
2659
2660 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2661 {
2662         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2663         enum port port = dp_to_dig_port(intel_dp)->port;
2664
2665         intel_dp_prepare(encoder);
2666
2667         /* Only ilk+ has port A */
2668         if (port == PORT_A)
2669                 ironlake_edp_pll_on(intel_dp);
2670 }
2671
/*
 * Disconnect the power sequencer currently assigned to this eDP port:
 * turn vdd off while we still control the sequencer, then clear the
 * PPS port select and mark pps_pipe invalid.
 *
 * Callers run under pps_mutex (see the lockdep asserts in
 * vlv_steal_power_sequencer()/vlv_init_panel_power_sequencer()).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2697
/*
 * Take @pipe's power sequencer away from whichever eDP port currently
 * has it assigned, detaching it (which also turns off vdd).
 *
 * Only pipes A and B have power sequencers here. Warns if we steal
 * from a port that is still active (its encoder has a crtc).
 * Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP ports own power sequencers. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2733
/*
 * Assign and initialize the power sequencer of the current crtc's pipe
 * for this eDP port: detach any sequencer this port previously used,
 * steal the target pipe's sequencer from any other port, then program
 * the panel power sequencing registers.
 *
 * No-op for non-eDP ports or if the right sequencer is already
 * assigned. Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already using the right sequencer? Then we're done. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2774
/* VLV pre-enable hook: program the PHY, then enable the port. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	vlv_phy_pre_encoder_enable(encoder);
	intel_enable_dp(encoder);
}
2781
/* VLV pre-PLL hook: prepare the port register image, then the PHY. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	intel_dp_prepare(encoder);
	vlv_phy_pre_pll_enable(encoder);
}
2788
/*
 * CHV pre-enable hook: program the PHY, enable the port, then drop
 * the common-lane override once the lane can keep itself alive.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	chv_phy_pre_encoder_enable(encoder);
	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
2798
/* CHV pre-PLL hook: prepare the port register image, then the PHY. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	intel_dp_prepare(encoder);
	chv_phy_pre_pll_enable(encoder);
}
2805
/* CHV post-PLL-disable hook: PHY teardown via the common helper. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	chv_phy_post_pll_disable(encoder);
}
2810
2811 /*
2812  * Fetch AUX CH registers 0x202 - 0x207 which contain
2813  * link status information
2814  */
2815 bool
2816 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2817 {
2818         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
2819                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2820 }
2821
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level this source can drive on this
 * port, as a DP_TRAIN_VOLTAGE_SWING_LEVEL_* value. The order of the
 * platform checks matters (gen9+ before VLV/CHV before gen7/CPT).
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9+ eDP on port A may use the VBT low-vswing tables. */
		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
2845
/*
 * Return the maximum pre-emphasis level this source supports for the
 * given requested @voltage_swing, as a DP_TRAIN_PRE_EMPH_LEVEL_* value.
 * Each platform family has its own table; higher swings generally
 * allow less pre-emphasis.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2913
/*
 * Translate the link-training request (train_set[0]) into VLV PHY
 * de-emphasis / pre-emphasis / unique-transition-scale register values
 * and program them via vlv_set_phy_signal_level().
 *
 * The hex constants are opaque, hardware-tuned PHY values. Unsupported
 * swing/pre-emphasis combinations leave the PHY untouched. Always
 * returns 0 since the levels go to the PHY, not the DP port register.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
2999
/*
 * Translate the link-training request (train_set[0]) into CHV PHY
 * de-emphasis / margin values and program them via
 * chv_set_phy_signal_level(). The numeric values are hardware-tuned;
 * the unique-transition scale is only enabled for the max-swing,
 * no-pre-emphasis case. Unsupported combinations leave the PHY
 * untouched. Always returns 0 (levels go to the PHY, not the DP
 * port register).
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3082
3083 static uint32_t
3084 gen4_signal_levels(uint8_t train_set)
3085 {
3086         uint32_t        signal_levels = 0;
3087
3088         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3089         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3090         default:
3091                 signal_levels |= DP_VOLTAGE_0_4;
3092                 break;
3093         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3094                 signal_levels |= DP_VOLTAGE_0_6;
3095                 break;
3096         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3097                 signal_levels |= DP_VOLTAGE_0_8;
3098                 break;
3099         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3100                 signal_levels |= DP_VOLTAGE_1_2;
3101                 break;
3102         }
3103         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3104         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3105         default:
3106                 signal_levels |= DP_PRE_EMPHASIS_0;
3107                 break;
3108         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3109                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3110                 break;
3111         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3112                 signal_levels |= DP_PRE_EMPHASIS_6;
3113                 break;
3114         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3115                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3116                 break;
3117         }
3118         return signal_levels;
3119 }
3120
/* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map the combined swing+pre-emphasis request to a gen6 (SNB) eDP
 * register value. Several DPCD combinations share one hardware
 * setting; unsupported combinations fall back to the lowest level
 * with a debug message.
 */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3148
/* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Map the combined swing+pre-emphasis request to a gen7 (IVB) eDP
 * register value. Unsupported combinations log a debug message and
 * fall back to the 500mV/0dB setting (note: not the 400mV minimum).
 */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3179
/*
 * Compute the platform-specific voltage swing / pre-emphasis encoding for
 * the current train_set[0] and program it into the port's DP register.
 *
 * Only the bits covered by @mask are replaced in the cached intel_dp->DP
 * value; when mask stays 0 (DDI paths) the register's level bits are left
 * untouched and only signal_levels (0 on Broxton) is OR-ed in.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	/* dev_priv is referenced implicitly by the I915_WRITE/POSTING_READ macros */
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* Broxton: nothing is merged into the DP register here */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Raw levels are only meaningful for the register-merge paths */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3226
/*
 * Update the cached DP register value with the requested link-training
 * pattern (via _intel_dp_set_link_train()) and program it into the port.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	/* dev_priv is referenced implicitly by the I915_WRITE/POSTING_READ macros */
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3240
/*
 * Switch a DDI port's DP transport control to idle-pattern transmission
 * and, except on PORT_A, wait for the hardware to signal idle-done.
 * No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	/* dev_priv is referenced implicitly by the I915_READ/I915_WRITE macros */
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Replace the current link-train mode with the idle pattern */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3271
/*
 * Tear the DP link down on pre-DDI platforms: first drop the port into
 * idle link-training, then clear the port-enable and audio bits.  Also
 * applies the IBX transcoder-A workaround when disabling a pipe-B port.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	/* dev_priv is referenced implicitly by the I915_READ/I915_WRITE macros */
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI ports are shut down through the DDI code paths instead */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already off */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* CPT (and gen7 PORT_A) use a different link-train field layout */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually disable the port (and any audio output) */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3340
3341 static bool
3342 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3343 {
3344         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3345         struct drm_device *dev = dig_port->base.base.dev;
3346         struct drm_i915_private *dev_priv = dev->dev_private;
3347
3348         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3349                              sizeof(intel_dp->dpcd)) < 0)
3350                 return false; /* aux transfer failed */
3351
3352         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3353
3354         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3355                 return false; /* DPCD not present */
3356
3357         if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
3358                              &intel_dp->sink_count, 1) < 0)
3359                 return false;
3360
3361         /*
3362          * Sink count can change between short pulse hpd hence
3363          * a member variable in intel_dp will track any changes
3364          * between short pulse interrupts.
3365          */
3366         intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
3367
3368         /*
3369          * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3370          * a dongle is present but no display. Unless we require to know
3371          * if a dongle is present or not, we don't need to update
3372          * downstream port information. So, an early return here saves
3373          * time from performing other operations which are not required.
3374          */
3375         if (!is_edp(intel_dp) && !intel_dp->sink_count)
3376                 return false;
3377
3378         /* Check if the panel supports PSR */
3379         memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3380         if (is_edp(intel_dp)) {
3381                 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
3382                                  intel_dp->psr_dpcd,
3383                                  sizeof(intel_dp->psr_dpcd));
3384                 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3385                         dev_priv->psr.sink_support = true;
3386                         DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3387                 }
3388
3389                 if (INTEL_INFO(dev)->gen >= 9 &&
3390                         (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3391                         uint8_t frame_sync_cap;
3392
3393                         dev_priv->psr.sink_support = true;
3394                         drm_dp_dpcd_read(&intel_dp->aux,
3395                                          DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3396                                          &frame_sync_cap, 1);
3397                         dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3398                         /* PSR2 needs frame sync as well */
3399                         dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3400                         DRM_DEBUG_KMS("PSR2 %s on sink",
3401                                 dev_priv->psr.psr2_support ? "supported" : "not supported");
3402                 }
3403
3404                 /* Read the eDP Display control capabilities registers */
3405                 memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
3406                 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3407                                 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3408                                                 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3409                                                                 sizeof(intel_dp->edp_dpcd)))
3410                         DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3411                                         intel_dp->edp_dpcd);
3412         }
3413
3414         DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3415                       yesno(intel_dp_source_supports_hbr2(intel_dp)),
3416                       yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3417
3418         /* Intermediate frequency support */
3419         if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
3420                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3421                 int i;
3422
3423                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3424                                 sink_rates, sizeof(sink_rates));
3425
3426                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3427                         int val = le16_to_cpu(sink_rates[i]);
3428
3429                         if (val == 0)
3430                                 break;
3431
3432                         /* Value read is in kHz while drm clock is saved in deca-kHz */
3433                         intel_dp->sink_rates[i] = (val * 200) / 10;
3434                 }
3435                 intel_dp->num_sink_rates = i;
3436         }
3437
3438         intel_dp_print_rates(intel_dp);
3439
3440         if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3441               DP_DWN_STRM_PORT_PRESENT))
3442                 return true; /* native DP sink */
3443
3444         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3445                 return true; /* no per-port downstream info */
3446
3447         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3448                              intel_dp->downstream_ports,
3449                              DP_MAX_DOWNSTREAM_PORTS) < 0)
3450                 return false; /* downstream port status fetch failed */
3451
3452         return true;
3453 }
3454
3455 static void
3456 intel_dp_probe_oui(struct intel_dp *intel_dp)
3457 {
3458         u8 buf[3];
3459
3460         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3461                 return;
3462
3463         if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3464                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3465                               buf[0], buf[1], buf[2]);
3466
3467         if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3468                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3469                               buf[0], buf[1], buf[2]);
3470 }
3471
3472 static bool
3473 intel_dp_probe_mst(struct intel_dp *intel_dp)
3474 {
3475         u8 buf[1];
3476
3477         if (!i915.enable_dp_mst)
3478                 return false;
3479
3480         if (!intel_dp->can_mst)
3481                 return false;
3482
3483         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3484                 return false;
3485
3486         if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3487                 if (buf[0] & DP_MST_CAP) {
3488                         DRM_DEBUG_KMS("Sink is MST capable\n");
3489                         intel_dp->is_mst = true;
3490                 } else {
3491                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3492                         intel_dp->is_mst = false;
3493                 }
3494         }
3495
3496         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3497         return intel_dp->is_mst;
3498 }
3499
3500 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3501 {
3502         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3503         struct drm_device *dev = dig_port->base.base.dev;
3504         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3505         u8 buf;
3506         int ret = 0;
3507         int count = 0;
3508         int attempts = 10;
3509
3510         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3511                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3512                 ret = -EIO;
3513                 goto out;
3514         }
3515
3516         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3517                                buf & ~DP_TEST_SINK_START) < 0) {
3518                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3519                 ret = -EIO;
3520                 goto out;
3521         }
3522
3523         do {
3524                 intel_wait_for_vblank(dev, intel_crtc->pipe);
3525
3526                 if (drm_dp_dpcd_readb(&intel_dp->aux,
3527                                       DP_TEST_SINK_MISC, &buf) < 0) {
3528                         ret = -EIO;
3529                         goto out;
3530                 }
3531                 count = buf & DP_TEST_COUNT_MASK;
3532         } while (--attempts && count);
3533
3534         if (attempts == 0) {
3535                 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
3536                 ret = -ETIMEDOUT;
3537         }
3538
3539  out:
3540         hsw_enable_ips(intel_crtc);
3541         return ret;
3542 }
3543
3544 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3545 {
3546         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3547         struct drm_device *dev = dig_port->base.base.dev;
3548         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3549         u8 buf;
3550         int ret;
3551
3552         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3553                 return -EIO;
3554
3555         if (!(buf & DP_TEST_CRC_SUPPORTED))
3556                 return -ENOTTY;
3557
3558         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3559                 return -EIO;
3560
3561         if (buf & DP_TEST_SINK_START) {
3562                 ret = intel_dp_sink_crc_stop(intel_dp);
3563                 if (ret)
3564                         return ret;
3565         }
3566
3567         hsw_disable_ips(intel_crtc);
3568
3569         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3570                                buf | DP_TEST_SINK_START) < 0) {
3571                 hsw_enable_ips(intel_crtc);
3572                 return -EIO;
3573         }
3574
3575         intel_wait_for_vblank(dev, intel_crtc->pipe);
3576         return 0;
3577 }
3578
3579 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3580 {
3581         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3582         struct drm_device *dev = dig_port->base.base.dev;
3583         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3584         u8 buf;
3585         int count, ret;
3586         int attempts = 6;
3587
3588         ret = intel_dp_sink_crc_start(intel_dp);
3589         if (ret)
3590                 return ret;
3591
3592         do {
3593                 intel_wait_for_vblank(dev, intel_crtc->pipe);
3594
3595                 if (drm_dp_dpcd_readb(&intel_dp->aux,
3596                                       DP_TEST_SINK_MISC, &buf) < 0) {
3597                         ret = -EIO;
3598                         goto stop;
3599                 }
3600                 count = buf & DP_TEST_COUNT_MASK;
3601
3602         } while (--attempts && count == 0);
3603
3604         if (attempts == 0) {
3605                 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3606                 ret = -ETIMEDOUT;
3607                 goto stop;
3608         }
3609
3610         if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3611                 ret = -EIO;
3612                 goto stop;
3613         }
3614
3615 stop:
3616         intel_dp_sink_crc_stop(intel_dp);
3617         return ret;
3618 }
3619
3620 static bool
3621 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3622 {
3623         return drm_dp_dpcd_read(&intel_dp->aux,
3624                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3625                                        sink_irq_vector, 1) == 1;
3626 }
3627
3628 static bool
3629 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3630 {
3631         int ret;
3632
3633         ret = drm_dp_dpcd_read(&intel_dp->aux,
3634                                              DP_SINK_COUNT_ESI,
3635                                              sink_irq_vector, 14);
3636         if (ret != 14)
3637                 return false;
3638
3639         return true;
3640 }
3641
3642 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3643 {
3644         uint8_t test_result = DP_TEST_ACK;
3645         return test_result;
3646 }
3647
3648 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3649 {
3650         uint8_t test_result = DP_TEST_NAK;
3651         return test_result;
3652 }
3653
3654 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
3655 {
3656         uint8_t test_result = DP_TEST_NAK;
3657         struct intel_connector *intel_connector = intel_dp->attached_connector;
3658         struct drm_connector *connector = &intel_connector->base;
3659
3660         if (intel_connector->detect_edid == NULL ||
3661             connector->edid_corrupt ||
3662             intel_dp->aux.i2c_defer_count > 6) {
3663                 /* Check EDID read for NACKs, DEFERs and corruption
3664                  * (DP CTS 1.2 Core r1.1)
3665                  *    4.2.2.4 : Failed EDID read, I2C_NAK
3666                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
3667                  *    4.2.2.6 : EDID corruption detected
3668                  * Use failsafe mode for all cases
3669                  */
3670                 if (intel_dp->aux.i2c_nack_count > 0 ||
3671                         intel_dp->aux.i2c_defer_count > 0)
3672                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
3673                                       intel_dp->aux.i2c_nack_count,
3674                                       intel_dp->aux.i2c_defer_count);
3675                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
3676         } else {
3677                 struct edid *block = intel_connector->detect_edid;
3678
3679                 /* We have to write the checksum
3680                  * of the last block read
3681                  */
3682                 block += intel_connector->detect_edid->extensions;
3683
3684                 if (!drm_dp_dpcd_write(&intel_dp->aux,
3685                                         DP_TEST_EDID_CHECKSUM,
3686                                         &block->checksum,
3687                                         1))
3688                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
3689
3690                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
3691                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
3692         }
3693
3694         /* Set test active flag here so userspace doesn't interrupt things */
3695         intel_dp->compliance_test_active = 1;
3696
3697         return test_result;
3698 }
3699
3700 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3701 {
3702         uint8_t test_result = DP_TEST_NAK;
3703         return test_result;
3704 }
3705
3706 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
3707 {
3708         uint8_t response = DP_TEST_NAK;
3709         uint8_t rxdata = 0;
3710         int status = 0;
3711
3712         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
3713         if (status <= 0) {
3714                 DRM_DEBUG_KMS("Could not read test request from sink\n");
3715                 goto update_status;
3716         }
3717
3718         switch (rxdata) {
3719         case DP_TEST_LINK_TRAINING:
3720                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
3721                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
3722                 response = intel_dp_autotest_link_training(intel_dp);
3723                 break;
3724         case DP_TEST_LINK_VIDEO_PATTERN:
3725                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
3726                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
3727                 response = intel_dp_autotest_video_pattern(intel_dp);
3728                 break;
3729         case DP_TEST_LINK_EDID_READ:
3730                 DRM_DEBUG_KMS("EDID test requested\n");
3731                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
3732                 response = intel_dp_autotest_edid(intel_dp);
3733                 break;
3734         case DP_TEST_LINK_PHY_TEST_PATTERN:
3735                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
3736                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
3737                 response = intel_dp_autotest_phy_pattern(intel_dp);
3738                 break;
3739         default:
3740                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
3741                 break;
3742         }
3743
3744 update_status:
3745         status = drm_dp_dpcd_write(&intel_dp->aux,
3746                                    DP_TEST_RESPONSE,
3747                                    &response, 1);
3748         if (status <= 0)
3749                 DRM_DEBUG_KMS("Could not write test response to sink\n");
3750 }
3751
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain the
 * link if channel EQ dropped, hand the IRQ to the MST manager and ack the
 * event bits back to the sink, looping while new events keep arriving.
 *
 * Returns the MST manager's IRQ result, or -EINVAL when not in MST mode
 * or when the ESI read fails (MST is then torn down and a hotplug event
 * queued so the port gets re-detected).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* ack the handled events: write esi[1..3] back,
				 * retrying the 3-byte write up to three times */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* more events may have arrived meanwhile; loop */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3808
3809 static void
3810 intel_dp_check_link_status(struct intel_dp *intel_dp)
3811 {
3812         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3813         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3814         u8 link_status[DP_LINK_STATUS_SIZE];
3815
3816         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3817
3818         if (!intel_dp_get_link_status(intel_dp, link_status)) {
3819                 DRM_ERROR("Failed to get link status\n");
3820                 return;
3821         }
3822
3823         if (!intel_encoder->base.crtc)
3824                 return;
3825
3826         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3827                 return;
3828
3829         /* if link training is requested we should perform it always */
3830         if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
3831             (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
3832                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3833                               intel_encoder->base.name);
3834                 intel_dp_start_link_train(intel_dp);
3835                 intel_dp_stop_link_train(intel_dp);
3836         }
3837 }
3838
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	u8 sink_irq_vector;
	/* snapshot before intel_dp_get_dpcd() refreshes intel_dp->sink_count */
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* These IRQ sources are only logged here, not acted upon */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* intel_dp_check_link_status() asserts connection_mutex is held */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	intel_dp_check_link_status(intel_dp);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	return true;
}
3901
3902 /* XXX this is probably wrong for multiple downstream ports */
3903 static enum drm_connector_status
3904 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3905 {
3906         uint8_t *dpcd = intel_dp->dpcd;
3907         uint8_t type;
3908
3909         if (!intel_dp_get_dpcd(intel_dp))
3910                 return connector_status_disconnected;
3911
3912         if (is_edp(intel_dp))
3913                 return connector_status_connected;
3914
3915         /* if there's no downstream port, we're done */
3916         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3917                 return connector_status_connected;
3918
3919         /* If we're HPD-aware, SINK_COUNT changes dynamically */
3920         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3921             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3922
3923                 return intel_dp->sink_count ?
3924                 connector_status_connected : connector_status_disconnected;
3925         }
3926
3927         /* If no HPD, poke DDC gently */
3928         if (drm_probe_ddc(&intel_dp->aux.ddc))
3929                 return connector_status_connected;
3930
3931         /* Well we tried, say unknown for unreliable port types */
3932         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3933                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3934                 if (type == DP_DS_PORT_TYPE_VGA ||
3935                     type == DP_DS_PORT_TYPE_NON_EDID)
3936                         return connector_status_unknown;
3937         } else {
3938                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3939                         DP_DWN_STRM_PORT_TYPE_MASK;
3940                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3941                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
3942                         return connector_status_unknown;
3943         }
3944
3945         /* Anything else is out of spec, warn and ignore */
3946         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3947         return connector_status_disconnected;
3948 }
3949
3950 static enum drm_connector_status
3951 edp_detect(struct intel_dp *intel_dp)
3952 {
3953         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3954         enum drm_connector_status status;
3955
3956         status = intel_panel_detect(dev);
3957         if (status == connector_status_unknown)
3958                 status = connector_status_connected;
3959
3960         return status;
3961 }
3962
3963 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
3964                                        struct intel_digital_port *port)
3965 {
3966         u32 bit;
3967
3968         switch (port->port) {
3969         case PORT_A:
3970                 return true;
3971         case PORT_B:
3972                 bit = SDE_PORTB_HOTPLUG;
3973                 break;
3974         case PORT_C:
3975                 bit = SDE_PORTC_HOTPLUG;
3976                 break;
3977         case PORT_D:
3978                 bit = SDE_PORTD_HOTPLUG;
3979                 break;
3980         default:
3981                 MISSING_CASE(port->port);
3982                 return false;
3983         }
3984
3985         return I915_READ(SDEISR) & bit;
3986 }
3987
3988 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
3989                                        struct intel_digital_port *port)
3990 {
3991         u32 bit;
3992
3993         switch (port->port) {
3994         case PORT_A:
3995                 return true;
3996         case PORT_B:
3997                 bit = SDE_PORTB_HOTPLUG_CPT;
3998                 break;
3999         case PORT_C:
4000                 bit = SDE_PORTC_HOTPLUG_CPT;
4001                 break;
4002         case PORT_D:
4003                 bit = SDE_PORTD_HOTPLUG_CPT;
4004                 break;
4005         case PORT_E:
4006                 bit = SDE_PORTE_HOTPLUG_SPT;
4007                 break;
4008         default:
4009                 MISSING_CASE(port->port);
4010                 return false;
4011         }
4012
4013         return I915_READ(SDEISR) & bit;
4014 }
4015
4016 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4017                                        struct intel_digital_port *port)
4018 {
4019         u32 bit;
4020
4021         switch (port->port) {
4022         case PORT_B:
4023                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4024                 break;
4025         case PORT_C:
4026                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4027                 break;
4028         case PORT_D:
4029                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4030                 break;
4031         default:
4032                 MISSING_CASE(port->port);
4033                 return false;
4034         }
4035
4036         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4037 }
4038
4039 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4040                                         struct intel_digital_port *port)
4041 {
4042         u32 bit;
4043
4044         switch (port->port) {
4045         case PORT_B:
4046                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4047                 break;
4048         case PORT_C:
4049                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4050                 break;
4051         case PORT_D:
4052                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4053                 break;
4054         default:
4055                 MISSING_CASE(port->port);
4056                 return false;
4057         }
4058
4059         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4060 }
4061
4062 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4063                                        struct intel_digital_port *intel_dig_port)
4064 {
4065         struct intel_encoder *intel_encoder = &intel_dig_port->base;
4066         enum port port;
4067         u32 bit;
4068
4069         intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4070         switch (port) {
4071         case PORT_A:
4072                 bit = BXT_DE_PORT_HP_DDIA;
4073                 break;
4074         case PORT_B:
4075                 bit = BXT_DE_PORT_HP_DDIB;
4076                 break;
4077         case PORT_C:
4078                 bit = BXT_DE_PORT_HP_DDIC;
4079                 break;
4080         default:
4081                 MISSING_CASE(port);
4082                 return false;
4083         }
4084
4085         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4086 }
4087
/**
 * intel_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Return %true if @port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
					 struct intel_digital_port *port)
{
	/*
	 * Dispatch to the platform-specific live-status read. The IBX
	 * check must come before the generic PCH-split check, since IBX
	 * is itself a PCH-split platform but uses different bits.
	 */
	if (HAS_PCH_IBX(dev_priv))
		return ibx_digital_port_connected(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return cpt_digital_port_connected(dev_priv, port);
	else if (IS_BROXTON(dev_priv))
		return bxt_digital_port_connected(dev_priv, port);
	else if (IS_GM45(dev_priv))
		return gm45_digital_port_connected(dev_priv, port);
	else
		return g4x_digital_port_connected(dev_priv, port);
}
4109
4110 static struct edid *
4111 intel_dp_get_edid(struct intel_dp *intel_dp)
4112 {
4113         struct intel_connector *intel_connector = intel_dp->attached_connector;
4114
4115         /* use cached edid if we have one */
4116         if (intel_connector->edid) {
4117                 /* invalid edid */
4118                 if (IS_ERR(intel_connector->edid))
4119                         return NULL;
4120
4121                 return drm_edid_duplicate(intel_connector->edid);
4122         } else
4123                 return drm_get_edid(&intel_connector->base,
4124                                     &intel_dp->aux.ddc);
4125 }
4126
4127 static void
4128 intel_dp_set_edid(struct intel_dp *intel_dp)
4129 {
4130         struct intel_connector *intel_connector = intel_dp->attached_connector;
4131         struct edid *edid;
4132
4133         intel_dp_unset_edid(intel_dp);
4134         edid = intel_dp_get_edid(intel_dp);
4135         intel_connector->detect_edid = edid;
4136
4137         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4138                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4139         else
4140                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4141 }
4142
4143 static void
4144 intel_dp_unset_edid(struct intel_dp *intel_dp)
4145 {
4146         struct intel_connector *intel_connector = intel_dp->attached_connector;
4147
4148         kfree(intel_connector->detect_edid);
4149         intel_connector->detect_edid = NULL;
4150
4151         intel_dp->has_audio = false;
4152 }
4153
/*
 * Full detection path, run on a long HPD pulse or a full ->detect():
 * determines connector status, tears down MST on disconnect, probes
 * OUI/MST, refreshes the EDID cache and services pending sink IRQs.
 * Holds the port's AUX power domain for the whole sequence.
 */
static void
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Forget any pending compliance test state on disconnect. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		/* If we were in MST mode, drop out of it now. */
		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	} else if (connector->status == connector_status_connected) {
		/*
		 * If display was connected already and is still connected
		 * check links status, there has been known issues of
		 * link loss triggerring long pulse!!!!
		 */
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		intel_dp_check_link_status(intel_dp);
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	status = connector_status_connected;
	/* Let ->detect() skip its own long pulse for this cycle. */
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	/* Keep the EDID cache only while connected (or still in MST). */
	if ((status != connector_status_connected) &&
	    (intel_dp->is_mst == false))
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(to_i915(dev), power_domain);
	return;
}
4257
4258 static enum drm_connector_status
4259 intel_dp_detect(struct drm_connector *connector, bool force)
4260 {
4261         struct intel_dp *intel_dp = intel_attached_dp(connector);
4262         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4263         struct intel_encoder *intel_encoder = &intel_dig_port->base;
4264         struct intel_connector *intel_connector = to_intel_connector(connector);
4265
4266         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4267                       connector->base.id, connector->name);
4268
4269         if (intel_dp->is_mst) {
4270                 /* MST devices are disconnected from a monitor POV */
4271                 intel_dp_unset_edid(intel_dp);
4272                 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4273                         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4274                 return connector_status_disconnected;
4275         }
4276
4277         /* If full detect is not performed yet, do a full detect */
4278         if (!intel_dp->detect_done)
4279                 intel_dp_long_pulse(intel_dp->attached_connector);
4280
4281         intel_dp->detect_done = false;
4282
4283         if (intel_connector->detect_edid)
4284                 return connector_status_connected;
4285         else
4286                 return connector_status_disconnected;
4287 }
4288
4289 static void
4290 intel_dp_force(struct drm_connector *connector)
4291 {
4292         struct intel_dp *intel_dp = intel_attached_dp(connector);
4293         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4294         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4295         enum intel_display_power_domain power_domain;
4296
4297         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4298                       connector->base.id, connector->name);
4299         intel_dp_unset_edid(intel_dp);
4300
4301         if (connector->status != connector_status_connected)
4302                 return;
4303
4304         power_domain = intel_display_port_aux_power_domain(intel_encoder);
4305         intel_display_power_get(dev_priv, power_domain);
4306
4307         intel_dp_set_edid(intel_dp);
4308
4309         intel_display_power_put(dev_priv, power_domain);
4310
4311         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4312                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4313 }
4314
4315 static int intel_dp_get_modes(struct drm_connector *connector)
4316 {
4317         struct intel_connector *intel_connector = to_intel_connector(connector);
4318         struct edid *edid;
4319
4320         edid = intel_connector->detect_edid;
4321         if (edid) {
4322                 int ret = intel_connector_update_modes(connector, edid);
4323                 if (ret)
4324                         return ret;
4325         }
4326
4327         /* if eDP has no EDID, fall back to fixed mode */
4328         if (is_edp(intel_attached_dp(connector)) &&
4329             intel_connector->panel.fixed_mode) {
4330                 struct drm_display_mode *mode;
4331
4332                 mode = drm_mode_duplicate(connector->dev,
4333                                           intel_connector->panel.fixed_mode);
4334                 if (mode) {
4335                         drm_mode_probed_add(connector, mode);
4336                         return 1;
4337                 }
4338         }
4339
4340         return 0;
4341 }
4342
4343 static bool
4344 intel_dp_detect_audio(struct drm_connector *connector)
4345 {
4346         bool has_audio = false;
4347         struct edid *edid;
4348
4349         edid = to_intel_connector(connector)->detect_edid;
4350         if (edid)
4351                 has_audio = drm_detect_monitor_audio(edid);
4352
4353         return has_audio;
4354 }
4355
/*
 * DRM ->set_property() hook. Handles the force-audio, broadcast-RGB
 * and (eDP only) scaling-mode properties. Returns 0 on success or a
 * negative errno; a full modeset is restored when the value changed.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the value on the DRM object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No change: nothing to do. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio capability from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset when the effective range is unchanged. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}
		/* GMCH panel fitters cannot center the image. */
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    val == DRM_MODE_SCALE_CENTER) {
			DRM_DEBUG_KMS("centering not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new setting by restoring the mode on the active crtc. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4448
4449 static void
4450 intel_dp_connector_unregister(struct drm_connector *connector)
4451 {
4452         drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4453         intel_connector_unregister(connector);
4454 }
4455
4456 static void
4457 intel_dp_connector_destroy(struct drm_connector *connector)
4458 {
4459         struct intel_connector *intel_connector = to_intel_connector(connector);
4460
4461         kfree(intel_connector->detect_edid);
4462
4463         if (!IS_ERR_OR_NULL(intel_connector->edid))
4464                 kfree(intel_connector->edid);
4465
4466         /* Can't call is_edp() since the encoder may have been destroyed
4467          * already. */
4468         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4469                 intel_panel_fini(&intel_connector->panel);
4470
4471         drm_connector_cleanup(connector);
4472         kfree(connector);
4473 }
4474
/*
 * Encoder teardown: stop MST, synchronously turn off eDP VDD,
 * unregister the reboot notifier and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);

	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4502
4503 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4504 {
4505         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4506
4507         if (!is_edp(intel_dp))
4508                 return;
4509
4510         /*
4511          * vdd might still be enabled do to the delayed vdd off.
4512          * Make sure vdd is actually turned off here.
4513          */
4514         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4515         pps_lock(intel_dp);
4516         edp_panel_vdd_off_sync(intel_dp);
4517         pps_unlock(intel_dp);
4518 }
4519
4520 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4521 {
4522         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4523         struct drm_device *dev = intel_dig_port->base.base.dev;
4524         struct drm_i915_private *dev_priv = dev->dev_private;
4525         enum intel_display_power_domain power_domain;
4526
4527         lockdep_assert_held(&dev_priv->pps_mutex);
4528
4529         if (!edp_have_panel_vdd(intel_dp))
4530                 return;
4531
4532         /*
4533          * The VDD bit needs a power domain reference, so if the bit is
4534          * already enabled when we boot or resume, grab this reference and
4535          * schedule a vdd off, so we don't hold on to the reference
4536          * indefinitely.
4537          */
4538         DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4539         power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4540         intel_display_power_get(dev_priv, power_domain);
4541
4542         edp_panel_vdd_schedule_off(intel_dp);
4543 }
4544
/*
 * Encoder ->reset() hook (boot/resume): re-read power sequencer state
 * the BIOS may have changed and sanitize VDD tracking. eDP only.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4567
/* DRM connector ops for DP: detection, properties and teardown. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4580
/* Probe helpers: mode enumeration and validation for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
};
4585
/* DRM encoder ops for DP: reset on boot/resume and final destroy. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4590
/*
 * HPD interrupt handler for a DP digital port. Long pulses run a full
 * detect; short pulses service MST messages or sink IRQs, escalating
 * to a full detect when the short-pulse handling fails. Holds the AUX
 * power domain across the handling. Returns IRQ_HANDLED or IRQ_NONE.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Undetermined port types revert to plain DP on a hotplug event. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		intel_dp_long_pulse(intel_dp->attached_connector);
		if (intel_dp->is_mst)
			ret = IRQ_HANDLED;
		goto put_power;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
				/*
				 * If we were in MST mode, and device is not
				 * there, get out of MST mode
				 */
				DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
					      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
				intel_dp->is_mst = false;
				drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
								intel_dp->is_mst);
				goto put_power;
			}
		}

		if (!intel_dp->is_mst) {
			/* Escalate to a full detect if the short pulse failed. */
			if (!intel_dp_short_pulse(intel_dp)) {
				intel_dp_long_pulse(intel_dp->attached_connector);
				goto put_power;
			}
		}
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4661
4662 /* check the VBT to see whether the eDP is on another port */
4663 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4664 {
4665         struct drm_i915_private *dev_priv = dev->dev_private;
4666
4667         /*
4668          * eDP not supported on g4x. so bail out early just
4669          * for a bit extra safety in case the VBT is bonkers.
4670          */
4671         if (INTEL_INFO(dev)->gen < 5)
4672                 return false;
4673
4674         if (port == PORT_A)
4675                 return true;
4676
4677         return intel_bios_is_port_edp(dev_priv, port);
4678 }
4679
4680 void
4681 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4682 {
4683         struct intel_connector *intel_connector = to_intel_connector(connector);
4684
4685         intel_attach_force_audio_property(connector);
4686         intel_attach_broadcast_rgb_property(connector);
4687         intel_dp->color_range_auto = true;
4688
4689         if (is_edp(intel_dp)) {
4690                 drm_mode_create_scaling_mode_property(connector->dev);
4691                 drm_object_attach_property(
4692                         &connector->base,
4693                         connector->dev->mode_config.scaling_mode_property,
4694                         DRM_MODE_SCALE_ASPECT);
4695                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4696         }
4697 }
4698
/*
 * Seed the panel power-sequencing timestamps with "now" so the PPS
 * delays are enforced from the very first panel operation.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
4705
/*
 * Compute the eDP panel power sequencer delays (T1-T12) and cache them in
 * intel_dp->pps_delays.  The final value of each delay is the maximum of
 * what the hardware registers currently contain and what the VBT requests;
 * if both are zero, the eDP spec upper limit is used instead.  All values
 * are kept in the hardware's units of 100 usec.
 *
 * Must be called with dev_priv->pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? (t11_t12 can never legitimately end up 0) */
	if (final->t11_t12 != 0)
		return;

	/*
	 * Pick the per-platform PPS register set.  Note that Broxton has no
	 * separate divisor register (the power cycle delay lives in
	 * PP_CONTROL), so pp_div_reg is deliberately left unset on that path
	 * and must not be read there.
	 */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		/* VLV/CHV have one PPS instance per pipe */
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/*
		 * BXT stores the power cycle delay in PP_CONTROL, zero-based
		 * and in 100 ms units; convert to the common 100 usec units.
		 */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100 usec units to ms, rounding up */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4828
/*
 * Program the panel power sequencer hardware with the delays previously
 * computed into intel_dp->pps_delays, plus the PP clock divisor and the
 * port-select field where the platform has one.
 *
 * Must be called with dev_priv->pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* divisor formula below works in units of 100 usec on rawclk */
	int div = dev_priv->rawclk_freq / 1000;
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/*
	 * Per-platform register set.  pp_ctrl_reg is only initialized (and
	 * only used) on BXT; pp_div_reg only on the non-BXT paths.
	 */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT: power cycle delay lives in PP_CONTROL, in 100 ms units,
		 * zero-based — hence the +1 before the 1000 division. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	/* On BXT the power cycle delay is merged back into PP_CONTROL */
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
4916
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * On gen8+ (except CHV) the switch is done by selecting between the M1/N1
 * and M2/N2 link values; on older gens (and CHV) it is done by toggling
 * the EDP_RR_MODE_SWITCH bit in PIPECONF.
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requested rate matching the panel's downclock mode means low RR */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		/* gen8+: switch RR by reprogramming the link M/N values */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* gen7/CHV: toggle the RR mode switch bit in PIPECONF */
		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5020
5021 /**
5022  * intel_edp_drrs_enable - init drrs struct if supported
5023  * @intel_dp: DP struct
5024  *
5025  * Initializes frontbuffer_bits and drrs.dp
5026  */
5027 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5028 {
5029         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5030         struct drm_i915_private *dev_priv = dev->dev_private;
5031         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5032         struct drm_crtc *crtc = dig_port->base.base.crtc;
5033         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5034
5035         if (!intel_crtc->config->has_drrs) {
5036                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5037                 return;
5038         }
5039
5040         mutex_lock(&dev_priv->drrs.mutex);
5041         if (WARN_ON(dev_priv->drrs.dp)) {
5042                 DRM_ERROR("DRRS already enabled\n");
5043                 goto unlock;
5044         }
5045
5046         dev_priv->drrs.busy_frontbuffer_bits = 0;
5047
5048         dev_priv->drrs.dp = intel_dp;
5049
5050 unlock:
5051         mutex_unlock(&dev_priv->drrs.mutex);
5052 }
5053
5054 /**
5055  * intel_edp_drrs_disable - Disable DRRS
5056  * @intel_dp: DP struct
5057  *
5058  */
5059 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5060 {
5061         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5062         struct drm_i915_private *dev_priv = dev->dev_private;
5063         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5064         struct drm_crtc *crtc = dig_port->base.base.crtc;
5065         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5066
5067         if (!intel_crtc->config->has_drrs)
5068                 return;
5069
5070         mutex_lock(&dev_priv->drrs.mutex);
5071         if (!dev_priv->drrs.dp) {
5072                 mutex_unlock(&dev_priv->drrs.mutex);
5073                 return;
5074         }
5075
5076         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5077                 intel_dp_set_drrs_state(dev_priv->dev,
5078                         intel_dp->attached_connector->panel.
5079                         fixed_mode->vrefresh);
5080
5081         dev_priv->drrs.dp = NULL;
5082         mutex_unlock(&dev_priv->drrs.mutex);
5083
5084         cancel_delayed_work_sync(&dev_priv->drrs.work);
5085 }
5086
5087 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5088 {
5089         struct drm_i915_private *dev_priv =
5090                 container_of(work, typeof(*dev_priv), drrs.work.work);
5091         struct intel_dp *intel_dp;
5092
5093         mutex_lock(&dev_priv->drrs.mutex);
5094
5095         intel_dp = dev_priv->drrs.dp;
5096
5097         if (!intel_dp)
5098                 goto unlock;
5099
5100         /*
5101          * The delayed work can race with an invalidate hence we need to
5102          * recheck.
5103          */
5104
5105         if (dev_priv->drrs.busy_frontbuffer_bits)
5106                 goto unlock;
5107
5108         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5109                 intel_dp_set_drrs_state(dev_priv->dev,
5110                         intel_dp->attached_connector->panel.
5111                         downclock_mode->vrefresh);
5112
5113 unlock:
5114         mutex_unlock(&dev_priv->drrs.mutex);
5115 }
5116
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes start.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Screen is about to get busy: stop any pending downclock. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track dirt on the pipe driving the DRRS panel. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5159
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Clear the just-flushed planes from the busy set. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/*
	 * A flush indicates the screen was just updated, so upclock now
	 * (mirrors the invalidate path) rather than waiting for the next
	 * invalidate.
	 */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5211
5212 /**
5213  * DOC: Display Refresh Rate Switching (DRRS)
5214  *
5215  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5217  * dynamically, based on the usage scenario. This feature is applicable
5218  * for internal panels.
5219  *
5220  * Indication that the panel supports DRRS is given by the panel EDID, which
5221  * would list multiple refresh rates for one resolution.
5222  *
5223  * DRRS is of 2 types - static and seamless.
5224  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5225  * (may appear as a blink on screen) and is used in dock-undock scenario.
5226  * Seamless DRRS involves changing RR without any visual effect to the user
5227  * and can be used during normal system usage. This is done by programming
5228  * certain registers.
5229  *
5230  * Support for static/seamless DRRS may be indicated in the VBT based on
5231  * inputs from the panel spec.
5232  *
5233  * DRRS saves power by switching to low RR based on usage scenarios.
5234  *
5235  * The implementation is based on frontbuffer tracking implementation.  When
5236  * there is a disturbance on the screen triggered by user activity or a periodic
5237  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5238  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5239  * made.
5240  *
5241  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5242  * and intel_edp_drrs_flush() are called.
5243  *
5244  * DRRS can be further extended to support other internal panels and also
5245  * the scenario of video playback wherein RR is set based on the rate
5246  * requested by userspace.
5247  */
5248
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are set up even if DRRS ends up unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	/* Seamless DRRS requires Gen7+ hardware. */
	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* The panel EDID must expose a lower-refresh variant of fixed_mode. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5298
/*
 * Finish connector setup for eDP panels: sanitize VDD state, cache the
 * DPCD and EDID, pick the panel's fixed (and optional downclock) mode,
 * and set up backlight control.
 *
 * Returns true on success (or trivially for non-eDP connectors), false
 * if the panel appears to be a ghost (no DPCD readable).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	/* Nothing to do for external DP connectors. */
	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* Unusable EDID: free it and record the failure. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* May be a valid pointer or an ERR_PTR() — consumers must check. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode) {
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5408
5409 bool
5410 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5411                         struct intel_connector *intel_connector)
5412 {
5413         struct drm_connector *connector = &intel_connector->base;
5414         struct intel_dp *intel_dp = &intel_dig_port->dp;
5415         struct intel_encoder *intel_encoder = &intel_dig_port->base;
5416         struct drm_device *dev = intel_encoder->base.dev;
5417         struct drm_i915_private *dev_priv = dev->dev_private;
5418         enum port port = intel_dig_port->port;
5419         int type, ret;
5420
5421         if (WARN(intel_dig_port->max_lanes < 1,
5422                  "Not enough lanes (%d) for DP on port %c\n",
5423                  intel_dig_port->max_lanes, port_name(port)))
5424                 return false;
5425
5426         intel_dp->pps_pipe = INVALID_PIPE;
5427
5428         /* intel_dp vfuncs */
5429         if (INTEL_INFO(dev)->gen >= 9)
5430                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5431         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5432                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5433         else if (HAS_PCH_SPLIT(dev))
5434                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5435         else
5436                 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
5437
5438         if (INTEL_INFO(dev)->gen >= 9)
5439                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5440         else
5441                 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
5442
5443         if (HAS_DDI(dev))
5444                 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5445
5446         /* Preserve the current hw state. */
5447         intel_dp->DP = I915_READ(intel_dp->output_reg);
5448         intel_dp->attached_connector = intel_connector;
5449
5450         if (intel_dp_is_edp(dev, port))
5451                 type = DRM_MODE_CONNECTOR_eDP;
5452         else
5453                 type = DRM_MODE_CONNECTOR_DisplayPort;
5454
5455         /*
5456          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5457          * for DP the encoder type can be set by the caller to
5458          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5459          */
5460         if (type == DRM_MODE_CONNECTOR_eDP)
5461                 intel_encoder->type = INTEL_OUTPUT_EDP;
5462
5463         /* eDP only on port B and/or C on vlv/chv */
5464         if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5465                     is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5466                 return false;
5467
5468         DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5469                         type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5470                         port_name(port));
5471
5472         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5473         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5474
5475         connector->interlace_allowed = true;
5476         connector->doublescan_allowed = 0;
5477
5478         INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5479                           edp_panel_vdd_work);
5480
5481         intel_connector_attach_encoder(intel_connector, intel_encoder);
5482         drm_connector_register(connector);
5483
5484         if (HAS_DDI(dev))
5485                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5486         else
5487                 intel_connector->get_hw_state = intel_connector_get_hw_state;
5488
5489         /* Set up the hotplug pin. */
5490         switch (port) {
5491         case PORT_A:
5492                 intel_encoder->hpd_pin = HPD_PORT_A;
5493                 break;
5494         case PORT_B:
5495                 intel_encoder->hpd_pin = HPD_PORT_B;
5496                 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5497                         intel_encoder->hpd_pin = HPD_PORT_A;
5498                 break;
5499         case PORT_C:
5500                 intel_encoder->hpd_pin = HPD_PORT_C;
5501                 break;
5502         case PORT_D:
5503                 intel_encoder->hpd_pin = HPD_PORT_D;
5504                 break;
5505         case PORT_E:
5506                 intel_encoder->hpd_pin = HPD_PORT_E;
5507                 break;
5508         default:
5509                 BUG();
5510         }
5511
5512         if (is_edp(intel_dp)) {
5513                 pps_lock(intel_dp);
5514                 intel_dp_init_panel_power_timestamps(intel_dp);
5515                 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5516                         vlv_initial_power_sequencer_setup(intel_dp);
5517                 else
5518                         intel_dp_init_panel_power_sequencer(dev, intel_dp);
5519                 pps_unlock(intel_dp);
5520         }
5521
5522         ret = intel_dp_aux_init(intel_dp, intel_connector);
5523         if (ret)
5524                 goto fail;
5525
5526         /* init MST on ports that can support it */
5527         if (HAS_DP_MST(dev) &&
5528             (port == PORT_B || port == PORT_C || port == PORT_D))
5529                 intel_dp_mst_encoder_init(intel_dig_port,
5530                                           intel_connector->base.base.id);
5531
5532         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5533                 intel_dp_aux_fini(intel_dp);
5534                 intel_dp_mst_encoder_cleanup(intel_dig_port);
5535                 goto fail;
5536         }
5537
5538         intel_dp_add_properties(intel_dp, connector);
5539
5540         /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5541          * 0xd.  Failure to do so will result in spurious interrupts being
5542          * generated on the port when a cable is not attached.
5543          */
5544         if (IS_G4X(dev) && !IS_GM45(dev)) {
5545                 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5546                 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5547         }
5548
5549         i915_debugfs_connector_add(connector);
5550
5551         return true;
5552
5553 fail:
5554         if (is_edp(intel_dp)) {
5555                 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5556                 /*
5557                  * vdd might still be enabled do to the delayed vdd off.
5558                  * Make sure vdd is actually turned off here.
5559                  */
5560                 pps_lock(intel_dp);
5561                 edp_panel_vdd_off_sync(intel_dp);
5562                 pps_unlock(intel_dp);
5563         }
5564         drm_connector_unregister(connector);
5565         drm_connector_cleanup(connector);
5566
5567         return false;
5568 }
5569
5570 bool intel_dp_init(struct drm_device *dev,
5571                    i915_reg_t output_reg,
5572                    enum port port)
5573 {
5574         struct drm_i915_private *dev_priv = dev->dev_private;
5575         struct intel_digital_port *intel_dig_port;
5576         struct intel_encoder *intel_encoder;
5577         struct drm_encoder *encoder;
5578         struct intel_connector *intel_connector;
5579
5580         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5581         if (!intel_dig_port)
5582                 return false;
5583
5584         intel_connector = intel_connector_alloc();
5585         if (!intel_connector)
5586                 goto err_connector_alloc;
5587
5588         intel_encoder = &intel_dig_port->base;
5589         encoder = &intel_encoder->base;
5590
5591         if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5592                              DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
5593                 goto err_encoder_init;
5594
5595         intel_encoder->compute_config = intel_dp_compute_config;
5596         intel_encoder->disable = intel_disable_dp;
5597         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5598         intel_encoder->get_config = intel_dp_get_config;
5599         intel_encoder->suspend = intel_dp_encoder_suspend;
5600         if (IS_CHERRYVIEW(dev)) {
5601                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5602                 intel_encoder->pre_enable = chv_pre_enable_dp;
5603                 intel_encoder->enable = vlv_enable_dp;
5604                 intel_encoder->post_disable = chv_post_disable_dp;
5605                 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5606         } else if (IS_VALLEYVIEW(dev)) {
5607                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5608                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5609                 intel_encoder->enable = vlv_enable_dp;
5610                 intel_encoder->post_disable = vlv_post_disable_dp;
5611         } else {
5612                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5613                 intel_encoder->enable = g4x_enable_dp;
5614                 if (INTEL_INFO(dev)->gen >= 5)
5615                         intel_encoder->post_disable = ilk_post_disable_dp;
5616         }
5617
5618         intel_dig_port->port = port;
5619         intel_dig_port->dp.output_reg = output_reg;
5620         intel_dig_port->max_lanes = 4;
5621
5622         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5623         if (IS_CHERRYVIEW(dev)) {
5624                 if (port == PORT_D)
5625                         intel_encoder->crtc_mask = 1 << 2;
5626                 else
5627                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5628         } else {
5629                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5630         }
5631         intel_encoder->cloneable = 0;
5632
5633         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5634         dev_priv->hotplug.irq_port[port] = intel_dig_port;
5635
5636         if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5637                 goto err_init_connector;
5638
5639         return true;
5640
5641 err_init_connector:
5642         drm_encoder_cleanup(encoder);
5643 err_encoder_init:
5644         kfree(intel_connector);
5645 err_connector_alloc:
5646         kfree(intel_dig_port);
5647         return false;
5648 }
5649
5650 void intel_dp_mst_suspend(struct drm_device *dev)
5651 {
5652         struct drm_i915_private *dev_priv = dev->dev_private;
5653         int i;
5654
5655         /* disable MST */
5656         for (i = 0; i < I915_MAX_PORTS; i++) {
5657                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5658                 if (!intel_dig_port)
5659                         continue;
5660
5661                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5662                         if (!intel_dig_port->dp.can_mst)
5663                                 continue;
5664                         if (intel_dig_port->dp.is_mst)
5665                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5666                 }
5667         }
5668 }
5669
5670 void intel_dp_mst_resume(struct drm_device *dev)
5671 {
5672         struct drm_i915_private *dev_priv = dev->dev_private;
5673         int i;
5674
5675         for (i = 0; i < I915_MAX_PORTS; i++) {
5676                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5677                 if (!intel_dig_port)
5678                         continue;
5679                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5680                         int ret;
5681
5682                         if (!intel_dig_port->dp.can_mst)
5683                                 continue;
5684
5685                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5686                         if (ret != 0) {
5687                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5688                         }
5689                 }
5690         }
5691 }