]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/intel_dp.c
drm/i915: Hide the source vs. sink rate handling from intel_dp_compute_config()
[karo-tx-linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
/* Maps a DP link bandwidth code to the DPLL divider settings that produce it. */
struct dp_link_dpll {
        int link_bw;            /* DP_LINK_BW_* code from the DP spec */
        struct dpll dpll;       /* PLL dividers for this link rate */
};
48
/* Per-link-rate DPLL settings for gen4 (g4x) DP ports. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* Per-link-rate DPLL settings for PCH-attached DP ports. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Per-link-rate DPLL settings for VLV DP ports. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/* Link rates (in kHz) supported by Skylake. */
static const int gen9_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
/* Fixed DP link rates (in kHz) available on all other platforms. */
static const int default_rates[] = { 162000, 270000, 540000 };
91
92 /**
93  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94  * @intel_dp: DP struct
95  *
96  * If a CPU or PCH DP output is attached to an eDP panel, this function
97  * will return true, and false otherwise.
98  */
99 static bool is_edp(struct intel_dp *intel_dp)
100 {
101         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
104 }
105
106 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
107 {
108         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110         return intel_dig_port->base.base.dev;
111 }
112
113 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114 {
115         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
116 }
117
118 static void intel_dp_link_down(struct intel_dp *intel_dp);
119 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
120 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
121 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
122 static void vlv_steal_power_sequencer(struct drm_device *dev,
123                                       enum pipe pipe);
124
125 int
126 intel_dp_max_link_bw(struct intel_dp *intel_dp)
127 {
128         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
129
130         switch (max_link_bw) {
131         case DP_LINK_BW_1_62:
132         case DP_LINK_BW_2_7:
133         case DP_LINK_BW_5_4:
134                 break;
135         default:
136                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
137                      max_link_bw);
138                 max_link_bw = DP_LINK_BW_1_62;
139                 break;
140         }
141         return max_link_bw;
142 }
143
144 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
145 {
146         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
147         struct drm_device *dev = intel_dig_port->base.base.dev;
148         u8 source_max, sink_max;
149
150         source_max = 4;
151         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
152             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
153                 source_max = 2;
154
155         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
156
157         return min(source_max, sink_max);
158 }
159
160 /*
161  * The units on the numbers in the next two are... bizarre.  Examples will
162  * make it clearer; this one parallels an example in the eDP spec.
163  *
164  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165  *
166  *     270000 * 1 * 8 / 10 == 216000
167  *
168  * The actual data capacity of that configuration is 2.16Gbit/s, so the
169  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
170  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171  * 119000.  At 18bpp that's 2142000 kilobits per second.
172  *
173  * Thus the strange-looking division by 10 in intel_dp_link_required, to
174  * get the result in decakilobits instead of kilobits.
175  */
176
/*
 * Bandwidth needed by a mode, in decakilobits/s (see the unit discussion
 * above): pixel clock (kHz) times bits per pixel, rounded up to a
 * multiple of 10 and divided by 10.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kbps = pixel_clock * bpp;

        return (kbps + 9) / 10;
}
182
/*
 * Usable data rate of a link, in decakilobits/s: 8b/10b channel coding
 * means only 8 of every 10 link bits carry payload.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        int link_bits = max_link_clock * max_lanes;

        return link_bits * 8 / 10;
}
188
/*
 * Validate a mode against this DP connector: panel size limits for eDP,
 * link bandwidth at the lowest bpp we may fall back to, and basic clock
 * and flag restrictions.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        /*
         * eDP panels can't show anything bigger than their native mode;
         * use the fixed mode's clock for the bandwidth check below.
         */
        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        /* Check at 18bpp, presumably the lowest bpp we'd fall back to (6bpc). */
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        /* Reject pixel clocks below 10MHz. */
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        /* Double-clocked modes aren't supported here. */
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}
226
/*
 * Pack up to four bytes into a 32-bit AUX data register value,
 * big-endian style: src[0] lands in the most significant byte.
 * Extra input bytes beyond four are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int n = src_bytes;
        int i;

        /* A single AUX data register holds at most four bytes. */
        if (n > 4)
                n = 4;

        for (i = 0; i < n; i++)
                v |= (uint32_t)src[i] << (8 * (3 - i));

        return v;
}
238
/*
 * Unpack a 32-bit AUX data register value into up to four bytes,
 * big-endian style: the most significant byte of src becomes dst[0].
 * At most four bytes are ever written.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int n = dst_bytes > 4 ? 4 : dst_bytes;
        int i;

        for (i = 0; i < n; i++)
                dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
247
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;

        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
        if (IS_VALLEYVIEW(dev))
                return 200;

        /* Decode the FSB frequency field into an hrawclk value in MHz. */
        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100;
        case CLKCFG_FSB_533:
                return 133;
        case CLKCFG_FSB_667:
                return 166;
        case CLKCFG_FSB_800:
                return 200;
        case CLKCFG_FSB_1067:
                return 266;
        case CLKCFG_FSB_1333:
                return 333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400;
        default:
                /* Unknown encoding: fall back to a conservative value. */
                return 133;
        }
}
281
282 static void
283 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
284                                     struct intel_dp *intel_dp);
285 static void
286 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
287                                               struct intel_dp *intel_dp);
288
/*
 * Acquire the panel power sequencer (PPS) lock for this port.
 *
 * Takes a power domain reference for the port *before* grabbing
 * pps_mutex — the ordering matters, see vlv_power_sequencer_reset().
 * Always pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
306
/*
 * Release the PPS lock and drop the power domain reference taken by
 * pps_lock(). The mutex is dropped first, mirroring the lock order.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
320
321 static void
322 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
323 {
324         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
325         struct drm_device *dev = intel_dig_port->base.base.dev;
326         struct drm_i915_private *dev_priv = dev->dev_private;
327         enum pipe pipe = intel_dp->pps_pipe;
328         bool pll_enabled;
329         uint32_t DP;
330
331         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
332                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
333                  pipe_name(pipe), port_name(intel_dig_port->port)))
334                 return;
335
336         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
337                       pipe_name(pipe), port_name(intel_dig_port->port));
338
339         /* Preserve the BIOS-computed detected bit. This is
340          * supposed to be read-only.
341          */
342         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
343         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
344         DP |= DP_PORT_WIDTH(1);
345         DP |= DP_LINK_TRAIN_PAT_1;
346
347         if (IS_CHERRYVIEW(dev))
348                 DP |= DP_PIPE_SELECT_CHV(pipe);
349         else if (pipe == PIPE_B)
350                 DP |= DP_PIPEB_SELECT;
351
352         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
353
354         /*
355          * The DPLL for the pipe must be enabled for this to work.
356          * So enable temporarily it if it's not already enabled.
357          */
358         if (!pll_enabled)
359                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
360                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
361
362         /*
363          * Similar magic as in intel_dp_enable_port().
364          * We _must_ do this port enable + disable trick
365          * to make this power seqeuencer lock onto the port.
366          * Otherwise even VDD force bit won't work.
367          */
368         I915_WRITE(intel_dp->output_reg, DP);
369         POSTING_READ(intel_dp->output_reg);
370
371         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
372         POSTING_READ(intel_dp->output_reg);
373
374         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
375         POSTING_READ(intel_dp->output_reg);
376
377         if (!pll_enabled)
378                 vlv_force_pll_off(dev, pipe);
379 }
380
/*
 * Return the pipe whose power sequencer is assigned to this eDP port,
 * picking a free one (stealing it from its current owner if necessary)
 * and kicking it into gear when none is assigned yet.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        /* Fast path: a power sequencer has already been assigned. */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                /* Mask out pipes already claimed by other eDP ports. */
                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
444
445 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
446                                enum pipe pipe);
447
448 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
449                                enum pipe pipe)
450 {
451         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
452 }
453
454 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
455                                 enum pipe pipe)
456 {
457         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
458 }
459
/* pipe_check: accept any pipe — used as the last-resort match. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
465
466 static enum pipe
467 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
468                      enum port port,
469                      vlv_pipe_check pipe_check)
470 {
471         enum pipe pipe;
472
473         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
474                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
475                         PANEL_PORT_SELECT_MASK;
476
477                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
478                         continue;
479
480                 if (!pipe_check(dev_priv, pipe))
481                         continue;
482
483                 return pipe;
484         }
485
486         return INVALID_PIPE;
487 }
488
/*
 * At driver init, figure out which PPS instance (pipe) the BIOS or a
 * previous driver instance left associated with this eDP port. Matches
 * in priority order: panel power on > VDD forced on > bare port-select
 * match. Leaves pps_pipe as INVALID_PIPE when nothing matches, letting
 * vlv_power_sequencer_pipe() pick one later.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
525
/*
 * Forget the PPS pipe association of every eDP encoder so that it gets
 * re-resolved on next use. VLV-only; deliberately runs without
 * pps_mutex (see the comment below for why).
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
554
555 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
556 {
557         struct drm_device *dev = intel_dp_to_dev(intel_dp);
558
559         if (HAS_PCH_SPLIT(dev))
560                 return PCH_PP_CONTROL;
561         else
562                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563 }
564
565 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566 {
567         struct drm_device *dev = intel_dp_to_dev(intel_dp);
568
569         if (HAS_PCH_SPLIT(dev))
570                 return PCH_PP_STATUS;
571         else
572                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
573 }
574
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* Only eDP needs this, and only on an actual restart. */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* Block the reboot until the power cycle delay has elapsed. */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
610
611 static bool edp_have_panel_power(struct intel_dp *intel_dp)
612 {
613         struct drm_device *dev = intel_dp_to_dev(intel_dp);
614         struct drm_i915_private *dev_priv = dev->dev_private;
615
616         lockdep_assert_held(&dev_priv->pps_mutex);
617
618         if (IS_VALLEYVIEW(dev) &&
619             intel_dp->pps_pipe == INVALID_PIPE)
620                 return false;
621
622         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
623 }
624
625 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
626 {
627         struct drm_device *dev = intel_dp_to_dev(intel_dp);
628         struct drm_i915_private *dev_priv = dev->dev_private;
629
630         lockdep_assert_held(&dev_priv->pps_mutex);
631
632         if (IS_VALLEYVIEW(dev) &&
633             intel_dp->pps_pipe == INVALID_PIPE)
634                 return false;
635
636         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
637 }
638
639 static void
640 intel_dp_check_edp(struct intel_dp *intel_dp)
641 {
642         struct drm_device *dev = intel_dp_to_dev(intel_dp);
643         struct drm_i915_private *dev_priv = dev->dev_private;
644
645         if (!is_edp(intel_dp))
646                 return;
647
648         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
649                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
650                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
651                               I915_READ(_pp_stat_reg(intel_dp)),
652                               I915_READ(_pp_ctrl_reg(intel_dp)));
653         }
654 }
655
/*
 * Wait for the pending AUX transfer to finish (SEND_BUSY to clear).
 * With AUX interrupts available we sleep on the gmbus wait queue,
 * otherwise we poll atomically; either way for up to ~10ms. Returns the
 * last AUX_CH_CTL value read so the caller can inspect the status bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C re-reads the register on every evaluation and updates 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
679
680 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
681 {
682         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
683         struct drm_device *dev = intel_dig_port->base.base.dev;
684
685         /*
686          * The clock divider is based off the hrawclk, and would like to run at
687          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
688          */
689         return index ? 0 : intel_hrawclk(dev) / 2;
690 }
691
692 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693 {
694         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695         struct drm_device *dev = intel_dig_port->base.base.dev;
696
697         if (index)
698                 return 0;
699
700         if (intel_dig_port->port == PORT_A) {
701                 if (IS_GEN6(dev) || IS_GEN7(dev))
702                         return 200; /* SNB & IVB eDP input clock at 400Mhz */
703                 else
704                         return 225; /* eDP input clock at 450Mhz */
705         } else {
706                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
707         }
708 }
709
/*
 * AUX clock divider for HSW/BDW. Port A (eDP) derives the AUX clock from
 * cdclk; other ports use the PCH rawclk, with hard-coded divider values
 * as a workaround on non-ULT HSW (LPT PCH).
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
                /* Presumably targets a ~2MHz AUX clock from cdclk (kHz) — verify. */
                return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        } else  {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}
731
732 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
733 {
734         return index ? 0 : 100;
735 }
736
737 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
738 {
739         /*
740          * SKL doesn't need us to program the AUX clock divider (Hardware will
741          * derive the clock from CDCLK automatically). We still implement the
742          * get_aux_clock_divider vfunc to plug-in into the existing code.
743          */
744         return index ? 0 : 1;
745 }
746
/*
 * Build the AUX_CH_CTL value that starts an AUX transaction on pre-SKL
 * hardware: busy/done/error bits plus the platform-specific precharge
 * time, timeout and bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        /* SNB uses a shorter precharge time than later platforms. */
        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        /* BDW eDP (AUX channel A) gets a longer timeout than the rest. */
        if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
776
777 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
778                                       bool has_aux_irq,
779                                       int send_bytes,
780                                       uint32_t unused)
781 {
782         return DP_AUX_CH_CTL_SEND_BUSY |
783                DP_AUX_CH_CTL_DONE |
784                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
785                DP_AUX_CH_CTL_TIME_OUT_ERROR |
786                DP_AUX_CH_CTL_TIME_OUT_1600us |
787                DP_AUX_CH_CTL_RECEIVE_ERROR |
788                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
789                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
790 }
791
/*
 * Perform one raw AUX channel transaction.
 *
 * @send/@send_bytes: request bytes to transmit (header plus payload).
 * @recv/@recv_size: buffer for the reply bytes.
 *
 * Returns the number of reply bytes received on success, or a negative
 * errno: -E2BIG (message too large for the 5 data registers), -EBUSY
 * (channel stuck busy or transaction never completed), -EIO (receive
 * error), -ETIMEDOUT (sink did not answer, e.g. not connected).
 *
 * Acquires pps_mutex and (for eDP) VDD for the duration of the call.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* The 5 data registers immediately follow the control register. */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop: walk the platform's AUX clock dividers until one works. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* The DP spec says to retry at least 3 times; we use 5. */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side.
	 * The hardware-reported size is clamped to the caller's buffer
	 * (which was already checked to be <= 20 above). */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo the PM QoS / runtime PM / VDD / pps_mutex in reverse order. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
931
932 #define BARE_ADDRESS_SIZE       3
933 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
934 static ssize_t
935 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
936 {
937         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
938         uint8_t txbuf[20], rxbuf[20];
939         size_t txsize, rxsize;
940         int ret;
941
942         txbuf[0] = msg->request << 4;
943         txbuf[1] = msg->address >> 8;
944         txbuf[2] = msg->address & 0xff;
945         txbuf[3] = msg->size - 1;
946
947         switch (msg->request & ~DP_AUX_I2C_MOT) {
948         case DP_AUX_NATIVE_WRITE:
949         case DP_AUX_I2C_WRITE:
950                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
951                 rxsize = 1;
952
953                 if (WARN_ON(txsize > 20))
954                         return -E2BIG;
955
956                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
957
958                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
959                 if (ret > 0) {
960                         msg->reply = rxbuf[0] >> 4;
961
962                         /* Return payload size. */
963                         ret = msg->size;
964                 }
965                 break;
966
967         case DP_AUX_NATIVE_READ:
968         case DP_AUX_I2C_READ:
969                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
970                 rxsize = msg->size + 1;
971
972                 if (WARN_ON(rxsize > 20))
973                         return -E2BIG;
974
975                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976                 if (ret > 0) {
977                         msg->reply = rxbuf[0] >> 4;
978                         /*
979                          * Assume happy day, and copy the data. The caller is
980                          * expected to check msg->reply before touching it.
981                          *
982                          * Return payload size.
983                          */
984                         ret--;
985                         memcpy(msg->buffer, rxbuf + 1, ret);
986                 }
987                 break;
988
989         default:
990                 ret = -EINVAL;
991                 break;
992         }
993
994         return ret;
995 }
996
997 static void
998 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
999 {
1000         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1001         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1002         enum port port = intel_dig_port->port;
1003         const char *name = NULL;
1004         int ret;
1005
1006         switch (port) {
1007         case PORT_A:
1008                 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1009                 name = "DPDDC-A";
1010                 break;
1011         case PORT_B:
1012                 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1013                 name = "DPDDC-B";
1014                 break;
1015         case PORT_C:
1016                 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1017                 name = "DPDDC-C";
1018                 break;
1019         case PORT_D:
1020                 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1021                 name = "DPDDC-D";
1022                 break;
1023         default:
1024                 BUG();
1025         }
1026
1027         /*
1028          * The AUX_CTL register is usually DP_CTL + 0x10.
1029          *
1030          * On Haswell and Broadwell though:
1031          *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1032          *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1033          *
1034          * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1035          */
1036         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1037                 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1038
1039         intel_dp->aux.name = name;
1040         intel_dp->aux.dev = dev->dev;
1041         intel_dp->aux.transfer = intel_dp_aux_transfer;
1042
1043         DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1044                       connector->base.kdev->kobj.name);
1045
1046         ret = drm_dp_aux_register(&intel_dp->aux);
1047         if (ret < 0) {
1048                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1049                           name, ret);
1050                 return;
1051         }
1052
1053         ret = sysfs_create_link(&connector->base.kdev->kobj,
1054                                 &intel_dp->aux.ddc.dev.kobj,
1055                                 intel_dp->aux.ddc.dev.kobj.name);
1056         if (ret < 0) {
1057                 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1058                 drm_dp_aux_unregister(&intel_dp->aux);
1059         }
1060 }
1061
1062 static void
1063 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1064 {
1065         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1066
1067         if (!intel_connector->mst_port)
1068                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1069                                   intel_dp->aux.ddc.dev.kobj.name);
1070         intel_connector_unregister(intel_connector);
1071 }
1072
1073 static void
1074 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1075 {
1076         u32 ctrl1;
1077
1078         pipe_config->ddi_pll_sel = SKL_DPLL0;
1079         pipe_config->dpll_hw_state.cfgcr1 = 0;
1080         pipe_config->dpll_hw_state.cfgcr2 = 0;
1081
1082         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1083         switch (link_clock / 2) {
1084         case 81000:
1085                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1086                                               SKL_DPLL0);
1087                 break;
1088         case 135000:
1089                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1090                                               SKL_DPLL0);
1091                 break;
1092         case 270000:
1093                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1094                                               SKL_DPLL0);
1095                 break;
1096         case 162000:
1097                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1098                                               SKL_DPLL0);
1099                 break;
1100         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1101         results in CDCLK change. Need to handle the change of CDCLK by
1102         disabling pipes and re-enabling them */
1103         case 108000:
1104                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1105                                               SKL_DPLL0);
1106                 break;
1107         case 216000:
1108                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1109                                               SKL_DPLL0);
1110                 break;
1111
1112         }
1113         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1114 }
1115
1116 static void
1117 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1118 {
1119         switch (link_bw) {
1120         case DP_LINK_BW_1_62:
1121                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1122                 break;
1123         case DP_LINK_BW_2_7:
1124                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1125                 break;
1126         case DP_LINK_BW_5_4:
1127                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1128                 break;
1129         }
1130 }
1131
1132 static int
1133 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1134 {
1135         if (intel_dp->num_supported_rates) {
1136                 *sink_rates = intel_dp->supported_rates;
1137                 return intel_dp->num_supported_rates;
1138         }
1139
1140         *sink_rates = default_rates;
1141
1142         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1143 }
1144
1145 static int
1146 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1147 {
1148         if (INTEL_INFO(dev)->gen >= 9) {
1149                 *source_rates = gen9_rates;
1150                 return ARRAY_SIZE(gen9_rates);
1151         }
1152
1153         *source_rates = default_rates;
1154
1155         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1156                 /* WaDisableHBR2:skl */
1157                 return (DP_LINK_BW_2_7 >> 3) + 1;
1158         else if (INTEL_INFO(dev)->gen >= 8 ||
1159             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1160                 return (DP_LINK_BW_5_4 >> 3) + 1;
1161         else
1162                 return (DP_LINK_BW_2_7 >> 3) + 1;
1163 }
1164
1165 static void
1166 intel_dp_set_clock(struct intel_encoder *encoder,
1167                    struct intel_crtc_state *pipe_config, int link_bw)
1168 {
1169         struct drm_device *dev = encoder->base.dev;
1170         const struct dp_link_dpll *divisor = NULL;
1171         int i, count = 0;
1172
1173         if (IS_G4X(dev)) {
1174                 divisor = gen4_dpll;
1175                 count = ARRAY_SIZE(gen4_dpll);
1176         } else if (HAS_PCH_SPLIT(dev)) {
1177                 divisor = pch_dpll;
1178                 count = ARRAY_SIZE(pch_dpll);
1179         } else if (IS_CHERRYVIEW(dev)) {
1180                 divisor = chv_dpll;
1181                 count = ARRAY_SIZE(chv_dpll);
1182         } else if (IS_VALLEYVIEW(dev)) {
1183                 divisor = vlv_dpll;
1184                 count = ARRAY_SIZE(vlv_dpll);
1185         }
1186
1187         if (divisor && count) {
1188                 for (i = 0; i < count; i++) {
1189                         if (link_bw == divisor[i].link_bw) {
1190                                 pipe_config->dpll = divisor[i].dpll;
1191                                 pipe_config->clock_set = true;
1192                                 break;
1193                         }
1194                 }
1195         }
1196 }
1197
/*
 * Write the intersection of two ascending-sorted rate arrays into
 * supported_rates and return the number of common entries.
 * Classic two-index merge walk.
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *supported_rates)
{
	int si = 0, ki = 0, out = 0;

	while (si < source_len && ki < sink_len) {
		int s = source_rates[si];
		int k = sink_rates[ki];

		if (s < k) {
			si++;
		} else if (k < s) {
			ki++;
		} else {
			supported_rates[out++] = s;
			si++;
			ki++;
		}
	}

	return out;
}
1218
/*
 * Compute the link rates common to source and sink into
 * supported_rates (ascending) and return how many there are.
 */
static int intel_supported_rates(struct intel_dp *intel_dp,
				 int *supported_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates;
	const int *sink_rates;
	int source_len, sink_len;

	source_len = intel_dp_source_rates(dev, &source_rates);
	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       supported_rates);
}
1233
1234 static int rate_to_index(int find, const int *rates)
1235 {
1236         int i = 0;
1237
1238         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1239                 if (find == rates[i])
1240                         break;
1241
1242         return i;
1243 }
1244
/*
 * Compute the DP link configuration (lane count, link rate, pipe bpp)
 * for the mode in pipe_config.
 *
 * Walks bpp downward from the requested value (2 bpc steps, down to
 * 6 bpc) and, for each bpp, tries every common source/sink link rate
 * and lane count (smallest first) until the mode's data rate fits.
 * Returns false if the mode cannot be carried at all (or uses double
 * clocking, which DP does not support here).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
	int supported_len;

	supported_len = intel_supported_rates(intel_dp, supported_rates);

	/* No common link rates between source and sink */
	WARN_ON(supported_len <= 0);

	/* 'clock' below indexes into supported_rates[]. */
	max_clock = supported_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	/* eDP: force the panel's fixed mode and apply panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, supported_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			/* Lane counts are powers of two: 1, 2, 4. */
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = supported_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	intel_dp->link_bw =
		drm_dp_link_rate_to_bw_code(supported_rates[clock]);

	/*
	 * Gen9+ sinks that expose a DPCD rate table are driven via a
	 * rate select index instead of a link BW code; link_bw == 0
	 * marks that case for the link training code.
	 */
	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
		intel_dp->rate_select =
			rate_to_index(supported_rates[clock],
				      intel_dp->supported_rates);
		intel_dp->link_bw = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = supported_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second set of m/n values for seamless downclocking (DRRS). */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Finally pick the PLL configuration matching the platform. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1403
/*
 * Program the CPU eDP PLL frequency in DP_A to match the configured
 * port clock (160 MHz for a 162000 link, 270 MHz otherwise), and
 * mirror the chosen bit into the cached intel_dp->DP value.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	/* Let the PLL settle before anyone touches the port. */
	POSTING_READ(DP_A);
	udelay(500);
}
1434
/*
 * Build the cached DP port register value (intel_dp->DP) for the
 * upcoming modeset. Nothing is written to the port register here;
 * only the BIOS-set DP_DETECTED bit is read back and preserved.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *      IBX PCH
	 *      SNB CPU
	 *      IVB CPU
	 *      CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU port A: CPT-style training bits, pipe select in bit 29. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX/CPU-style layout: sync polarity, color range and
		 * pipe select all live in this register. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			/* CHV has a third pipe, needing a wider field. */
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT PCH: everything else lives in TRANS_DP_CTL. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1509
1510 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1511 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1512
1513 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1514 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1515
1516 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1517 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1518
/*
 * Poll the panel power status register until (status & mask) == value,
 * for up to 5 seconds (10 ms steps); logs an error on timeout rather
 * than failing. Caller must hold pps_mutex (asserted below).
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1545
/* Wait until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1551
/* Wait until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1557
/*
 * Enforce the panel's mandatory power-cycle delay: first wait out the
 * software-tracked delay since the last power-off, then wait for the
 * hardware sequencer to reach its off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1569
/* Honour the panel's power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1575
/* Honour the panel's backlight-off -> power-down delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1581
/*
 * Read the current pp_control value and return it with the register
 * write-protect key replaced by the unlock value, so the caller's
 * subsequent write is not locked out. Caller must hold pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}
1599
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true if VDD was not already requested, i.e. the caller is
 * now responsible for eventually turning it back off; false if VDD
 * was already wanted (nested call) or this is not an eDP port.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        /* Sample before we set want_panel_vdd below. */
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        /* Cancel any pending deferred VDD-off and record our interest. */
        cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;

        /* Hardware already has VDD forced on: nothing more to program. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        /* Hold the port's power domain for as long as VDD is forced on. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        /* Respect the minimum off->on cycle time before re-powering. */
        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
1657
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        bool vdd;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);

        /* A nested request here indicates a locking bug in the caller. */
        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
             port_name(dp_to_dig_port(intel_dp)->port));
}
1679
1680 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1681 {
1682         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1683         struct drm_i915_private *dev_priv = dev->dev_private;
1684         struct intel_digital_port *intel_dig_port =
1685                 dp_to_dig_port(intel_dp);
1686         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1687         enum intel_display_power_domain power_domain;
1688         u32 pp;
1689         u32 pp_stat_reg, pp_ctrl_reg;
1690
1691         lockdep_assert_held(&dev_priv->pps_mutex);
1692
1693         WARN_ON(intel_dp->want_panel_vdd);
1694
1695         if (!edp_have_panel_vdd(intel_dp))
1696                 return;
1697
1698         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1699                       port_name(intel_dig_port->port));
1700
1701         pp = ironlake_get_pp_control(intel_dp);
1702         pp &= ~EDP_FORCE_VDD;
1703
1704         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1705         pp_stat_reg = _pp_stat_reg(intel_dp);
1706
1707         I915_WRITE(pp_ctrl_reg, pp);
1708         POSTING_READ(pp_ctrl_reg);
1709
1710         /* Make sure sequencer is idle before allowing subsequent activity */
1711         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1712         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1713
1714         if ((pp & POWER_TARGET_ON) == 0)
1715                 intel_dp->last_power_cycle = jiffies;
1716
1717         power_domain = intel_display_port_power_domain(intel_encoder);
1718         intel_display_power_put(dev_priv, power_domain);
1719 }
1720
/*
 * Delayed-work callback that turns VDD off, unless someone requested
 * VDD again between scheduling and execution.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        /* Re-check under the lock: a new VDD request cancels the off. */
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
1731
1732 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1733 {
1734         unsigned long delay;
1735
1736         /*
1737          * Queue the timer to fire a long time from now (relative to the power
1738          * down delay) to keep the panel power up across a sequence of
1739          * operations.
1740          */
1741         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1742         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1743 }
1744
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: if true, turn VDD off immediately; otherwise defer it via
 *        the panel_vdd_work timer so closely spaced operations don't
 *        bounce VDD.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        struct drm_i915_private *dev_priv =
                intel_dp_to_dev(intel_dp)->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
             port_name(dp_to_dig_port(intel_dp)->port));

        intel_dp->want_panel_vdd = false;

        if (sync)
                edp_panel_vdd_off_sync(intel_dp);
        else
                edp_panel_vdd_schedule_off(intel_dp);
}
1770
/*
 * Turn the eDP panel power on and wait until the sequencer reports it
 * as on. Caller must hold pps_mutex. No-op for non-eDP; warns and
 * bails if the panel is already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        /* Respect the minimum off->on delay before powering up again. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for the backlight-on delay (see wait_backlight_on()). */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
1818
/* Public wrapper: take pps_mutex and power the eDP panel on. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
1828
1829
/*
 * Turn the eDP panel power off and wait for completion. Requires that
 * VDD is currently wanted (we clear the VDD force bit together with
 * panel power) and drops the associated power domain reference.
 * Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        /* VDD goes down together with panel power in the write below. */
        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Timestamp for the off->on cycle delay, then wait for off. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1871
/* Public wrapper: take pps_mutex and power the eDP panel off. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
1881
1882 /* Enable backlight in the panel power control. */
1883 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1884 {
1885         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1886         struct drm_device *dev = intel_dig_port->base.base.dev;
1887         struct drm_i915_private *dev_priv = dev->dev_private;
1888         u32 pp;
1889         u32 pp_ctrl_reg;
1890
1891         /*
1892          * If we enable the backlight right away following a panel power
1893          * on, we may see slight flicker as the panel syncs with the eDP
1894          * link.  So delay a bit to make sure the image is solid before
1895          * allowing it to appear.
1896          */
1897         wait_backlight_on(intel_dp);
1898
1899         pps_lock(intel_dp);
1900
1901         pp = ironlake_get_pp_control(intel_dp);
1902         pp |= EDP_BLC_ENABLE;
1903
1904         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1905
1906         I915_WRITE(pp_ctrl_reg, pp);
1907         POSTING_READ(pp_ctrl_reg);
1908
1909         pps_unlock(intel_dp);
1910 }
1911
/* Enable backlight PWM and backlight PP control.
 * PWM must be running before the PP enable bit is set. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
1923
/* Disable backlight in the panel power control register, then wait
 * out the panel's backlight-off delay before returning. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Timestamp first, then honour the backlight-off delay. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
1950
/* Disable backlight PP control and backlight PWM.
 * Mirror image of intel_edp_backlight_on(): PP bit first, PWM last. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
1962
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
                                      bool enable)
{
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
        bool is_enabled;

        /* Read the current hardware state under the PPS lock. */
        pps_lock(intel_dp);
        is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
        pps_unlock(intel_dp);

        /* Already in the requested state: repeated calls are no-ops. */
        if (is_enabled == enable)
                return;

        DRM_DEBUG_KMS("panel power control backlight %s\n",
                      enable ? "enable" : "disable");

        if (enable)
                _intel_edp_backlight_on(intel_dp);
        else
                _intel_edp_backlight_off(intel_dp);
}
1988
/*
 * Enable the eDP PLL on the CPU DP A port (ILK-era). The pipe must be
 * disabled, and the PLL must be on before the port is enabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Allow the PLL to spin up before anyone relies on it. */
        udelay(200);
}
2014
/*
 * Disable the eDP PLL on the CPU DP A port. Pipe and port must already
 * be off. Writes the live register value rather than the cached one —
 * see the comment below.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        /* Let the PLL settle before it may be re-enabled. */
        udelay(200);
}
2039
/* If the sink supports it, try to set the power state appropriately
 * via the DP_SET_POWER DPCD register. Failures are logged, not fatal. */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point; DP_SET_POWER only
         * exists from DPCD revision 1.1 onwards. */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
        } else {
                /*
                 * When turning on, retry the write a few times (1 ms
                 * apart) to give the sink time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        /* drm_dp_dpcd_writeb() returns 1 on success. */
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }

        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2070
/*
 * Read back whether this DP encoder is enabled in hardware and, if so,
 * which pipe it is driving (written to *pipe). Returns false if the
 * power domain or the port itself is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        /* Pipe-select encoding in the port register varies by platform. */
        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /* CPT PCH ports: the transcoder registers record which
                 * port each transcoder drives; scan them for ours. */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        /* Unknown register; report enabled without a pipe. */
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
2129
/*
 * Read the current hardware state of this DP encoder back into
 * @pipe_config: sync polarity flags, audio, color range, link M/N,
 * port clock and the derived dot clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        /* Sync polarity lives in the port register, except on CPT PCH
         * ports where it is in the transcoder register. */
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A link rate is encoded in DP_A's PLL frequency field. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2213
/*
 * Encoder disable hook: tear down audio/PSR, then run the eDP panel
 * shutdown sequence (backlight off, sink to D3, panel off) with VDD
 * held throughout.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2237
/* ILK+ post-disable: drop the link, and for eDP on port A also turn
 * off the dedicated eDP PLL (only safe after the pipe is disabled). */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;

        intel_dp_link_down(intel_dp);
        if (port == PORT_A)
                ironlake_edp_pll_off(intel_dp);
}
2247
/* VLV post-disable: just take the link down after the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        intel_dp_link_down(intel_dp);
}
2254
/*
 * CHV post-disable: take the link down, then reset the PHY data lanes
 * through DPIO so the next enable starts from a clean lane state.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_link_down(intel_dp);

        mutex_lock(&dev_priv->dpio_lock);

        /* Propagate soft reset to data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        /* Assert lane reset on both PCS groups (lanes 0/1 and 2/3). */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
2290
/*
 * Translate a DP training pattern request into the platform-specific
 * register encoding. On DDI platforms the pattern is programmed
 * directly into DP_TP_CTL; otherwise the bits are folded into *DP,
 * which the caller is expected to write to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: training pattern lives in DP_TP_CTL, written here. */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                /* CPT PCH ports use their own pattern field in *DP. */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Fall back to pattern 2; CPT has no TPS3 encoding. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* g4x/ILK CPU ports and VLV/CHV. */
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV has a TPS3 encoding on this path. */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2373
/*
 * Enable the DP port with training pattern 1 selected, using the
 * two-step write sequence required on VLV/CHV (see comment below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2397
/*
 * Common DP enable sequence: program the port, run the eDP panel
 * power-on (with temporary VDD), wake the sink, link-train, and start
 * audio if the mode carries it. Warns and bails if the port is
 * already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        /* VLV/CHV: claim a power sequencer for this port first. */
        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* Power the panel on under a temporary VDD reference. */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev))
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));

        /* Wake the sink, then run the full training sequence. */
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2436
2437 static void g4x_enable_dp(struct intel_encoder *encoder)
2438 {
2439         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2440
2441         intel_enable_dp(encoder);
2442         intel_edp_backlight_on(intel_dp);
2443 }
2444
2445 static void vlv_enable_dp(struct intel_encoder *encoder)
2446 {
2447         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2448
2449         intel_edp_backlight_on(intel_dp);
2450         intel_psr_enable(intel_dp);
2451 }
2452
2453 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2454 {
2455         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2456         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2457
2458         intel_dp_prepare(encoder);
2459
2460         /* Only ilk+ has port A */
2461         if (dport->port == PORT_A) {
2462                 ironlake_set_pll_cpu_edp(intel_dp);
2463                 ironlake_edp_pll_on(intel_dp);
2464         }
2465 }
2466
/*
 * Logically disconnect intel_dp from its current power sequencer:
 * sync away any vdd we hold, clear the PPS port select for that pipe,
 * and mark pps_pipe as unassigned.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* make sure vdd is off before we touch the PPS registers */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2492
/*
 * Detach the power sequencer of @pipe from whichever eDP port currently
 * owns it, so it can be reassigned to a new port. Warns if the current
 * owner is still active.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* only pipes A and B have power sequencers on VLV/CHV */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* only eDP ports own a power sequencer */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->connectors_active,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2529
/*
 * Assign the power sequencer of the crtc's pipe to this eDP port:
 * release any sequencer the port held before, steal the new pipe's
 * sequencer from its previous owner, then (re)initialize the PPS state
 * and registers for this pipe/port. No-op for non-eDP ports or when the
 * port already owns the right sequencer.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2570
/*
 * VLV pre_enable hook: program the DPIO PHY clock channel and lane
 * settings for this pipe/port, then enable the DP port/link.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/*
	 * NOTE(review): the value read here is immediately discarded by
	 * "val = 0" below, which also makes the "val &= ~(1<<21)" branch
	 * a no-op. Presumably only the sideband read itself (if it has a
	 * side effect) matters — confirm before simplifying.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2599
/*
 * VLV pre_pll_enable hook: prepare the port registers and put the DPIO
 * Tx lanes into their default reset state before the PLL comes up.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
2629
/*
 * CHV pre_enable hook: take the PHY data lanes out of reset, program
 * per-lane latency/upar settings, then enable the DP port/link.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset (both PCS groups) */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < 4; i++) {
		/* Set the latency optimal bit (lane 1 gets 0, others 0x6) */
		data = (i == 1) ? 0x0 : 0x6;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
				data << DPIO_FRC_LATENCY_SHFIT);

		/* Set the upar bit */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* FIXME: Fix up value only after power analysis */

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2691
/*
 * CHV pre_pll_enable hook: program the left/right clock buffer
 * distribution and the clock channel usage for this pipe/port
 * combination before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->dpio_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2757
2758 /*
2759  * Native read with retry for link status and receiver capability reads for
2760  * cases where the sink may still be asleep.
2761  *
2762  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2763  * supposed to retry 3 times per the spec.
2764  */
2765 static ssize_t
2766 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2767                         void *buffer, size_t size)
2768 {
2769         ssize_t ret;
2770         int i;
2771
2772         /*
2773          * Sometime we just get the same incorrect byte repeated
2774          * over the entire buffer. Doing just one throw away read
2775          * initially seems to "solve" it.
2776          */
2777         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2778
2779         for (i = 0; i < 3; i++) {
2780                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2781                 if (ret == size)
2782                         return ret;
2783                 msleep(1);
2784         }
2785
2786         return ret;
2787 }
2788
2789 /*
2790  * Fetch AUX CH registers 0x202 - 0x207 which contain
2791  * link status information
2792  */
2793 static bool
2794 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2795 {
2796         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2797                                        DP_LANE0_1_STATUS,
2798                                        link_status,
2799                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2800 }
2801
2802 /* These are source-specific values. */
2803 static uint8_t
2804 intel_dp_voltage_max(struct intel_dp *intel_dp)
2805 {
2806         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2807         struct drm_i915_private *dev_priv = dev->dev_private;
2808         enum port port = dp_to_dig_port(intel_dp)->port;
2809
2810         if (INTEL_INFO(dev)->gen >= 9) {
2811                 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2812                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2813                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2814         } else if (IS_VALLEYVIEW(dev))
2815                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2816         else if (IS_GEN7(dev) && port == PORT_A)
2817                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2818         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2819                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2820         else
2821                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2822 }
2823
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, per platform. Higher swing levels leave less headroom for
 * pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2891
/*
 * Translate the DP training voltage swing / pre-emphasis request in
 * train_set[0] into VLV DPIO PHY register values and program them.
 * Unsupported swing/pre-emphasis combinations are silently ignored.
 * Always returns 0 (VLV carries the levels in DPIO, not in the DP
 * port register).
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* magic PHY tuning values per swing/pre-emphasis combination */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* latch the new settings: disable, program, re-enable the lanes */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2991
2992 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2993 {
2994         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2995         struct drm_i915_private *dev_priv = dev->dev_private;
2996         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2997         struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
2998         u32 deemph_reg_value, margin_reg_value, val;
2999         uint8_t train_set = intel_dp->train_set[0];
3000         enum dpio_channel ch = vlv_dport_to_channel(dport);
3001         enum pipe pipe = intel_crtc->pipe;
3002         int i;
3003
3004         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3005         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3006                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3007                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3008                         deemph_reg_value = 128;
3009                         margin_reg_value = 52;
3010                         break;
3011                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3012                         deemph_reg_value = 128;
3013                         margin_reg_value = 77;
3014                         break;
3015                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3016                         deemph_reg_value = 128;
3017                         margin_reg_value = 102;
3018                         break;
3019                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3020                         deemph_reg_value = 128;
3021                         margin_reg_value = 154;
3022                         /* FIXME extra to set for 1200 */
3023                         break;
3024                 default:
3025                         return 0;
3026                 }
3027                 break;
3028         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3029                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3030                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3031                         deemph_reg_value = 85;
3032                         margin_reg_value = 78;
3033                         break;
3034                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3035                         deemph_reg_value = 85;
3036                         margin_reg_value = 116;
3037                         break;
3038                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3039                         deemph_reg_value = 85;
3040                         margin_reg_value = 154;
3041                         break;
3042                 default:
3043                         return 0;
3044                 }
3045                 break;
3046         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3047                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3048                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3049                         deemph_reg_value = 64;
3050                         margin_reg_value = 104;
3051                         break;
3052                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3053                         deemph_reg_value = 64;
3054                         margin_reg_value = 154;
3055                         break;
3056                 default:
3057                         return 0;
3058                 }
3059                 break;
3060         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3061                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3062                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3063                         deemph_reg_value = 43;
3064                         margin_reg_value = 154;
3065                         break;
3066                 default:
3067                         return 0;
3068                 }
3069                 break;
3070         default:
3071                 return 0;
3072         }
3073
3074         mutex_lock(&dev_priv->dpio_lock);
3075
3076         /* Clear calc init */
3077         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3078         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3079         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3080         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3081         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3082
3083         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3084         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3085         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3086         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3087         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3088
3089         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3090         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3091         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3092         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3093
3094         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3095         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3096         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3097         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3098
3099         /* Program swing deemph */
3100         for (i = 0; i < 4; i++) {
3101                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3102                 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3103                 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3104                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3105         }
3106
3107         /* Program swing margin */
3108         for (i = 0; i < 4; i++) {
3109                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3110                 val &= ~DPIO_SWING_MARGIN000_MASK;
3111                 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3112                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3113         }
3114
3115         /* Disable unique transition scale */
3116         for (i = 0; i < 4; i++) {
3117                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3118                 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3119                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3120         }
3121
3122         if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3123                         == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3124                 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3125                         == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3126
3127                 /*
3128                  * The document said it needs to set bit 27 for ch0 and bit 26
3129                  * for ch1. Might be a typo in the doc.
3130                  * For now, for this unique transition scale selection, set bit
3131                  * 27 for ch0 and ch1.
3132                  */
3133                 for (i = 0; i < 4; i++) {
3134                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3135                         val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3136                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3137                 }
3138
3139                 for (i = 0; i < 4; i++) {
3140                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3141                         val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3142                         val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3143                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3144                 }
3145         }
3146
3147         /* Start swing calculation */
3148         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3149         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3150         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3151
3152         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3153         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3154         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3155
3156         /* LRC Bypass */
3157         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3158         val |= DPIO_LRC_BYPASS;
3159         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3160
3161         mutex_unlock(&dev_priv->dpio_lock);
3162
3163         return 0;
3164 }
3165
3166 static void
3167 intel_get_adjust_train(struct intel_dp *intel_dp,
3168                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3169 {
3170         uint8_t v = 0;
3171         uint8_t p = 0;
3172         int lane;
3173         uint8_t voltage_max;
3174         uint8_t preemph_max;
3175
3176         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3177                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3178                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3179
3180                 if (this_v > v)
3181                         v = this_v;
3182                 if (this_p > p)
3183                         p = this_p;
3184         }
3185
3186         voltage_max = intel_dp_voltage_max(intel_dp);
3187         if (v >= voltage_max)
3188                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3189
3190         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3191         if (p >= preemph_max)
3192                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3193
3194         for (lane = 0; lane < 4; lane++)
3195                 intel_dp->train_set[lane] = v | p;
3196 }
3197
3198 static uint32_t
3199 intel_gen4_signal_levels(uint8_t train_set)
3200 {
3201         uint32_t        signal_levels = 0;
3202
3203         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3204         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3205         default:
3206                 signal_levels |= DP_VOLTAGE_0_4;
3207                 break;
3208         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3209                 signal_levels |= DP_VOLTAGE_0_6;
3210                 break;
3211         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3212                 signal_levels |= DP_VOLTAGE_0_8;
3213                 break;
3214         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3215                 signal_levels |= DP_VOLTAGE_1_2;
3216                 break;
3217         }
3218         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3219         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3220         default:
3221                 signal_levels |= DP_PRE_EMPHASIS_0;
3222                 break;
3223         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3224                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3225                 break;
3226         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3227                 signal_levels |= DP_PRE_EMPHASIS_6;
3228                 break;
3229         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3230                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3231                 break;
3232         }
3233         return signal_levels;
3234 }
3235
3236 /* Gen6's DP voltage swing and pre-emphasis control */
3237 static uint32_t
3238 intel_gen6_edp_signal_levels(uint8_t train_set)
3239 {
3240         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3241                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3242         switch (signal_levels) {
3243         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3244         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3245                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3246         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3247                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3248         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3249         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3250                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3251         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3252         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3253                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3254         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3255         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3256                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3257         default:
3258                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3259                               "0x%x\n", signal_levels);
3260                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3261         }
3262 }
3263
3264 /* Gen7's DP voltage swing and pre-emphasis control */
3265 static uint32_t
3266 intel_gen7_edp_signal_levels(uint8_t train_set)
3267 {
3268         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3269                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3270         switch (signal_levels) {
3271         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3272                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3273         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3274                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3275         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3276                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3277
3278         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3279                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3280         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3281                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3282
3283         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3284                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3285         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3286                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3287
3288         default:
3289                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3290                               "0x%x\n", signal_levels);
3291                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3292         }
3293 }
3294
3295 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3296 static uint32_t
3297 intel_hsw_signal_levels(uint8_t train_set)
3298 {
3299         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3300                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3301         switch (signal_levels) {
3302         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3303                 return DDI_BUF_TRANS_SELECT(0);
3304         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3305                 return DDI_BUF_TRANS_SELECT(1);
3306         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3307                 return DDI_BUF_TRANS_SELECT(2);
3308         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3309                 return DDI_BUF_TRANS_SELECT(3);
3310
3311         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3312                 return DDI_BUF_TRANS_SELECT(4);
3313         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3314                 return DDI_BUF_TRANS_SELECT(5);
3315         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3316                 return DDI_BUF_TRANS_SELECT(6);
3317
3318         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3319                 return DDI_BUF_TRANS_SELECT(7);
3320         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3321                 return DDI_BUF_TRANS_SELECT(8);
3322
3323         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3324                 return DDI_BUF_TRANS_SELECT(9);
3325         default:
3326                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3327                               "0x%x\n", signal_levels);
3328                 return DDI_BUF_TRANS_SELECT(0);
3329         }
3330 }
3331
/*
 * Properly updates "DP" with the correct signal levels.
 *
 * Picks the platform-specific encoding for the voltage-swing/pre-emphasis
 * in intel_dp->train_set[0] and merges it into the port register value at
 * *DP, clearing only the bits covered by that platform's mask. On VLV/CHV
 * the mask is 0 — the levels are programmed through the platform helpers
 * instead of *DP (the helpers' return value is OR'ed in with no bits
 * cleared).
 *
 * NOTE(review): the if/else order matters — HSW/BDW/gen9+ must be tested
 * before the PORT_A gen6/gen7 eDP cases.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
3366
/*
 * Program a link training pattern on both ends of the link.
 *
 * Source side: updates *DP via the platform helper and writes it to the
 * port register. Sink side: writes the pattern to DP_TRAINING_PATTERN_SET;
 * unless the pattern is DP_TRAINING_PATTERN_DISABLE, the per-lane drive
 * settings from intel_dp->train_set are written in the same AUX
 * transaction (DP_TRAINING_LANEx_SET directly follow
 * DP_TRAINING_PATTERN_SET in the DPCD address space).
 *
 * Returns true if the full DPCD write completed.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	/* Platform-specific source-side programming of the pattern. */
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3399
/*
 * Restart training from scratch: zero all per-lane drive settings,
 * program the matching (lowest) signal levels, then set @dp_train_pat on
 * source and sink. Returns the result of intel_dp_set_link_train().
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3408
/*
 * Apply the sink's requested voltage-swing/pre-emphasis adjustments.
 *
 * Recomputes intel_dp->train_set from @link_status, programs the new
 * signal levels into the port register, and pushes the per-lane values
 * to the sink starting at DP_TRAINING_LANE0_SET.
 *
 * Returns true if the DPCD write covered all active lanes.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3429
/*
 * Switch a DDI port to transmitting idle patterns after link training.
 * No-op on platforms without DDI. Waits for the idle-done status except
 * on PORT_A (see comment below).
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3460
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training.
 *
 * Writes the link configuration (bandwidth, lane count, optional link
 * rate selection on gen9+, 8b/10b coding) to the sink, enables the port,
 * and runs training pattern 1 until clock recovery succeeds or the retry
 * budget is exhausted: up to 5 full restarts after all lanes hit max
 * swing, and up to 5 attempts at any single voltage level. The final
 * port register value is cached in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks advertising explicit link rates get DP_LINK_RATE_SET too. */
	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;		/* sentinel: never matches a real swing level */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing: restart from zeroed settings. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
3554
/*
 * Channel-equalization phase of DP link training.
 *
 * Runs training pattern 2 (or 3 for HBR2 / TPS3-capable sinks) until
 * channel equalization succeeds. If clock recovery is lost, or 6 EQ
 * attempts at one setting fail, the clock-recovery phase is rerun; after
 * 6 such restarts the link is given up. Finishes by switching the port
 * to idle-pattern transmission and caching the register value.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3634
/* Tell source and sink to stop sending training patterns. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3640
/*
 * Shut the DP link down on pre-DDI platforms (WARNs and bails on DDI).
 *
 * Puts the port into the idle training pattern, applies the IBX
 * transcoder-B workaround, then disables audio and the port itself, and
 * finally honors the panel power-down delay.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
3690
/*
 * Read and cache the sink's DPCD, probing the capabilities we care about.
 *
 * Populates intel_dp->dpcd (and psr_dpcd for eDP), decides TPS3 usage,
 * reads the eDP 1.4 supported-link-rates table, and fetches downstream
 * port info when a branch device is present.
 *
 * Returns false on AUX failure, missing DPCD, or a failed downstream
 * port read; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 supported_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				supported_rates,
				sizeof(supported_rates));

		/* Table is zero-terminated; entries are in units of 200 kHz
		 * per the eDP spec, so * 200 converts to kHz. */
		for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
			int val = le16_to_cpu(supported_rates[i]);

			if (val == 0)
				break;

			intel_dp->supported_rates[i] = val * 200;
		}
		intel_dp->num_supported_rates = i;
	}
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3766
/*
 * Read and log the sink and branch device OUIs, if the sink advertises
 * OUI support. Purely informational — failures are ignored.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}
3783
3784 static bool
3785 intel_dp_probe_mst(struct intel_dp *intel_dp)
3786 {
3787         u8 buf[1];
3788
3789         if (!intel_dp->can_mst)
3790                 return false;
3791
3792         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3793                 return false;
3794
3795         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3796                 if (buf[0] & DP_MST_CAP) {
3797                         DRM_DEBUG_KMS("Sink is MST capable\n");
3798                         intel_dp->is_mst = true;
3799                 } else {
3800                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3801                         intel_dp->is_mst = false;
3802                 }
3803         }
3804
3805         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3806         return intel_dp->is_mst;
3807 }
3808
/*
 * Ask the sink to compute a frame CRC and read it back.
 *
 * Starts the sink's test-CRC engine, waits up to 6 vblanks for the
 * sink's CRC count to change, copies the 6 CRC bytes into @crc, and
 * stops the engine again.
 *
 * Returns 0 on success, -ENOTTY if the sink can't compute CRCs, -EIO on
 * AUX failure, or -ETIMEDOUT if the CRC never updated.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Start the sink-side CRC calculation. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0)
		return -EIO;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* Wait (one vblank per attempt) for the CRC count to advance. */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0)
			return -EIO;
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		return -ETIMEDOUT;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EIO;

	/* Stop the sink-side CRC calculation again. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0)
		return -EIO;

	return 0;
}
3859
3860 static bool
3861 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3862 {
3863         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3864                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3865                                        sink_irq_vector, 1) == 1;
3866 }
3867
3868 static bool
3869 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3870 {
3871         int ret;
3872
3873         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3874                                              DP_SINK_COUNT_ESI,
3875                                              sink_irq_vector, 14);
3876         if (ret != 14)
3877                 return false;
3878
3879         return true;
3880 }
3881
/*
 * Respond to a sink automated-test request (DP_AUTOMATED_TEST_REQUEST).
 * No test types are implemented, so every request is NAKed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3888
/*
 * Service an MST short-pulse interrupt: read the sink's ESI block,
 * retrain the link if channel EQ has been lost, and hand sideband
 * events to the MST topology manager.  Loops while new ESI data keeps
 * arriving.
 *
 * Returns the topology manager's result (>= 0) when in MST mode and
 * the sink responds; returns -EINVAL when not in MST mode or when the
 * sink stopped answering (in which case MST mode is torn down and a
 * hotplug event is sent so userspace re-probes).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events back to the sink;
				 * retry the 3-byte ack write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Re-read the ESI and go around again to pick
				 * up any events that arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3945
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Caller must hold the connection_mutex (enforced by the WARN_ON
 * below).  Bails out silently when the encoder is inactive or the
 * link/DPCD cannot be read.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to do unless the output is enabled on an active crtc. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if channel equalization has been lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4005
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Work out connection status from the DPCD alone: a sink with no
 * branch (downstream port) is connected; behind a branch device we
 * consult SINK_COUNT (HPD-capable, DPCD >= 1.1), then fall back to a
 * DDC probe, and finally give up with "unknown" for port types that
 * are known to be unreliable.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		/* NOTE(review): read_wake returns a byte count; a 0-byte
		 * "success" would leave reg uninitialized here — confirm
		 * whether this should check != 1 instead of < 0. */
		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4055
4056 static enum drm_connector_status
4057 edp_detect(struct intel_dp *intel_dp)
4058 {
4059         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4060         enum drm_connector_status status;
4061
4062         status = intel_panel_detect(dev);
4063         if (status == connector_status_unknown)
4064                 status = connector_status_connected;
4065
4066         return status;
4067 }
4068
4069 static enum drm_connector_status
4070 ironlake_dp_detect(struct intel_dp *intel_dp)
4071 {
4072         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4073         struct drm_i915_private *dev_priv = dev->dev_private;
4074         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4075
4076         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4077                 return connector_status_disconnected;
4078
4079         return intel_dp_detect_dpcd(intel_dp);
4080 }
4081
4082 static int g4x_digital_port_connected(struct drm_device *dev,
4083                                        struct intel_digital_port *intel_dig_port)
4084 {
4085         struct drm_i915_private *dev_priv = dev->dev_private;
4086         uint32_t bit;
4087
4088         if (IS_VALLEYVIEW(dev)) {
4089                 switch (intel_dig_port->port) {
4090                 case PORT_B:
4091                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4092                         break;
4093                 case PORT_C:
4094                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4095                         break;
4096                 case PORT_D:
4097                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4098                         break;
4099                 default:
4100                         return -EINVAL;
4101                 }
4102         } else {
4103                 switch (intel_dig_port->port) {
4104                 case PORT_B:
4105                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4106                         break;
4107                 case PORT_C:
4108                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4109                         break;
4110                 case PORT_D:
4111                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4112                         break;
4113                 default:
4114                         return -EINVAL;
4115                 }
4116         }
4117
4118         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4119                 return 0;
4120         return 1;
4121 }
4122
4123 static enum drm_connector_status
4124 g4x_dp_detect(struct intel_dp *intel_dp)
4125 {
4126         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4127         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4128         int ret;
4129
4130         /* Can't disconnect eDP, but you can close the lid... */
4131         if (is_edp(intel_dp)) {
4132                 enum drm_connector_status status;
4133
4134                 status = intel_panel_detect(dev);
4135                 if (status == connector_status_unknown)
4136                         status = connector_status_connected;
4137                 return status;
4138         }
4139
4140         ret = g4x_digital_port_connected(dev, intel_dig_port);
4141         if (ret == -EINVAL)
4142                 return connector_status_unknown;
4143         else if (ret == 0)
4144                 return connector_status_disconnected;
4145
4146         return intel_dp_detect_dpcd(intel_dp);
4147 }
4148
4149 static struct edid *
4150 intel_dp_get_edid(struct intel_dp *intel_dp)
4151 {
4152         struct intel_connector *intel_connector = intel_dp->attached_connector;
4153
4154         /* use cached edid if we have one */
4155         if (intel_connector->edid) {
4156                 /* invalid edid */
4157                 if (IS_ERR(intel_connector->edid))
4158                         return NULL;
4159
4160                 return drm_edid_duplicate(intel_connector->edid);
4161         } else
4162                 return drm_get_edid(&intel_connector->base,
4163                                     &intel_dp->aux.ddc);
4164 }
4165
4166 static void
4167 intel_dp_set_edid(struct intel_dp *intel_dp)
4168 {
4169         struct intel_connector *intel_connector = intel_dp->attached_connector;
4170         struct edid *edid;
4171
4172         edid = intel_dp_get_edid(intel_dp);
4173         intel_connector->detect_edid = edid;
4174
4175         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4176                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4177         else
4178                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4179 }
4180
4181 static void
4182 intel_dp_unset_edid(struct intel_dp *intel_dp)
4183 {
4184         struct intel_connector *intel_connector = intel_dp->attached_connector;
4185
4186         kfree(intel_connector->detect_edid);
4187         intel_connector->detect_edid = NULL;
4188
4189         intel_dp->has_audio = false;
4190 }
4191
4192 static enum intel_display_power_domain
4193 intel_dp_power_get(struct intel_dp *dp)
4194 {
4195         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4196         enum intel_display_power_domain power_domain;
4197
4198         power_domain = intel_display_port_power_domain(encoder);
4199         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4200
4201         return power_domain;
4202 }
4203
4204 static void
4205 intel_dp_power_put(struct intel_dp *dp,
4206                    enum intel_display_power_domain power_domain)
4207 {
4208         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4209         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4210 }
4211
/*
 * drm_connector_funcs.detect hook.  MST ports always report
 * disconnected here — their per-stream state lives in the topology
 * manager, not on this connector.  Otherwise, probe the port under a
 * display power reference, read the OUI, check MST capability, and
 * cache the EDID on success.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any previous probe result before re-detecting. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Everything below needs the port powered; released at "out". */
	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4268
/*
 * drm_connector_funcs.force hook: refresh the cached EDID for a
 * connector whose status is being forced, without running a full
 * detect cycle.  Does nothing unless the connector is currently
 * marked connected.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* EDID read needs the port powered. */
	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4292
4293 static int intel_dp_get_modes(struct drm_connector *connector)
4294 {
4295         struct intel_connector *intel_connector = to_intel_connector(connector);
4296         struct edid *edid;
4297
4298         edid = intel_connector->detect_edid;
4299         if (edid) {
4300                 int ret = intel_connector_update_modes(connector, edid);
4301                 if (ret)
4302                         return ret;
4303         }
4304
4305         /* if eDP has no EDID, fall back to fixed mode */
4306         if (is_edp(intel_attached_dp(connector)) &&
4307             intel_connector->panel.fixed_mode) {
4308                 struct drm_display_mode *mode;
4309
4310                 mode = drm_mode_duplicate(connector->dev,
4311                                           intel_connector->panel.fixed_mode);
4312                 if (mode) {
4313                         drm_mode_probed_add(connector, mode);
4314                         return 1;
4315                 }
4316         }
4317
4318         return 0;
4319 }
4320
4321 static bool
4322 intel_dp_detect_audio(struct drm_connector *connector)
4323 {
4324         bool has_audio = false;
4325         struct edid *edid;
4326
4327         edid = to_intel_connector(connector)->detect_edid;
4328         if (edid)
4329                 has_audio = drm_detect_monitor_audio(edid);
4330
4331         return has_audio;
4332 }
4333
/*
 * drm_connector_funcs.set_property hook.  Handles the force-audio and
 * Broadcast RGB properties, plus the scaling-mode property on eDP.
 * When an accepted change affects the output, the active crtc's mode
 * is restored at "done" to apply it.  Returns 0 on success/no-op,
 * -EINVAL for unknown properties or invalid values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset when nothing actually changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the change takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4421
/*
 * Free everything hanging off the connector: the detect-time EDID
 * cache, the fixed EDID (which may be an ERR_PTR sentinel that must
 * not be freed), and the panel state for eDP.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4440
/*
 * Tear down a DP encoder: unregister the AUX channel, clean up MST
 * state, and for eDP make sure the delayed VDD-off work cannot run
 * after the encoder memory is freed.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4466
/*
 * Encoder suspend hook: for eDP, flush the delayed VDD-off work and
 * force VDD off so the panel doesn't stay powered across suspend.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4483
/*
 * Reconcile our VDD state tracking with hardware left on by the BIOS.
 * Caller must hold pps_mutex (enforced by the lockdep assert below).
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4508
/*
 * drm_encoder_funcs.reset hook (init/resume): only eDP needs state
 * fixed up — re-read the power sequencer assignment on VLV and sync
 * our VDD tracking with whatever the BIOS left enabled.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4531
/* Connector ops for SST DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
4542
/* Probe helpers: mode list, mode validation, encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4548
/* Encoder ops: reset on init/resume, destroy on teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4553
/*
 * Intentionally empty hotplug callback; DP hotplug work is done in
 * intel_dp_hpd_pulse() below.
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
4559
/*
 * Handle a hot-plug pulse on a DP digital port.  Long pulses re-probe
 * the port (live status, DPCD, OUI, MST capability); short pulses
 * service MST events or, in SST mode, check link status.  Any failure
 * along the probe path drops the port out of MST mode via "mst_fail".
 * A display power reference is held across the whole sequence.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {

		/* Check live status first; an absent device ends MST mode. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4643
4644 /* Return which DP Port should be selected for Transcoder DP control */
4645 int
4646 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4647 {
4648         struct drm_device *dev = crtc->dev;
4649         struct intel_encoder *intel_encoder;
4650         struct intel_dp *intel_dp;
4651
4652         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4653                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4654
4655                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4656                     intel_encoder->type == INTEL_OUTPUT_EDP)
4657                         return intel_dp->output_reg;
4658         }
4659
4660         return -1;
4661 }
4662
4663 /* check the VBT to see whether the eDP is on DP-D port */
4664 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4665 {
4666         struct drm_i915_private *dev_priv = dev->dev_private;
4667         union child_device_config *p_child;
4668         int i;
4669         static const short port_mapping[] = {
4670                 [PORT_B] = PORT_IDPB,
4671                 [PORT_C] = PORT_IDPC,
4672                 [PORT_D] = PORT_IDPD,
4673         };
4674
4675         if (port == PORT_A)
4676                 return true;
4677
4678         if (!dev_priv->vbt.child_dev_num)
4679                 return false;
4680
4681         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4682                 p_child = dev_priv->vbt.child_dev + i;
4683
4684                 if (p_child->common.dvo_port == port_mapping[port] &&
4685                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4686                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4687                         return true;
4688         }
4689         return false;
4690 }
4691
4692 void
4693 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4694 {
4695         struct intel_connector *intel_connector = to_intel_connector(connector);
4696
4697         intel_attach_force_audio_property(connector);
4698         intel_attach_broadcast_rgb_property(connector);
4699         intel_dp->color_range_auto = true;
4700
4701         if (is_edp(intel_dp)) {
4702                 drm_mode_create_scaling_mode_property(connector->dev);
4703                 drm_object_attach_property(
4704                         &connector->base,
4705                         connector->dev->mode_config.scaling_mode_property,
4706                         DRM_MODE_SCALE_ASPECT);
4707                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4708         }
4709 }
4710
4711 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4712 {
4713         intel_dp->last_power_cycle = jiffies;
4714         intel_dp->last_power_on = jiffies;
4715         intel_dp->last_backlight_off = jiffies;
4716 }
4717
/*
 * Determine the panel power sequencing delays for this eDP panel.
 *
 * Reads the delays currently programmed into the PPS registers and the
 * delays from the VBT, takes the max of each field, and falls back to the
 * eDP 1.3 spec limits when both are zero. The result is cached in
 * intel_dp->pps_delays (clunky hw units of 100usec, except t11_t12 as
 * noted below) and converted into the intel_dp->*_delay fields in ms.
 *
 * Must be called with dev_priv->pps_mutex held (lockdep-asserted below).
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? (t11_t12 can never legitimately be zero) */
	if (final->t11_t12 != 0)
		return;

	/* Select the PPS register block: single PCH block vs. per-pipe on VLV. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* t11_t12 is in 100ms units in hw; scale to the common 100usec units. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100usec hw units to the ms units used by the waits. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4820
/*
 * Program the panel power sequencer registers from the delays cached in
 * intel_dp->pps_delays, including the pp clock divisor and (where the
 * hardware has them) the port selection bits.
 *
 * Must be called with dev_priv->pps_mutex held (lockdep-asserted below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Reference clock for the pp divisor differs between PCH and non-PCH. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Select the PPS register block: single PCH block vs. per-pipe on VLV. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	/* t11_t12 is cached in 100usec units; the register wants 100ms units. */
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read the registers back for the debug output below. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
4886
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	/* drrs.dp is only set while DRRS is enabled on an eDP panel. */
	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = encoder->new_crtc;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclock refresh rate selects the low RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV): switch RR by selecting the M1/N1 or M2/N2 values. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7/CHV: toggle the RR mode switch bit in PIPECONF instead. */
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
4990
4991 /**
4992  * intel_edp_drrs_enable - init drrs struct if supported
4993  * @intel_dp: DP struct
4994  *
4995  * Initializes frontbuffer_bits and drrs.dp
4996  */
4997 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
4998 {
4999         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5000         struct drm_i915_private *dev_priv = dev->dev_private;
5001         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5002         struct drm_crtc *crtc = dig_port->base.base.crtc;
5003         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5004
5005         if (!intel_crtc->config->has_drrs) {
5006                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5007                 return;
5008         }
5009
5010         mutex_lock(&dev_priv->drrs.mutex);
5011         if (WARN_ON(dev_priv->drrs.dp)) {
5012                 DRM_ERROR("DRRS already enabled\n");
5013                 goto unlock;
5014         }
5015
5016         dev_priv->drrs.busy_frontbuffer_bits = 0;
5017
5018         dev_priv->drrs.dp = intel_dp;
5019
5020 unlock:
5021         mutex_unlock(&dev_priv->drrs.mutex);
5022 }
5023
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the high (fixed mode) refresh rate if the panel is currently
 * downclocked, detaches @intel_dp from the DRRS state and cancels any
 * pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	/* Nothing to do if DRRS was never enabled. */
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Switch back to the fixed mode refresh rate before detaching. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Must cancel outside the mutex: the downclock work itself takes
	 * drrs.mutex, so cancelling synchronously while holding it would
	 * deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5056
5057 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5058 {
5059         struct drm_i915_private *dev_priv =
5060                 container_of(work, typeof(*dev_priv), drrs.work.work);
5061         struct intel_dp *intel_dp;
5062
5063         mutex_lock(&dev_priv->drrs.mutex);
5064
5065         intel_dp = dev_priv->drrs.dp;
5066
5067         if (!intel_dp)
5068                 goto unlock;
5069
5070         /*
5071          * The delayed work can race with an invalidate hence we need to
5072          * recheck.
5073          */
5074
5075         if (dev_priv->drrs.busy_frontbuffer_bits)
5076                 goto unlock;
5077
5078         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5079                 intel_dp_set_drrs_state(dev_priv->dev,
5080                         intel_dp->attached_connector->panel.
5081                         downclock_mode->vrefresh);
5082
5083 unlock:
5084
5085         mutex_unlock(&dev_priv->drrs.mutex);
5086 }
5087
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	/*
	 * NOTE(review): drrs.dp is read here without holding drrs.mutex;
	 * presumably tolerable as an early-out optimization, but worth
	 * confirming against concurrent enable/disable.
	 */
	if (!dev_priv->drrs.dp)
		return;

	/* Stop any pending downclock; done before taking drrs.mutex since
	 * the work item itself acquires that mutex. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Screen activity: force the high (fixed mode) refresh rate. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);
	}

	/* Only track frontbuffer bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
5126
/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	/*
	 * NOTE(review): drrs.dp is read here without holding drrs.mutex,
	 * mirroring intel_edp_drrs_invalidate() — confirm this early-out
	 * is safe against concurrent enable/disable.
	 */
	if (!dev_priv->drrs.dp)
		return;

	/* Done before taking drrs.mutex since the work item acquires it. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* Re-arm the 1 second idle timer once nothing is busy anymore. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
			!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5161
5162 /**
5163  * DOC: Display Refresh Rate Switching (DRRS)
5164  *
5165  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5166  * which enables switching between low and high refresh rates,
5167  * dynamically, based on the usage scenario. This feature is applicable
5168  * for internal panels.
5169  *
5170  * Indication that the panel supports DRRS is given by the panel EDID, which
5171  * would list multiple refresh rates for one resolution.
5172  *
5173  * DRRS is of 2 types - static and seamless.
5174  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5175  * (may appear as a blink on screen) and is used in dock-undock scenario.
5176  * Seamless DRRS involves changing RR without any visual effect to the user
5177  * and can be used during normal system usage. This is done by programming
5178  * certain registers.
5179  *
5180  * Support for static/seamless DRRS may be indicated in the VBT based on
5181  * inputs from the panel spec.
5182  *
5183  * DRRS saves power by switching to low RR based on usage scenarios.
5184  *
5185  * eDP DRRS:-
5186  *        The implementation is based on frontbuffer tracking implementation.
5187  * When there is a disturbance on the screen triggered by user activity or a
5188  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5189  * When there is no movement on screen, after a timeout of 1 second, a switch
5190  * to low RR is made.
5191  *        For integration with frontbuffer tracking code,
5192  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5193  *
5194  * DRRS can be further extended to support other internal panels and also
5195  * the scenario of video playback wherein RR is set based on the rate
5196  * requested by userspace.
5197  */
5198
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* DRRS is only implemented for gen7+. */
	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	/* The VBT must explicitly opt in to seamless DRRS. */
	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* EDID must provide a second, lower refresh rate for this mode. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);

	mutex_init(&dev_priv->drrs.mutex);

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Start out at the high (fixed mode) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5249
/*
 * Finish connector setup for eDP panels: verify the panel responds on the
 * AUX channel, program the panel power sequencer, cache the EDID, pick the
 * fixed (and optional DRRS downclock) mode, and set up the backlight.
 *
 * Returns false only when an eDP panel fails DPCD readout (i.e. the VBT
 * claimed eDP but no panel is actually there); non-eDP connectors return
 * true immediately.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	dev_priv->drrs.type = DRRS_NOT_SUPPORTED;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID was read but yields no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS needs the fixed mode to find a downclock mode. */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5358
5359 bool
5360 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5361                         struct intel_connector *intel_connector)
5362 {
5363         struct drm_connector *connector = &intel_connector->base;
5364         struct intel_dp *intel_dp = &intel_dig_port->dp;
5365         struct intel_encoder *intel_encoder = &intel_dig_port->base;
5366         struct drm_device *dev = intel_encoder->base.dev;
5367         struct drm_i915_private *dev_priv = dev->dev_private;
5368         enum port port = intel_dig_port->port;
5369         int type;
5370
5371         intel_dp->pps_pipe = INVALID_PIPE;
5372
5373         /* intel_dp vfuncs */
5374         if (INTEL_INFO(dev)->gen >= 9)
5375                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5376         else if (IS_VALLEYVIEW(dev))
5377                 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5378         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5379                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5380         else if (HAS_PCH_SPLIT(dev))
5381                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5382         else
5383                 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5384
5385         if (INTEL_INFO(dev)->gen >= 9)
5386                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5387         else
5388                 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5389
5390         /* Preserve the current hw state. */
5391         intel_dp->DP = I915_READ(intel_dp->output_reg);
5392         intel_dp->attached_connector = intel_connector;
5393
5394         if (intel_dp_is_edp(dev, port))
5395                 type = DRM_MODE_CONNECTOR_eDP;
5396         else
5397                 type = DRM_MODE_CONNECTOR_DisplayPort;
5398
5399         /*
5400          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5401          * for DP the encoder type can be set by the caller to
5402          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5403          */
5404         if (type == DRM_MODE_CONNECTOR_eDP)
5405                 intel_encoder->type = INTEL_OUTPUT_EDP;
5406
5407         /* eDP only on port B and/or C on vlv/chv */
5408         if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5409                     port != PORT_B && port != PORT_C))
5410                 return false;
5411
5412         DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5413                         type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5414                         port_name(port));
5415
5416         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5417         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5418
5419         connector->interlace_allowed = true;
5420         connector->doublescan_allowed = 0;
5421
5422         INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5423                           edp_panel_vdd_work);
5424
5425         intel_connector_attach_encoder(intel_connector, intel_encoder);
5426         drm_connector_register(connector);
5427
5428         if (HAS_DDI(dev))
5429                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5430         else
5431                 intel_connector->get_hw_state = intel_connector_get_hw_state;
5432         intel_connector->unregister = intel_dp_connector_unregister;
5433
5434         /* Set up the hotplug pin. */
5435         switch (port) {
5436         case PORT_A:
5437                 intel_encoder->hpd_pin = HPD_PORT_A;
5438                 break;
5439         case PORT_B:
5440                 intel_encoder->hpd_pin = HPD_PORT_B;
5441                 break;
5442         case PORT_C:
5443                 intel_encoder->hpd_pin = HPD_PORT_C;
5444                 break;
5445         case PORT_D:
5446                 intel_encoder->hpd_pin = HPD_PORT_D;
5447                 break;
5448         default:
5449                 BUG();
5450         }
5451
5452         if (is_edp(intel_dp)) {
5453                 pps_lock(intel_dp);
5454                 intel_dp_init_panel_power_timestamps(intel_dp);
5455                 if (IS_VALLEYVIEW(dev))
5456                         vlv_initial_power_sequencer_setup(intel_dp);
5457                 else
5458                         intel_dp_init_panel_power_sequencer(dev, intel_dp);
5459                 pps_unlock(intel_dp);
5460         }
5461
5462         intel_dp_aux_init(intel_dp, intel_connector);
5463
5464         /* init MST on ports that can support it */
5465         if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5466                 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5467                         intel_dp_mst_encoder_init(intel_dig_port,
5468                                                   intel_connector->base.base.id);
5469                 }
5470         }
5471
5472         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5473                 drm_dp_aux_unregister(&intel_dp->aux);
5474                 if (is_edp(intel_dp)) {
5475                         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5476                         /*
5477                          * vdd might still be enabled do to the delayed vdd off.
5478                          * Make sure vdd is actually turned off here.
5479                          */
5480                         pps_lock(intel_dp);
5481                         edp_panel_vdd_off_sync(intel_dp);
5482                         pps_unlock(intel_dp);
5483                 }
5484                 drm_connector_unregister(connector);
5485                 drm_connector_cleanup(connector);
5486                 return false;
5487         }
5488
5489         intel_dp_add_properties(intel_dp, connector);
5490
5491         /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5492          * 0xd.  Failure to do so will result in spurious interrupts being
5493          * generated on the port when a cable is not attached.
5494          */
5495         if (IS_G4X(dev) && !IS_GM45(dev)) {
5496                 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5497                 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5498         }
5499
5500         return true;
5501 }
5502
5503 void
5504 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5505 {
5506         struct drm_i915_private *dev_priv = dev->dev_private;
5507         struct intel_digital_port *intel_dig_port;
5508         struct intel_encoder *intel_encoder;
5509         struct drm_encoder *encoder;
5510         struct intel_connector *intel_connector;
5511
5512         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5513         if (!intel_dig_port)
5514                 return;
5515
5516         intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5517         if (!intel_connector) {
5518                 kfree(intel_dig_port);
5519                 return;
5520         }
5521
5522         intel_encoder = &intel_dig_port->base;
5523         encoder = &intel_encoder->base;
5524
5525         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5526                          DRM_MODE_ENCODER_TMDS);
5527
5528         intel_encoder->compute_config = intel_dp_compute_config;
5529         intel_encoder->disable = intel_disable_dp;
5530         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5531         intel_encoder->get_config = intel_dp_get_config;
5532         intel_encoder->suspend = intel_dp_encoder_suspend;
5533         if (IS_CHERRYVIEW(dev)) {
5534                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5535                 intel_encoder->pre_enable = chv_pre_enable_dp;
5536                 intel_encoder->enable = vlv_enable_dp;
5537                 intel_encoder->post_disable = chv_post_disable_dp;
5538         } else if (IS_VALLEYVIEW(dev)) {
5539                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5540                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5541                 intel_encoder->enable = vlv_enable_dp;
5542                 intel_encoder->post_disable = vlv_post_disable_dp;
5543         } else {
5544                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5545                 intel_encoder->enable = g4x_enable_dp;
5546                 if (INTEL_INFO(dev)->gen >= 5)
5547                         intel_encoder->post_disable = ilk_post_disable_dp;
5548         }
5549
5550         intel_dig_port->port = port;
5551         intel_dig_port->dp.output_reg = output_reg;
5552
5553         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5554         if (IS_CHERRYVIEW(dev)) {
5555                 if (port == PORT_D)
5556                         intel_encoder->crtc_mask = 1 << 2;
5557                 else
5558                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5559         } else {
5560                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5561         }
5562         intel_encoder->cloneable = 0;
5563         intel_encoder->hot_plug = intel_dp_hot_plug;
5564
5565         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5566         dev_priv->hpd_irq_port[port] = intel_dig_port;
5567
5568         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5569                 drm_encoder_cleanup(encoder);
5570                 kfree(intel_dig_port);
5571                 kfree(intel_connector);
5572         }
5573 }
5574
5575 void intel_dp_mst_suspend(struct drm_device *dev)
5576 {
5577         struct drm_i915_private *dev_priv = dev->dev_private;
5578         int i;
5579
5580         /* disable MST */
5581         for (i = 0; i < I915_MAX_PORTS; i++) {
5582                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5583                 if (!intel_dig_port)
5584                         continue;
5585
5586                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5587                         if (!intel_dig_port->dp.can_mst)
5588                                 continue;
5589                         if (intel_dig_port->dp.is_mst)
5590                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5591                 }
5592         }
5593 }
5594
5595 void intel_dp_mst_resume(struct drm_device *dev)
5596 {
5597         struct drm_i915_private *dev_priv = dev->dev_private;
5598         int i;
5599
5600         for (i = 0; i < I915_MAX_PORTS; i++) {
5601                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5602                 if (!intel_dig_port)
5603                         continue;
5604                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5605                         int ret;
5606
5607                         if (!intel_dig_port->dp.can_mst)
5608                                 continue;
5609
5610                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5611                         if (ret != 0) {
5612                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5613                         }
5614                 }
5615         }
5616 }