]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/intel_dp.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[karo-tx-linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/* Pairs a DP link-rate code with the DPLL divider values that produce it. */
struct dp_link_dpll {
        int link_bw;            /* DP_LINK_BW_* rate code from the DP spec */
        struct dpll dpll;       /* PLL dividers yielding that link rate */
};
54
/* DPLL divider settings for the standard DP link rates on gen4 (i965-class). */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
61
/* DPLL divider settings for DP link rates on PCH-split platforms (ILK+). */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
68
/* DPLL divider settings for DP link rates on Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
75
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires fractional division to be programmed for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
93
/* Link rates (in kHz) the SKL+ source supports; the sink may allow a subset. */
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
/* The three mandatory DP link rates (in kHz) used on pre-SKL platforms. */
static const int default_rates[] = { 162000, 270000, 540000 };
97
98 /**
99  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
100  * @intel_dp: DP struct
101  *
102  * If a CPU or PCH DP output is attached to an eDP panel, this function
103  * will return true, and false otherwise.
104  */
105 static bool is_edp(struct intel_dp *intel_dp)
106 {
107         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
108
109         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
110 }
111
112 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
113 {
114         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
115
116         return intel_dig_port->base.base.dev;
117 }
118
119 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
120 {
121         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
122 }
123
124 static void intel_dp_link_down(struct intel_dp *intel_dp);
125 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
126 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
127 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
128 static void vlv_steal_power_sequencer(struct drm_device *dev,
129                                       enum pipe pipe);
130
131 static int
132 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
133 {
134         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
135
136         switch (max_link_bw) {
137         case DP_LINK_BW_1_62:
138         case DP_LINK_BW_2_7:
139         case DP_LINK_BW_5_4:
140                 break;
141         default:
142                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
143                      max_link_bw);
144                 max_link_bw = DP_LINK_BW_1_62;
145                 break;
146         }
147         return max_link_bw;
148 }
149
150 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
151 {
152         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
153         struct drm_device *dev = intel_dig_port->base.base.dev;
154         u8 source_max, sink_max;
155
156         source_max = 4;
157         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
158             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
159                 source_max = 2;
160
161         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
162
163         return min(source_max, sink_max);
164 }
165
166 /*
167  * The units on the numbers in the next two are... bizarre.  Examples will
168  * make it clearer; this one parallels an example in the eDP spec.
169  *
170  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
171  *
172  *     270000 * 1 * 8 / 10 == 216000
173  *
174  * The actual data capacity of that configuration is 2.16Gbit/s, so the
175  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
176  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
177  * 119000.  At 18bpp that's 2142000 kilobits per second.
178  *
179  * Thus the strange-looking division by 10 in intel_dp_link_required, to
180  * get the result in decakilobits instead of kilobits.
181  */
182
/*
 * Bandwidth needed by a mode, in decakilobits/s (see the units comment
 * above): kilopixels/s times bits-per-pixel, rounded up to a multiple
 * of ten and divided by ten.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kilobits = pixel_clock * bpp;

        return (kilobits + 9) / 10;
}
188
/*
 * Peak payload capacity of a link, in decakilobits/s: 8b/10b channel
 * coding means only 8 of every 10 transmitted bits carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        int total = max_link_clock * max_lanes;

        return total * 8 / 10;
}
194
195 static enum drm_mode_status
196 intel_dp_mode_valid(struct drm_connector *connector,
197                     struct drm_display_mode *mode)
198 {
199         struct intel_dp *intel_dp = intel_attached_dp(connector);
200         struct intel_connector *intel_connector = to_intel_connector(connector);
201         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
202         int target_clock = mode->clock;
203         int max_rate, mode_rate, max_lanes, max_link_clock;
204
205         if (is_edp(intel_dp) && fixed_mode) {
206                 if (mode->hdisplay > fixed_mode->hdisplay)
207                         return MODE_PANEL;
208
209                 if (mode->vdisplay > fixed_mode->vdisplay)
210                         return MODE_PANEL;
211
212                 target_clock = fixed_mode->clock;
213         }
214
215         max_link_clock = intel_dp_max_link_rate(intel_dp);
216         max_lanes = intel_dp_max_lane_count(intel_dp);
217
218         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
219         mode_rate = intel_dp_link_required(target_clock, 18);
220
221         if (mode_rate > max_rate)
222                 return MODE_CLOCK_HIGH;
223
224         if (mode->clock < 10000)
225                 return MODE_CLOCK_LOW;
226
227         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
228                 return MODE_H_ILLEGAL;
229
230         return MODE_OK;
231 }
232
233 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
234 {
235         int     i;
236         uint32_t v = 0;
237
238         if (src_bytes > 4)
239                 src_bytes = 4;
240         for (i = 0; i < src_bytes; i++)
241                 v |= ((uint32_t) src[i]) << ((3-i) * 8);
242         return v;
243 }
244
245 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
246 {
247         int i;
248         if (dst_bytes > 4)
249                 dst_bytes = 4;
250         for (i = 0; i < dst_bytes; i++)
251                 dst[i] = src >> ((3-i) * 8);
252 }
253
254 /* hrawclock is 1/4 the FSB frequency */
255 static int
256 intel_hrawclk(struct drm_device *dev)
257 {
258         struct drm_i915_private *dev_priv = dev->dev_private;
259         uint32_t clkcfg;
260
261         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
262         if (IS_VALLEYVIEW(dev))
263                 return 200;
264
265         clkcfg = I915_READ(CLKCFG);
266         switch (clkcfg & CLKCFG_FSB_MASK) {
267         case CLKCFG_FSB_400:
268                 return 100;
269         case CLKCFG_FSB_533:
270                 return 133;
271         case CLKCFG_FSB_667:
272                 return 166;
273         case CLKCFG_FSB_800:
274                 return 200;
275         case CLKCFG_FSB_1067:
276                 return 266;
277         case CLKCFG_FSB_1333:
278                 return 333;
279         /* these two are just a guess; one of them might be right */
280         case CLKCFG_FSB_1600:
281         case CLKCFG_FSB_1600_ALT:
282                 return 400;
283         default:
284                 return 133;
285         }
286 }
287
288 static void
289 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
290                                     struct intel_dp *intel_dp);
291 static void
292 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
293                                               struct intel_dp *intel_dp);
294
295 static void pps_lock(struct intel_dp *intel_dp)
296 {
297         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
298         struct intel_encoder *encoder = &intel_dig_port->base;
299         struct drm_device *dev = encoder->base.dev;
300         struct drm_i915_private *dev_priv = dev->dev_private;
301         enum intel_display_power_domain power_domain;
302
303         /*
304          * See vlv_power_sequencer_reset() why we need
305          * a power domain reference here.
306          */
307         power_domain = intel_display_port_power_domain(encoder);
308         intel_display_power_get(dev_priv, power_domain);
309
310         mutex_lock(&dev_priv->pps_mutex);
311 }
312
313 static void pps_unlock(struct intel_dp *intel_dp)
314 {
315         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
316         struct intel_encoder *encoder = &intel_dig_port->base;
317         struct drm_device *dev = encoder->base.dev;
318         struct drm_i915_private *dev_priv = dev->dev_private;
319         enum intel_display_power_domain power_domain;
320
321         mutex_unlock(&dev_priv->pps_mutex);
322
323         power_domain = intel_display_port_power_domain(encoder);
324         intel_display_power_put(dev_priv, power_domain);
325 }
326
327 static void
328 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
329 {
330         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
331         struct drm_device *dev = intel_dig_port->base.base.dev;
332         struct drm_i915_private *dev_priv = dev->dev_private;
333         enum pipe pipe = intel_dp->pps_pipe;
334         bool pll_enabled;
335         uint32_t DP;
336
337         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
338                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
339                  pipe_name(pipe), port_name(intel_dig_port->port)))
340                 return;
341
342         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
343                       pipe_name(pipe), port_name(intel_dig_port->port));
344
345         /* Preserve the BIOS-computed detected bit. This is
346          * supposed to be read-only.
347          */
348         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
349         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
350         DP |= DP_PORT_WIDTH(1);
351         DP |= DP_LINK_TRAIN_PAT_1;
352
353         if (IS_CHERRYVIEW(dev))
354                 DP |= DP_PIPE_SELECT_CHV(pipe);
355         else if (pipe == PIPE_B)
356                 DP |= DP_PIPEB_SELECT;
357
358         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
359
360         /*
361          * The DPLL for the pipe must be enabled for this to work.
362          * So enable temporarily it if it's not already enabled.
363          */
364         if (!pll_enabled)
365                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
366                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
367
368         /*
369          * Similar magic as in intel_dp_enable_port().
370          * We _must_ do this port enable + disable trick
371          * to make this power seqeuencer lock onto the port.
372          * Otherwise even VDD force bit won't work.
373          */
374         I915_WRITE(intel_dp->output_reg, DP);
375         POSTING_READ(intel_dp->output_reg);
376
377         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
378         POSTING_READ(intel_dp->output_reg);
379
380         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
381         POSTING_READ(intel_dp->output_reg);
382
383         if (!pll_enabled)
384                 vlv_force_pll_off(dev, pipe);
385 }
386
/*
 * Return the pipe whose power sequencer is assigned to this eDP port,
 * assigning one first if needed: pick a pipe not used by any other eDP
 * port, steal it from whatever was using it, program it for this port
 * and kick it so it locks onto the port.  Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        /* candidate mask: VLV/CHV only have PPS instances on pipes A and B */
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1; /* lowest still-free pipe */

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
450
451 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
452                                enum pipe pipe);
453
454 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
455                                enum pipe pipe)
456 {
457         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
458 }
459
460 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
461                                 enum pipe pipe)
462 {
463         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
464 }
465
/* vlv_pipe_check that accepts any pipe; used as the last-resort filter. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
471
472 static enum pipe
473 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
474                      enum port port,
475                      vlv_pipe_check pipe_check)
476 {
477         enum pipe pipe;
478
479         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
480                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
481                         PANEL_PORT_SELECT_MASK;
482
483                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
484                         continue;
485
486                 if (!pipe_check(dev_priv, pipe))
487                         continue;
488
489                 return pipe;
490         }
491
492         return INVALID_PIPE;
493 }
494
495 static void
496 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
497 {
498         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
499         struct drm_device *dev = intel_dig_port->base.base.dev;
500         struct drm_i915_private *dev_priv = dev->dev_private;
501         enum port port = intel_dig_port->port;
502
503         lockdep_assert_held(&dev_priv->pps_mutex);
504
505         /* try to find a pipe with this port selected */
506         /* first pick one where the panel is on */
507         intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
508                                                   vlv_pipe_has_pp_on);
509         /* didn't find one? pick one where vdd is on */
510         if (intel_dp->pps_pipe == INVALID_PIPE)
511                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
512                                                           vlv_pipe_has_vdd_on);
513         /* didn't find one? pick one with just the correct port */
514         if (intel_dp->pps_pipe == INVALID_PIPE)
515                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
516                                                           vlv_pipe_any);
517
518         /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
519         if (intel_dp->pps_pipe == INVALID_PIPE) {
520                 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
521                               port_name(port));
522                 return;
523         }
524
525         DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
526                       port_name(port), pipe_name(intel_dp->pps_pipe));
527
528         intel_dp_init_panel_power_sequencer(dev, intel_dp);
529         intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
530 }
531
532 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
533 {
534         struct drm_device *dev = dev_priv->dev;
535         struct intel_encoder *encoder;
536
537         if (WARN_ON(!IS_VALLEYVIEW(dev)))
538                 return;
539
540         /*
541          * We can't grab pps_mutex here due to deadlock with power_domain
542          * mutex when power_domain functions are called while holding pps_mutex.
543          * That also means that in order to use pps_pipe the code needs to
544          * hold both a power domain reference and pps_mutex, and the power domain
545          * reference get/put must be done while _not_ holding pps_mutex.
546          * pps_{lock,unlock}() do these steps in the correct order, so one
547          * should use them always.
548          */
549
550         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
551                 struct intel_dp *intel_dp;
552
553                 if (encoder->type != INTEL_OUTPUT_EDP)
554                         continue;
555
556                 intel_dp = enc_to_intel_dp(&encoder->base);
557                 intel_dp->pps_pipe = INVALID_PIPE;
558         }
559 }
560
561 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
562 {
563         struct drm_device *dev = intel_dp_to_dev(intel_dp);
564
565         if (HAS_PCH_SPLIT(dev))
566                 return PCH_PP_CONTROL;
567         else
568                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
569 }
570
571 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
572 {
573         struct drm_device *dev = intel_dp_to_dev(intel_dp);
574
575         if (HAS_PCH_SPLIT(dev))
576                 return PCH_PP_STATUS;
577         else
578                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
579 }
580
/*
 * Reboot notifier: on restart, force the panel off with the maximum power
 * cycle delay programmed so the eDP T12 (power-cycle) timing is honoured
 * across the reboot.  Only applicable when panel PM state is not to be
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* only act for eDP panels and only on an actual restart */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* block until the panel's power cycle window has elapsed */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
616
617 static bool edp_have_panel_power(struct intel_dp *intel_dp)
618 {
619         struct drm_device *dev = intel_dp_to_dev(intel_dp);
620         struct drm_i915_private *dev_priv = dev->dev_private;
621
622         lockdep_assert_held(&dev_priv->pps_mutex);
623
624         if (IS_VALLEYVIEW(dev) &&
625             intel_dp->pps_pipe == INVALID_PIPE)
626                 return false;
627
628         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
629 }
630
631 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
632 {
633         struct drm_device *dev = intel_dp_to_dev(intel_dp);
634         struct drm_i915_private *dev_priv = dev->dev_private;
635
636         lockdep_assert_held(&dev_priv->pps_mutex);
637
638         if (IS_VALLEYVIEW(dev) &&
639             intel_dp->pps_pipe == INVALID_PIPE)
640                 return false;
641
642         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
643 }
644
645 static void
646 intel_dp_check_edp(struct intel_dp *intel_dp)
647 {
648         struct drm_device *dev = intel_dp_to_dev(intel_dp);
649         struct drm_i915_private *dev_priv = dev->dev_private;
650
651         if (!is_edp(intel_dp))
652                 return;
653
654         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
655                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
656                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
657                               I915_READ(_pp_stat_reg(intel_dp)),
658                               I915_READ(_pp_ctrl_reg(intel_dp)));
659         }
660 }
661
/*
 * Wait for the in-flight AUX transaction to finish and return the final
 * channel-control register value.  Uses the GMBUS/AUX interrupt when
 * available, otherwise polls atomically; both paths bound the wait at
 * 10ms and complain if the hardware never signalled completion.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C reads ch_ctl (latching it into status) and is true once SEND_BUSY clears */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
685
686 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687 {
688         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689         struct drm_device *dev = intel_dig_port->base.base.dev;
690
691         /*
692          * The clock divider is based off the hrawclk, and would like to run at
693          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
694          */
695         return index ? 0 : intel_hrawclk(dev) / 2;
696 }
697
698 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
699 {
700         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
701         struct drm_device *dev = intel_dig_port->base.base.dev;
702         struct drm_i915_private *dev_priv = dev->dev_private;
703
704         if (index)
705                 return 0;
706
707         if (intel_dig_port->port == PORT_A) {
708                 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
709         } else {
710                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
711         }
712 }
713
714 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
715 {
716         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
717         struct drm_device *dev = intel_dig_port->base.base.dev;
718         struct drm_i915_private *dev_priv = dev->dev_private;
719
720         if (intel_dig_port->port == PORT_A) {
721                 if (index)
722                         return 0;
723                 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
724         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
725                 /* Workaround for non-ULT HSW */
726                 switch (index) {
727                 case 0: return 63;
728                 case 1: return 72;
729                 default: return 0;
730                 }
731         } else  {
732                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
733         }
734 }
735
736 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
737 {
738         return index ? 0 : 100;
739 }
740
741 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
742 {
743         /*
744          * SKL doesn't need us to program the AUX clock divider (Hardware will
745          * derive the clock from CDCLK automatically). We still implement the
746          * get_aux_clock_divider vfunc to plug-in into the existing code.
747          */
748         return index ? 0 : 1;
749 }
750
751 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
752                                       bool has_aux_irq,
753                                       int send_bytes,
754                                       uint32_t aux_clock_divider)
755 {
756         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
757         struct drm_device *dev = intel_dig_port->base.base.dev;
758         uint32_t precharge, timeout;
759
760         if (IS_GEN6(dev))
761                 precharge = 3;
762         else
763                 precharge = 5;
764
765         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
766                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
767         else
768                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
769
770         return DP_AUX_CH_CTL_SEND_BUSY |
771                DP_AUX_CH_CTL_DONE |
772                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
773                DP_AUX_CH_CTL_TIME_OUT_ERROR |
774                timeout |
775                DP_AUX_CH_CTL_RECEIVE_ERROR |
776                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
777                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
778                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
779 }
780
781 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
782                                       bool has_aux_irq,
783                                       int send_bytes,
784                                       uint32_t unused)
785 {
786         return DP_AUX_CH_CTL_SEND_BUSY |
787                DP_AUX_CH_CTL_DONE |
788                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
789                DP_AUX_CH_CTL_TIME_OUT_ERROR |
790                DP_AUX_CH_CTL_TIME_OUT_1600us |
791                DP_AUX_CH_CTL_RECEIVE_ERROR |
792                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
793                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
794 }
795
/*
 * Perform one raw AUX channel transaction: load @send_bytes from @send
 * into the AUX data registers, trigger the transfer, and on success
 * unpack up to @recv_size reply bytes into @recv.
 *
 * Returns the number of bytes received, or a negative errno
 * (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT) on failure.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* AUX data registers follow AUX_CH_CTL */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/*
	 * Retry the whole transaction with each supported AUX clock
	 * divider; get_aux_clock_divider() returns 0 once exhausted.
	 */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	/* Clamp to the caller's buffer; any extra reply bytes are dropped. */
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	/* Only drop VDD if we turned it on above (see comment at the top). */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
943
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer() hook: marshal a struct drm_dp_aux_msg into the
 * raw byte stream expected by intel_dp_aux_ch() and decode the reply.
 *
 * Returns the number of payload bytes transferred, or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];	/* 20 bytes = the 5 x 32bit data registers */
	size_t txsize, rxsize;
	int ret;

	/* AUX request header: 4-bit request, 20-bit address, length-1 */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-size request is an address-only transaction (no
		 * length byte), hence BARE_ADDRESS_SIZE. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* First reply byte: 4-bit reply code in the high nibble */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* +1 for the reply code byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;	/* exclude the reply code byte */
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1014
/*
 * Pick the AUX channel control register for this port, register the
 * drm_dp_aux adapter, and create a sysfs link from the connector to
 * its DDC device.
 *
 * Registration failures are logged but not propagated: the connector
 * is still usable without the aux/sysfs plumbing.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	/* Default assumption: port A on the CPU, B/C/D behind the PCH.
	 * Overridden below for non-HSW/BDW platforms. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the DDC adapter under the connector's sysfs directory. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		/* Keep aux registered or not consistent with the sysfs link */
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1079
1080 static void
1081 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1082 {
1083         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1084
1085         if (!intel_connector->mst_port)
1086                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1087                                   intel_dp->aux.ddc.dev.kobj.name);
1088         intel_connector_unregister(intel_connector);
1089 }
1090
1091 static void
1092 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1093 {
1094         u32 ctrl1;
1095
1096         memset(&pipe_config->dpll_hw_state, 0,
1097                sizeof(pipe_config->dpll_hw_state));
1098
1099         pipe_config->ddi_pll_sel = SKL_DPLL0;
1100         pipe_config->dpll_hw_state.cfgcr1 = 0;
1101         pipe_config->dpll_hw_state.cfgcr2 = 0;
1102
1103         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1104         switch (link_clock / 2) {
1105         case 81000:
1106                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1107                                               SKL_DPLL0);
1108                 break;
1109         case 135000:
1110                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1111                                               SKL_DPLL0);
1112                 break;
1113         case 270000:
1114                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1115                                               SKL_DPLL0);
1116                 break;
1117         case 162000:
1118                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1119                                               SKL_DPLL0);
1120                 break;
1121         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1122         results in CDCLK change. Need to handle the change of CDCLK by
1123         disabling pipes and re-enabling them */
1124         case 108000:
1125                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1126                                               SKL_DPLL0);
1127                 break;
1128         case 216000:
1129                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1130                                               SKL_DPLL0);
1131                 break;
1132
1133         }
1134         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1135 }
1136
1137 static void
1138 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1139 {
1140         memset(&pipe_config->dpll_hw_state, 0,
1141                sizeof(pipe_config->dpll_hw_state));
1142
1143         switch (link_bw) {
1144         case DP_LINK_BW_1_62:
1145                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1146                 break;
1147         case DP_LINK_BW_2_7:
1148                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1149                 break;
1150         case DP_LINK_BW_5_4:
1151                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1152                 break;
1153         }
1154 }
1155
1156 static int
1157 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1158 {
1159         if (intel_dp->num_sink_rates) {
1160                 *sink_rates = intel_dp->sink_rates;
1161                 return intel_dp->num_sink_rates;
1162         }
1163
1164         *sink_rates = default_rates;
1165
1166         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1167 }
1168
1169 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1170 {
1171         /* WaDisableHBR2:skl */
1172         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1173                 return false;
1174
1175         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1176             (INTEL_INFO(dev)->gen >= 9))
1177                 return true;
1178         else
1179                 return false;
1180 }
1181
1182 static int
1183 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1184 {
1185         if (IS_SKYLAKE(dev)) {
1186                 *source_rates = skl_rates;
1187                 return ARRAY_SIZE(skl_rates);
1188         }
1189
1190         *source_rates = default_rates;
1191
1192         /* This depends on the fact that 5.4 is last value in the array */
1193         if (intel_dp_source_supports_hbr2(dev))
1194                 return (DP_LINK_BW_5_4 >> 3) + 1;
1195         else
1196                 return (DP_LINK_BW_2_7 >> 3) + 1;
1197 }
1198
1199 static void
1200 intel_dp_set_clock(struct intel_encoder *encoder,
1201                    struct intel_crtc_state *pipe_config, int link_bw)
1202 {
1203         struct drm_device *dev = encoder->base.dev;
1204         const struct dp_link_dpll *divisor = NULL;
1205         int i, count = 0;
1206
1207         if (IS_G4X(dev)) {
1208                 divisor = gen4_dpll;
1209                 count = ARRAY_SIZE(gen4_dpll);
1210         } else if (HAS_PCH_SPLIT(dev)) {
1211                 divisor = pch_dpll;
1212                 count = ARRAY_SIZE(pch_dpll);
1213         } else if (IS_CHERRYVIEW(dev)) {
1214                 divisor = chv_dpll;
1215                 count = ARRAY_SIZE(chv_dpll);
1216         } else if (IS_VALLEYVIEW(dev)) {
1217                 divisor = vlv_dpll;
1218                 count = ARRAY_SIZE(vlv_dpll);
1219         }
1220
1221         if (divisor && count) {
1222                 for (i = 0; i < count; i++) {
1223                         if (link_bw == divisor[i].link_bw) {
1224                                 pipe_config->dpll = divisor[i].dpll;
1225                                 pipe_config->clock_set = true;
1226                                 break;
1227                         }
1228                 }
1229         }
1230 }
1231
1232 static int intersect_rates(const int *source_rates, int source_len,
1233                            const int *sink_rates, int sink_len,
1234                            int *common_rates)
1235 {
1236         int i = 0, j = 0, k = 0;
1237
1238         while (i < source_len && j < sink_len) {
1239                 if (source_rates[i] == sink_rates[j]) {
1240                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1241                                 return k;
1242                         common_rates[k] = source_rates[i];
1243                         ++k;
1244                         ++i;
1245                         ++j;
1246                 } else if (source_rates[i] < sink_rates[j]) {
1247                         ++i;
1248                 } else {
1249                         ++j;
1250                 }
1251         }
1252         return k;
1253 }
1254
/*
 * Compute the link rates supported by both source and sink into
 * @common_rates; returns the number of entries.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_rates, *snk_rates;
	int src_len, snk_len;

	src_len = intel_dp_source_rates(dev, &src_rates);
	snk_len = intel_dp_sink_rates(intel_dp, &snk_rates);

	return intersect_rates(src_rates, src_len,
			       snk_rates, snk_len,
			       common_rates);
}
1269
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list, e.g. "162000, 270000". The output is always NUL terminated
 * (assumes @len > 0) and silently truncated if the buffer is too small.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		/*
		 * Stop on output error or truncation. The old code's
		 * "r >= len" only caught a negative r via the implicit
		 * int -> size_t conversion; make both checks explicit.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1285
1286 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1287 {
1288         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1289         const int *source_rates, *sink_rates;
1290         int source_len, sink_len, common_len;
1291         int common_rates[DP_MAX_SUPPORTED_RATES];
1292         char str[128]; /* FIXME: too big for stack? */
1293
1294         if ((drm_debug & DRM_UT_KMS) == 0)
1295                 return;
1296
1297         source_len = intel_dp_source_rates(dev, &source_rates);
1298         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1299         DRM_DEBUG_KMS("source rates: %s\n", str);
1300
1301         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1302         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1303         DRM_DEBUG_KMS("sink rates: %s\n", str);
1304
1305         common_len = intel_dp_common_rates(intel_dp, common_rates);
1306         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1307         DRM_DEBUG_KMS("common rates: %s\n", str);
1308 }
1309
1310 static int rate_to_index(int find, const int *rates)
1311 {
1312         int i = 0;
1313
1314         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1315                 if (find == rates[i])
1316                         break;
1317
1318         return i;
1319 }
1320
1321 int
1322 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1323 {
1324         int rates[DP_MAX_SUPPORTED_RATES] = {};
1325         int len;
1326
1327         len = intel_dp_common_rates(intel_dp, rates);
1328         if (WARN_ON(len <= 0))
1329                 return 162000;
1330
1331         return rates[rate_to_index(0, rates) - 1];
1332 }
1333
/*
 * Translate @rate into an index into the sink's advertised rate table.
 * Only meaningful when the sink reports explicit rates (see the
 * num_sink_rates check in intel_dp_compute_config()).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1338
1339 bool
1340 intel_dp_compute_config(struct intel_encoder *encoder,
1341                         struct intel_crtc_state *pipe_config)
1342 {
1343         struct drm_device *dev = encoder->base.dev;
1344         struct drm_i915_private *dev_priv = dev->dev_private;
1345         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1346         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1347         enum port port = dp_to_dig_port(intel_dp)->port;
1348         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1349         struct intel_connector *intel_connector = intel_dp->attached_connector;
1350         int lane_count, clock;
1351         int min_lane_count = 1;
1352         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1353         /* Conveniently, the link BW constants become indices with a shift...*/
1354         int min_clock = 0;
1355         int max_clock;
1356         int bpp, mode_rate;
1357         int link_avail, link_clock;
1358         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1359         int common_len;
1360
1361         common_len = intel_dp_common_rates(intel_dp, common_rates);
1362
1363         /* No common link rates between source and sink */
1364         WARN_ON(common_len <= 0);
1365
1366         max_clock = common_len - 1;
1367
1368         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1369                 pipe_config->has_pch_encoder = true;
1370
1371         pipe_config->has_dp_encoder = true;
1372         pipe_config->has_drrs = false;
1373         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1374
1375         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1376                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1377                                        adjusted_mode);
1378
1379                 if (INTEL_INFO(dev)->gen >= 9) {
1380                         int ret;
1381                         ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1382                         if (ret)
1383                                 return ret;
1384                 }
1385
1386                 if (!HAS_PCH_SPLIT(dev))
1387                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1388                                                  intel_connector->panel.fitting_mode);
1389                 else
1390                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1391                                                 intel_connector->panel.fitting_mode);
1392         }
1393
1394         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1395                 return false;
1396
1397         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1398                       "max bw %d pixel clock %iKHz\n",
1399                       max_lane_count, common_rates[max_clock],
1400                       adjusted_mode->crtc_clock);
1401
1402         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1403          * bpc in between. */
1404         bpp = pipe_config->pipe_bpp;
1405         if (is_edp(intel_dp)) {
1406                 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1407                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1408                                       dev_priv->vbt.edp_bpp);
1409                         bpp = dev_priv->vbt.edp_bpp;
1410                 }
1411
1412                 /*
1413                  * Use the maximum clock and number of lanes the eDP panel
1414                  * advertizes being capable of. The panels are generally
1415                  * designed to support only a single clock and lane
1416                  * configuration, and typically these values correspond to the
1417                  * native resolution of the panel.
1418                  */
1419                 min_lane_count = max_lane_count;
1420                 min_clock = max_clock;
1421         }
1422
1423         for (; bpp >= 6*3; bpp -= 2*3) {
1424                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1425                                                    bpp);
1426
1427                 for (clock = min_clock; clock <= max_clock; clock++) {
1428                         for (lane_count = min_lane_count;
1429                                 lane_count <= max_lane_count;
1430                                 lane_count <<= 1) {
1431
1432                                 link_clock = common_rates[clock];
1433                                 link_avail = intel_dp_max_data_rate(link_clock,
1434                                                                     lane_count);
1435
1436                                 if (mode_rate <= link_avail) {
1437                                         goto found;
1438                                 }
1439                         }
1440                 }
1441         }
1442
1443         return false;
1444
1445 found:
1446         if (intel_dp->color_range_auto) {
1447                 /*
1448                  * See:
1449                  * CEA-861-E - 5.1 Default Encoding Parameters
1450                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1451                  */
1452                 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1453                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
1454                 else
1455                         intel_dp->color_range = 0;
1456         }
1457
1458         if (intel_dp->color_range)
1459                 pipe_config->limited_color_range = true;
1460
1461         intel_dp->lane_count = lane_count;
1462
1463         if (intel_dp->num_sink_rates) {
1464                 intel_dp->link_bw = 0;
1465                 intel_dp->rate_select =
1466                         intel_dp_rate_select(intel_dp, common_rates[clock]);
1467         } else {
1468                 intel_dp->link_bw =
1469                         drm_dp_link_rate_to_bw_code(common_rates[clock]);
1470                 intel_dp->rate_select = 0;
1471         }
1472
1473         pipe_config->pipe_bpp = bpp;
1474         pipe_config->port_clock = common_rates[clock];
1475
1476         DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1477                       intel_dp->link_bw, intel_dp->lane_count,
1478                       pipe_config->port_clock, bpp);
1479         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1480                       mode_rate, link_avail);
1481
1482         intel_link_compute_m_n(bpp, lane_count,
1483                                adjusted_mode->crtc_clock,
1484                                pipe_config->port_clock,
1485                                &pipe_config->dp_m_n);
1486
1487         if (intel_connector->panel.downclock_mode != NULL &&
1488                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1489                         pipe_config->has_drrs = true;
1490                         intel_link_compute_m_n(bpp, lane_count,
1491                                 intel_connector->panel.downclock_mode->clock,
1492                                 pipe_config->port_clock,
1493                                 &pipe_config->dp_m2_n2);
1494         }
1495
1496         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1497                 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1498         else if (IS_BROXTON(dev))
1499                 /* handled in ddi */;
1500         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1501                 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1502         else
1503                 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1504
1505         return true;
1506 }
1507
/*
 * Program the eDP PLL frequency field of DP_A for CPU eDP, based on
 * the crtc's already-computed port_clock, mirroring the choice into
 * the cached intel_dp->DP value.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	/* Only two PLL frequencies exist: 160MHz (for 162000) and 270MHz. */
	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Let the PLL settle before anyone relies on it. */
	udelay(500);
}
1538
/*
 * Compute the DP port register value (cached in intel_dp->DP) for the
 * current crtc config, handling the per-platform register layout
 * differences described below. For CPT the enhanced-framing bit lives
 * in TRANS_DP_CTL, which is updated here as well.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	/* gen7 port A: the IVB CPU case from the comment above */
	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT the enhanced-framing bit lives in TRANS_DP_CTL */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1620
1621 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1622 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1623
1624 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1625 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1626
1627 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1628 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1629
1630 static void wait_panel_status(struct intel_dp *intel_dp,
1631                                        u32 mask,
1632                                        u32 value)
1633 {
1634         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1635         struct drm_i915_private *dev_priv = dev->dev_private;
1636         u32 pp_stat_reg, pp_ctrl_reg;
1637
1638         lockdep_assert_held(&dev_priv->pps_mutex);
1639
1640         pp_stat_reg = _pp_stat_reg(intel_dp);
1641         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1642
1643         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1644                         mask, value,
1645                         I915_READ(pp_stat_reg),
1646                         I915_READ(pp_ctrl_reg));
1647
1648         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1649                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1650                                 I915_READ(pp_stat_reg),
1651                                 I915_READ(pp_ctrl_reg));
1652         }
1653
1654         DRM_DEBUG_KMS("Wait complete\n");
1655 }
1656
/* Block until the power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1662
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1668
/*
 * Wait out the panel power cycle delay (relative to the last recorded
 * power cycle timestamp) and then for the sequencer to go idle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1680
/* Honour the backlight-on delay, measured from the last panel power on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1686
/* Honour the backlight-off delay, measured from the last backlight off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1692
1693 /* Read the current pp_control value, unlocking the register if it
1694  * is locked
1695  */
1696
1697 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1698 {
1699         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1700         struct drm_i915_private *dev_priv = dev->dev_private;
1701         u32 control;
1702
1703         lockdep_assert_held(&dev_priv->pps_mutex);
1704
1705         control = I915_READ(_pp_ctrl_reg(intel_dp));
1706         control &= ~PANEL_UNLOCK_MASK;
1707         control |= PANEL_UNLOCK_REGS;
1708         return control;
1709 }
1710
1711 /*
1712  * Must be paired with edp_panel_vdd_off().
1713  * Must hold pps_mutex around the whole on/off sequence.
1714  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1715  */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	/* true iff this call is the one that newly requested VDD; the
	 * caller is then responsible for the matching vdd off. */
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* A pending deferred VDD off must not race with this request. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already forced on in hardware: nothing to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Hold a power domain reference for as long as VDD is forced;
	 * released in edp_panel_vdd_off_sync(). */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1768
1769 /*
1770  * Must be paired with intel_edp_panel_vdd_off() or
1771  * intel_edp_panel_off().
1772  * Nested calls to these functions are not allowed since
1773  * we drop the lock. Caller must use some higher level
1774  * locking to prevent nested calls from other threads.
1775  */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	/* edp_panel_vdd_on() returns false when VDD had already been
	 * requested; that indicates an unbalanced "on" from this path. */
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
1790
/*
 * Immediately drop the VDD force bit and release the associated power
 * domain reference. Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nobody should still be asking for VDD at this point. */
	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with panel power off starts the power cycle delay
	 * window used by wait_panel_power_cycle(). */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	/* Release the reference taken in edp_panel_vdd_on(). */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1831
/*
 * Deferred VDD off handler: drop VDD unless someone re-requested it
 * after this work was scheduled.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1842
1843 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1844 {
1845         unsigned long delay;
1846
1847         /*
1848          * Queue the timer to fire a long time from now (relative to the power
1849          * down delay) to keep the panel power up across a sequence of
1850          * operations.
1851          */
1852         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1853         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1854 }
1855
1856 /*
1857  * Must be paired with edp_panel_vdd_on().
1858  * Must hold pps_mutex around the whole on/off sequence.
1859  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1860  */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	/* @sync: drop VDD right now; otherwise defer via the delayed work. */
	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1881
/* Turn the eDP panel power on. Caller must hold pps_mutex. */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the mandatory delay since the last power down. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay (see wait_backlight_on()). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1929
/* Locked wrapper around edp_panel_on() for external callers. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1939
1940
/* Turn the eDP panel power off. Caller must hold pps_mutex. */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Start of the power cycle delay window for the next power on. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1982
/* Locked wrapper around edp_panel_off() for external callers. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
1992
1993 /* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Set the backlight enable bit in the panel power control. */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2022
2023 /* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control backlight enable bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2034
2035 /* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Clear the backlight enable bit in the panel power control. */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Timestamp for the backlight-off delay, then honour it. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2061
2062 /* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Panel power control backlight bit first, then the PWM (reverse
	 * of the enable order in intel_edp_backlight_on()). */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2073
2074 /*
2075  * Hook for controlling the panel power control backlight through the bl_power
2076  * sysfs attribute. Take care to handle multiple calls.
2077  */
2078 static void intel_edp_backlight_power(struct intel_connector *connector,
2079                                       bool enable)
2080 {
2081         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2082         bool is_enabled;
2083
2084         pps_lock(intel_dp);
2085         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2086         pps_unlock(intel_dp);
2087
2088         if (is_enabled == enable)
2089                 return;
2090
2091         DRM_DEBUG_KMS("panel power control backlight %s\n",
2092                       enable ? "enable" : "disable");
2093
2094         if (enable)
2095                 _intel_edp_backlight_on(intel_dp);
2096         else
2097                 _intel_edp_backlight_off(intel_dp);
2098 }
2099
/* Enable the CPU eDP PLL. The pipe must already be disabled. */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Let the PLL settle before anyone uses it. */
	udelay(200);
}
2125
/* Disable the CPU eDP PLL. The pipe must already be disabled. */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* Allow the PLL to fully stop. */
	udelay(200);
}
2150
2151 /* If the sink supports it, try to set the power state appropriately */
2152 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2153 {
2154         int ret, i;
2155
2156         /* Should have a valid DPCD by this point */
2157         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2158                 return;
2159
2160         if (mode != DRM_MODE_DPMS_ON) {
2161                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2162                                          DP_SET_POWER_D3);
2163         } else {
2164                 /*
2165                  * When turning on, we need to retry for 1ms to give the sink
2166                  * time to wake up.
2167                  */
2168                 for (i = 0; i < 3; i++) {
2169                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2170                                                  DP_SET_POWER_D0);
2171                         if (ret == 1)
2172                                 break;
2173                         msleep(1);
2174                 }
2175         }
2176
2177         if (ret != 1)
2178                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2179                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2180 }
2181
/*
 * Read back hardware state: returns true and sets *pipe when the DP
 * port is enabled, false when the port or its power domain is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe<->port mapping lives in TRANS_DP_CTL, so
		 * scan every transcoder for the one driving this port. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		/* NOTE(review): when no transcoder matches we still fall
		 * through to "return true" below with *pipe unset — confirm
		 * callers tolerate this. */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2224
/*
 * Fill @pipe_config from the current hardware state of this DP
 * encoder: sync polarity flags, audio, color range, m/n values,
 * port clock and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT (non port A) the sync polarity is in TRANS_DP_CTL,
	 * elsewhere it is in the port register itself. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate is encoded in the DP_A PLL frequency field. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2308
/* Encoder disable hook: audio, PSR, panel/backlight teardown, link down. */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2332
/* ILK+ post-disable: take the link down; port A also owns the eDP PLL. */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2342
/* VLV post-disable: only the link needs to be taken down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2349
/* CHV post-disable: take the link down, then reset the PHY data lanes. */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	/* Sideband (DPIO) accesses below must be serialized. */
	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Assert reset on both lane pairs of the channel. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2385
/*
 * Program the requested DP link training pattern into the hardware.
 *
 * On DDI platforms (HAS_DDI) the pattern is written directly to the
 * DP_TP_CTL register here.  On all other platforms the pattern bits are
 * only merged into *DP; the caller is responsible for writing the result
 * to intel_dp->output_reg.
 *
 * @dp_train_pat: a DP_TRAINING_PATTERN_* selector, optionally OR'd with
 *                DP_LINK_SCRAMBLING_DISABLE.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: training is controlled via DP_TP_CTL, not *DP */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev) && port != PORT_A)) {
                /* CPT-style link-train field layout in the port register */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* no pattern 3 encoding here; fall back to pattern 2 */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* only CHV has a pattern 3 encoding on this path */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2469
/*
 * Enable the DP port with training pattern 1 pre-selected (as the DP spec
 * requires at the start of link training).
 *
 * The port register is deliberately written twice: first fully configured
 * but with DP_PORT_EN still clear, then again with DP_PORT_EN set -- see
 * the VLV/CHV note below.  Do not collapse the two writes.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2493
/*
 * Common DP enable path: claim a power sequencer (VLV), power up the port,
 * run the eDP panel power sequence, train the link, and finally enable
 * audio if the crtc state asks for it.  Expects the port to still be
 * disabled (WARNs and bails otherwise).
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        /* NOTE(review): always 0 here -- presumably "wait for no lanes"; confirm */
        unsigned int lane_mask = 0x0;

        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        /* VLV must bind a pipe power sequencer before the port comes up */
        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* hold vdd across panel power-on, then release it */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev))
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);

        /* wake the sink, then run the full training sequence */
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2534
2535 static void g4x_enable_dp(struct intel_encoder *encoder)
2536 {
2537         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2538
2539         intel_enable_dp(encoder);
2540         intel_edp_backlight_on(intel_dp);
2541 }
2542
2543 static void vlv_enable_dp(struct intel_encoder *encoder)
2544 {
2545         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2546
2547         intel_edp_backlight_on(intel_dp);
2548         intel_psr_enable(intel_dp);
2549 }
2550
2551 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2552 {
2553         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2554         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2555
2556         intel_dp_prepare(encoder);
2557
2558         /* Only ilk+ has port A */
2559         if (dport->port == PORT_A) {
2560                 ironlake_set_pll_cpu_edp(intel_dp);
2561                 ironlake_edp_pll_on(intel_dp);
2562         }
2563 }
2564
/*
 * Disassociate this eDP port from the pipe power sequencer it currently
 * owns: synchronously turn off any pending vdd, clear the sequencer's
 * port select, and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* vdd must be off before the sequencer is released */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2590
2591 static void vlv_steal_power_sequencer(struct drm_device *dev,
2592                                       enum pipe pipe)
2593 {
2594         struct drm_i915_private *dev_priv = dev->dev_private;
2595         struct intel_encoder *encoder;
2596
2597         lockdep_assert_held(&dev_priv->pps_mutex);
2598
2599         if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2600                 return;
2601
2602         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2603                             base.head) {
2604                 struct intel_dp *intel_dp;
2605                 enum port port;
2606
2607                 if (encoder->type != INTEL_OUTPUT_EDP)
2608                         continue;
2609
2610                 intel_dp = enc_to_intel_dp(&encoder->base);
2611                 port = dp_to_dig_port(intel_dp)->port;
2612
2613                 if (intel_dp->pps_pipe != pipe)
2614                         continue;
2615
2616                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2617                               pipe_name(pipe), port_name(port));
2618
2619                 WARN(encoder->connectors_active,
2620                      "stealing pipe %c power sequencer from active eDP port %c\n",
2621                      pipe_name(pipe), port_name(port));
2622
2623                 /* make sure vdd is off before we steal it */
2624                 vlv_detach_power_sequencer(intel_dp);
2625         }
2626 }
2627
/*
 * Bind the power sequencer of the crtc's pipe to this eDP port: detach
 * any sequencer this port used before, steal the target pipe's sequencer
 * from whichever other port may hold it, then (re)initialize the pps
 * state and registers for this pipe/port pair.  No-op for non-eDP or if
 * the right sequencer is already bound.  Caller holds pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* already using the right power sequencer? */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2668
/*
 * VLV pre_enable hook: program the DPIO PCS clock-channel registers for
 * this port's channel, then enable the port via intel_enable_dp() (on
 * VLV the port is brought up from pre_enable, not from the enable hook
 * -- see vlv_enable_dp()).
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * NOTE(review): the value read here is immediately discarded by
         * the "val = 0" below, which also makes the "val &= ~(1<<21)"
         * branch a no-op.  Presumably only the write matters (or the read
         * is part of a required sideband access sequence) -- confirm
         * before cleaning this up.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2697
/*
 * VLV pre_pll_enable hook: prepare the port register state and put the
 * DPIO Tx lanes into their default reset state.  The magic constants in
 * the skew fix-up writes come from hardware programming notes.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->sb_lock);
}
2727
/*
 * CHV pre_enable hook: take the Tx data lanes out of soft reset, program
 * per-lane latency and stagger settings through DPIO, then enable the
 * port via intel_enable_dp() (on CHV the port is brought up from the
 * pre_enable hook, not the enable hook).
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i, stagger;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < 4; i++) {
                /* Set the upar bit */
                /* NOTE(review): lane 1 gets upar=0, the others 1 -- hw tuning, confirm */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* stagger step shrinks as the port clock goes down */
        if (intel_crtc->config->port_clock > 270000)
                stagger = 0x18;
        else if (intel_crtc->config->port_clock > 135000)
                stagger = 0xd;
        else if (intel_crtc->config->port_clock > 67500)
                stagger = 0x7;
        else if (intel_crtc->config->port_clock > 33750)
                stagger = 0x4;
        else
                stagger = 0x2;

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));

        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(7) |
                       DPIO_TX2_STAGGER_MULT(5));

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2815
/*
 * CHV pre_pll_enable hook: prepare the port register state, then program
 * left/right clock buffer distribution and clock channel usage in the
 * DPIO common lane block for this port's channel.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_prepare(encoder);

        mutex_lock(&dev_priv->sb_lock);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->sb_lock);
}
2881
2882 /*
2883  * Native read with retry for link status and receiver capability reads for
2884  * cases where the sink may still be asleep.
2885  *
2886  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2887  * supposed to retry 3 times per the spec.
2888  */
2889 static ssize_t
2890 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2891                         void *buffer, size_t size)
2892 {
2893         ssize_t ret;
2894         int i;
2895
2896         /*
2897          * Sometime we just get the same incorrect byte repeated
2898          * over the entire buffer. Doing just one throw away read
2899          * initially seems to "solve" it.
2900          */
2901         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2902
2903         for (i = 0; i < 3; i++) {
2904                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2905                 if (ret == size)
2906                         return ret;
2907                 msleep(1);
2908         }
2909
2910         return ret;
2911 }
2912
2913 /*
2914  * Fetch AUX CH registers 0x202 - 0x207 which contain
2915  * link status information
2916  */
2917 static bool
2918 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2919 {
2920         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2921                                        DP_LANE0_1_STATUS,
2922                                        link_status,
2923                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2924 }
2925
2926 /* These are source-specific values. */
2927 static uint8_t
2928 intel_dp_voltage_max(struct intel_dp *intel_dp)
2929 {
2930         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2931         struct drm_i915_private *dev_priv = dev->dev_private;
2932         enum port port = dp_to_dig_port(intel_dp)->port;
2933
2934         if (IS_BROXTON(dev))
2935                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2936         else if (INTEL_INFO(dev)->gen >= 9) {
2937                 if (dev_priv->edp_low_vswing && port == PORT_A)
2938                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2939                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2940         } else if (IS_VALLEYVIEW(dev))
2941                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2942         else if (IS_GEN7(dev) && port == PORT_A)
2943                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2944         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2945                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2946         else
2947                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2948 }
2949
/*
 * Highest pre-emphasis level the source supports for the given voltage
 * swing, as a DP_TRAIN_PRE_EMPH_LEVEL_* value.  Per the tables below,
 * higher swing levels leave less pre-emphasis headroom on every
 * platform.  The mappings are hardware-tuned per platform; do not
 * "simplify" them.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (INTEL_INFO(dev)->gen >= 9) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
}
3017
/*
 * Program the VLV DPIO Tx swing/de-emphasis registers according to the
 * voltage swing and pre-emphasis requested in intel_dp->train_set[0].
 * Unsupported swing/pre-emphasis combinations bail out early without
 * touching the hardware.  Always returns 0: on VLV the levels live
 * entirely in DPIO, so no bits are merged into the DP port register.
 * The register values are hardware-tuned magic; do not edit by hand.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        /*
         * Commit the selected values.  NOTE(review): TX_DW5 is cleared
         * before and written 0x80000000 after the reprogramming -- looks
         * like a Tx enable/latch toggle around the update; confirm.
         */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->sb_lock);

        return 0;
}
3117
/*
 * Program the CHV DPIO (sideband) PHY for the voltage swing and
 * pre-emphasis level requested in intel_dp->train_set[0]; the same
 * level is applied to all four TX lanes.
 *
 * Returns 0 in all cases: the caller (intel_dp_set_signal_levels())
 * uses a zero mask for CHV, so no DP port register bits are derived
 * from this value. An unsupported swing/pre-emphasis combination also
 * returns 0, without touching the hardware.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/*
	 * Look up the de-emphasis and margin register values for the
	 * requested (pre-emphasis, voltage swing) pair. Combinations not
	 * listed here bail out early without programming anything.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* sb_lock serializes all sideband (DPIO) register accesses. */
	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* Zero the TX margin fields before programming the new swing. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph (same value on all four TX lanes) */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/*
	 * Max swing at 0dB pre-emphasis (presumably the 1200mV case, cf.
	 * the FIXME above — confirm against the PHY docs) additionally
	 * needs the unique transition scale enabled and set to 0x9a.
	 */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3291
3292 static void
3293 intel_get_adjust_train(struct intel_dp *intel_dp,
3294                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3295 {
3296         uint8_t v = 0;
3297         uint8_t p = 0;
3298         int lane;
3299         uint8_t voltage_max;
3300         uint8_t preemph_max;
3301
3302         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3303                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3304                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3305
3306                 if (this_v > v)
3307                         v = this_v;
3308                 if (this_p > p)
3309                         p = this_p;
3310         }
3311
3312         voltage_max = intel_dp_voltage_max(intel_dp);
3313         if (v >= voltage_max)
3314                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3315
3316         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3317         if (p >= preemph_max)
3318                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3319
3320         for (lane = 0; lane < 4; lane++)
3321                 intel_dp->train_set[lane] = v | p;
3322 }
3323
3324 static uint32_t
3325 gen4_signal_levels(uint8_t train_set)
3326 {
3327         uint32_t        signal_levels = 0;
3328
3329         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3330         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3331         default:
3332                 signal_levels |= DP_VOLTAGE_0_4;
3333                 break;
3334         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3335                 signal_levels |= DP_VOLTAGE_0_6;
3336                 break;
3337         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3338                 signal_levels |= DP_VOLTAGE_0_8;
3339                 break;
3340         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3341                 signal_levels |= DP_VOLTAGE_1_2;
3342                 break;
3343         }
3344         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3345         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3346         default:
3347                 signal_levels |= DP_PRE_EMPHASIS_0;
3348                 break;
3349         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3350                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3351                 break;
3352         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3353                 signal_levels |= DP_PRE_EMPHASIS_6;
3354                 break;
3355         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3356                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3357                 break;
3358         }
3359         return signal_levels;
3360 }
3361
3362 /* Gen6's DP voltage swing and pre-emphasis control */
3363 static uint32_t
3364 gen6_edp_signal_levels(uint8_t train_set)
3365 {
3366         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3367                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3368         switch (signal_levels) {
3369         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3370         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3371                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3372         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3373                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3374         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3375         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3376                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3377         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3378         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3379                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3380         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3381         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3382                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3383         default:
3384                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3385                               "0x%x\n", signal_levels);
3386                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3387         }
3388 }
3389
3390 /* Gen7's DP voltage swing and pre-emphasis control */
3391 static uint32_t
3392 gen7_edp_signal_levels(uint8_t train_set)
3393 {
3394         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3395                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3396         switch (signal_levels) {
3397         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3398                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3399         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3400                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3401         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3402                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3403
3404         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3405                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3406         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3407                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3408
3409         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3410                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3411         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3412                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3413
3414         default:
3415                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3416                               "0x%x\n", signal_levels);
3417                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3418         }
3419 }
3420
3421 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3422 static uint32_t
3423 hsw_signal_levels(uint8_t train_set)
3424 {
3425         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3426                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3427         switch (signal_levels) {
3428         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3429                 return DDI_BUF_TRANS_SELECT(0);
3430         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3431                 return DDI_BUF_TRANS_SELECT(1);
3432         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3433                 return DDI_BUF_TRANS_SELECT(2);
3434         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3435                 return DDI_BUF_TRANS_SELECT(3);
3436
3437         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3438                 return DDI_BUF_TRANS_SELECT(4);
3439         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3440                 return DDI_BUF_TRANS_SELECT(5);
3441         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3442                 return DDI_BUF_TRANS_SELECT(6);
3443
3444         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3445                 return DDI_BUF_TRANS_SELECT(7);
3446         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3447                 return DDI_BUF_TRANS_SELECT(8);
3448
3449         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3450                 return DDI_BUF_TRANS_SELECT(9);
3451         default:
3452                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3453                               "0x%x\n", signal_levels);
3454                 return DDI_BUF_TRANS_SELECT(0);
3455         }
3456 }
3457
3458 static void bxt_signal_levels(struct intel_dp *intel_dp)
3459 {
3460         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3461         enum port port = dport->port;
3462         struct drm_device *dev = dport->base.base.dev;
3463         struct intel_encoder *encoder = &dport->base;
3464         uint8_t train_set = intel_dp->train_set[0];
3465         uint32_t level = 0;
3466
3467         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3468                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3469         switch (signal_levels) {
3470         default:
3471                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3472         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3473                 level = 0;
3474                 break;
3475         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3476                 level = 1;
3477                 break;
3478         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3479                 level = 2;
3480                 break;
3481         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3482                 level = 3;
3483                 break;
3484         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3485                 level = 4;
3486                 break;
3487         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3488                 level = 5;
3489                 break;
3490         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3491                 level = 6;
3492                 break;
3493         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3494                 level = 7;
3495                 break;
3496         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3497                 level = 8;
3498                 break;
3499         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3500                 level = 9;
3501                 break;
3502         }
3503
3504         bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3505 }
3506
/*
 * Properly updates "DP" with the correct signal levels.
 *
 * Dispatches on the platform: DDI/gen7/gen6/gen4 compute port register
 * bits (applied below under "mask"), while BXT/CHV/VLV helpers program
 * their PHY directly and contribute no port register bits (mask == 0,
 * so *DP is left unchanged by the final OR).
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	/*
	 * NOTE(review): branch order looks deliberate — IS_BROXTON before
	 * HAS_DDI, and IS_CHERRYVIEW before IS_VALLEYVIEW — presumably
	 * because the later checks also match those platforms; confirm
	 * before reordering.
	 */
	if (IS_BROXTON(dev)) {
		signal_levels = 0;
		bxt_signal_levels(intel_dp);
		mask = 0;
	} else if (HAS_DDI(dev)) {
		signal_levels = hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only log raw register bits when there are any (mask != 0). */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	*DP = (*DP & ~mask) | signal_levels;
}
3552
3553 static bool
3554 intel_dp_set_link_train(struct intel_dp *intel_dp,
3555                         uint32_t *DP,
3556                         uint8_t dp_train_pat)
3557 {
3558         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3559         struct drm_device *dev = intel_dig_port->base.base.dev;
3560         struct drm_i915_private *dev_priv = dev->dev_private;
3561         uint8_t buf[sizeof(intel_dp->train_set) + 1];
3562         int ret, len;
3563
3564         _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3565
3566         I915_WRITE(intel_dp->output_reg, *DP);
3567         POSTING_READ(intel_dp->output_reg);
3568
3569         buf[0] = dp_train_pat;
3570         if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3571             DP_TRAINING_PATTERN_DISABLE) {
3572                 /* don't write DP_TRAINING_LANEx_SET on disable */
3573                 len = 1;
3574         } else {
3575                 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3576                 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3577                 len = intel_dp->lane_count + 1;
3578         }
3579
3580         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3581                                 buf, len);
3582
3583         return ret == len;
3584 }
3585
/*
 * (Re)start training from a clean state: zero the per-lane train set
 * (unless a previously successful set is being reused, indicated by
 * train_set_valid), then program the signal levels and the requested
 * training pattern. Returns intel_dp_set_link_train()'s result.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3595
3596 static bool
3597 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3598                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3599 {
3600         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3601         struct drm_device *dev = intel_dig_port->base.base.dev;
3602         struct drm_i915_private *dev_priv = dev->dev_private;
3603         int ret;
3604
3605         intel_get_adjust_train(intel_dp, link_status);
3606         intel_dp_set_signal_levels(intel_dp, DP);
3607
3608         I915_WRITE(intel_dp->output_reg, *DP);
3609         POSTING_READ(intel_dp->output_reg);
3610
3611         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3612                                 intel_dp->train_set, intel_dp->lane_count);
3613
3614         return ret == intel_dp->lane_count;
3615 }
3616
/*
 * Switch a DDI port to transmitting idle patterns after training, and
 * wait for the hardware to signal idle-done (except on PORT_A — see
 * the comment below). No-op on pre-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Replace the current training pattern with idle transmission. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms timeout on the idle-done status bit. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3647
3648 /* Enable corresponding port and start training pattern 1 */
3649 void
3650 intel_dp_start_link_train(struct intel_dp *intel_dp)
3651 {
3652         struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3653         struct drm_device *dev = encoder->dev;
3654         int i;
3655         uint8_t voltage;
3656         int voltage_tries, loop_tries;
3657         uint32_t DP = intel_dp->DP;
3658         uint8_t link_config[2];
3659
3660         if (HAS_DDI(dev))
3661                 intel_ddi_prepare_link_retrain(encoder);
3662
3663         /* Write the link configuration data */
3664         link_config[0] = intel_dp->link_bw;
3665         link_config[1] = intel_dp->lane_count;
3666         if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3667                 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3668         drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3669         if (intel_dp->num_sink_rates)
3670                 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3671                                 &intel_dp->rate_select, 1);
3672
3673         link_config[0] = 0;
3674         link_config[1] = DP_SET_ANSI_8B10B;
3675         drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3676
3677         DP |= DP_PORT_EN;
3678
3679         /* clock recovery */
3680         if (!intel_dp_reset_link_train(intel_dp, &DP,
3681                                        DP_TRAINING_PATTERN_1 |
3682                                        DP_LINK_SCRAMBLING_DISABLE)) {
3683                 DRM_ERROR("failed to enable link training\n");
3684                 return;
3685         }
3686
3687         voltage = 0xff;
3688         voltage_tries = 0;
3689         loop_tries = 0;
3690         for (;;) {
3691                 uint8_t link_status[DP_LINK_STATUS_SIZE];
3692
3693                 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3694                 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3695                         DRM_ERROR("failed to get link status\n");
3696                         break;
3697                 }
3698
3699                 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3700                         DRM_DEBUG_KMS("clock recovery OK\n");
3701                         break;
3702                 }
3703
3704                 /*
3705                  * if we used previously trained voltage and pre-emphasis values
3706                  * and we don't get clock recovery, reset link training values
3707                  */
3708                 if (intel_dp->train_set_valid) {
3709                         DRM_DEBUG_KMS("clock recovery not ok, reset");
3710                         /* clear the flag as we are not reusing train set */
3711                         intel_dp->train_set_valid = false;
3712                         if (!intel_dp_reset_link_train(intel_dp, &DP,
3713                                                        DP_TRAINING_PATTERN_1 |
3714                                                        DP_LINK_SCRAMBLING_DISABLE)) {
3715                                 DRM_ERROR("failed to enable link training\n");
3716                                 return;
3717                         }
3718                         continue;
3719                 }
3720
3721                 /* Check to see if we've tried the max voltage */
3722                 for (i = 0; i < intel_dp->lane_count; i++)
3723                         if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3724                                 break;
3725                 if (i == intel_dp->lane_count) {
3726                         ++loop_tries;
3727                         if (loop_tries == 5) {
3728                                 DRM_ERROR("too many full retries, give up\n");
3729                                 break;
3730                         }
3731                         intel_dp_reset_link_train(intel_dp, &DP,
3732                                                   DP_TRAINING_PATTERN_1 |
3733                                                   DP_LINK_SCRAMBLING_DISABLE);
3734                         voltage_tries = 0;
3735                         continue;
3736                 }
3737
3738                 /* Check to see if we've tried the same voltage 5 times */
3739                 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3740                         ++voltage_tries;
3741                         if (voltage_tries == 5) {
3742                                 DRM_ERROR("too many voltage retries, give up\n");
3743                                 break;
3744                         }
3745                 } else
3746                         voltage_tries = 0;
3747                 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3748
3749                 /* Update training set as requested by target */
3750                 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3751                         DRM_ERROR("failed to update link training\n");
3752                         break;
3753                 }
3754         }
3755
3756         intel_dp->DP = DP;
3757 }
3758
/*
 * Channel-equalization phase of DP link training: transmit training
 * pattern 2 (or 3, when supported) and iterate on the sink's adjust
 * requests until channel EQ succeeds. If clock recovery is lost, or
 * EQ fails 5 times in a row, the whole training restarts via
 * intel_dp_start_link_train() (bounded by cr_tries). On success,
 * train_set_valid is set so the next training can reuse the levels.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Total restart budget: give up after 5 full retrains. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3841
/*
 * Stop link training by switching the port (and the sink's DPCD) to
 * DP_TRAINING_PATTERN_DISABLE, i.e. normal pixel transmission.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
        intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                DP_TRAINING_PATTERN_DISABLE);
}
3847
/*
 * Power down the DP port: step the link through the idle training
 * pattern, then clear the port-enable and audio bits.
 *
 * Only used on non-DDI platforms (DDI ports have their own disable
 * sequence); expects the port to still be enabled when called.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t DP = intel_dp->DP;

        if (WARN_ON(HAS_DDI(dev)))
                return;

        /* Nothing to do if the port is already disabled */
        if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
                return;

        DRM_DEBUG_KMS("\n");

        /* Gen7 port A and CPT PCH ports use the CPT link-train bit layout */
        if ((IS_GEN7(dev) && port == PORT_A) ||
            (HAS_PCH_CPT(dev) && port != PORT_A)) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
        } else {
                if (IS_CHERRYVIEW(dev))
                        DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        DP &= ~DP_LINK_TRAIN_MASK;
                DP |= DP_LINK_TRAIN_PAT_IDLE;
        }
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        /* Now actually disable the port (and audio) */
        DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * HW workaround for IBX, we need to move the port
         * to transcoder A after disabling it to allow the
         * matching HDMI port to be enabled on transcoder A.
         */
        if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
                /* always enable with pattern 1 (as per spec) */
                DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
                DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);

                DP &= ~DP_PORT_EN;
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
        }

        /* Give the panel time to fully power down */
        msleep(intel_dp->panel_power_down_delay);
}
3903
/*
 * Read and cache the sink's DPCD receiver capability block, and derive
 * driver state from it: PSR/PSR2 sink support, TPS3 usability,
 * eDP 1.4 intermediate link rates, and downstream port info.
 *
 * Returns false if the aux transfer fails, no DPCD is present, or the
 * downstream port status could not be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint8_t rev;

        /* Read the whole receiver-capability block starting at DPCD 0x000 */
        if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
                                    sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */

        DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */

        /* Check if the panel supports PSR */
        memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
        if (is_edp(intel_dp)) {
                intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
                                        intel_dp->psr_dpcd,
                                        sizeof(intel_dp->psr_dpcd));
                if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
                        dev_priv->psr.sink_support = true;
                        DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
                }

                /* PSR2 is only considered on gen9+ sources */
                if (INTEL_INFO(dev)->gen >= 9 &&
                        (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
                        uint8_t frame_sync_cap;

                        dev_priv->psr.sink_support = true;
                        intel_dp_dpcd_read_wake(&intel_dp->aux,
                                        DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
                                        &frame_sync_cap, 1);
                        dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
                        /* PSR2 needs frame sync as well */
                        dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
                        DRM_DEBUG_KMS("PSR2 %s on sink",
                                dev_priv->psr.psr2_support ? "supported" : "not supported");
                }
        }

        /* Training Pattern 3 support, Intel platforms that support HBR2 alone
         * have support for TP3 hence that check is used along with dpcd check
         * to ensure TP3 can be enabled.
         * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
         * supported but still not enabled.
         */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
            intel_dp_source_supports_hbr2(dev)) {
                intel_dp->use_tps3 = true;
                DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
                intel_dp->use_tps3 = false;

        /* Intermediate frequency support */
        if (is_edp(intel_dp) &&
            (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
            (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
            (rev >= 0x03)) { /* eDp v1.4 or higher */
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
                int i;

                intel_dp_dpcd_read_wake(&intel_dp->aux,
                                DP_SUPPORTED_LINK_RATES,
                                sink_rates,
                                sizeof(sink_rates));

                /* The rate table is zero-terminated (unused entries are 0) */
                for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
                        int val = le16_to_cpu(sink_rates[i]);

                        if (val == 0)
                                break;

                        /* Value read is in kHz while drm clock is saved in deca-kHz */
                        intel_dp->sink_rates[i] = (val * 200) / 10;
                }
                intel_dp->num_sink_rates = i;
        }

        intel_dp_print_rates(intel_dp);

        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */

        if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
                return true; /* no per-port downstream info */

        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
                                    intel_dp->downstream_ports,
                                    DP_MAX_DOWNSTREAM_PORTS) < 0)
                return false; /* downstream port status fetch failed */

        return true;
}
4003
4004 static void
4005 intel_dp_probe_oui(struct intel_dp *intel_dp)
4006 {
4007         u8 buf[3];
4008
4009         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4010                 return;
4011
4012         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4013                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4014                               buf[0], buf[1], buf[2]);
4015
4016         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4017                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4018                               buf[0], buf[1], buf[2]);
4019 }
4020
4021 static bool
4022 intel_dp_probe_mst(struct intel_dp *intel_dp)
4023 {
4024         u8 buf[1];
4025
4026         if (!intel_dp->can_mst)
4027                 return false;
4028
4029         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4030                 return false;
4031
4032         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4033                 if (buf[0] & DP_MST_CAP) {
4034                         DRM_DEBUG_KMS("Sink is MST capable\n");
4035                         intel_dp->is_mst = true;
4036                 } else {
4037                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4038                         intel_dp->is_mst = false;
4039                 }
4040         }
4041
4042         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4043         return intel_dp->is_mst;
4044 }
4045
/*
 * Read a 6-byte frame CRC from the sink's DPCD test registers.
 *
 * Sequence: verify CRC support, start CRC calculation in the sink, wait
 * (up to 6 vblanks) for the sink's CRC count to advance, read the CRC,
 * then stop the calculation. IPS is disabled for the duration so the
 * pipe output is stable.
 *
 * Returns 0 on success, -ENOTTY if the sink can't compute CRCs, -EIO on
 * aux failures, -ETIMEDOUT if the sink never produces a fresh CRC.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
        u8 buf;
        int test_crc_count;
        int attempts = 6;
        int ret = 0;

        hsw_disable_ips(intel_crtc);

        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
                ret = -EIO;
                goto out;
        }

        if (!(buf & DP_TEST_CRC_SUPPORTED)) {
                ret = -ENOTTY;
                goto out;
        }

        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
                ret = -EIO;
                goto out;
        }

        /* Start CRC calculation in the sink (read-modify-write) */
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                                buf | DP_TEST_SINK_START) < 0) {
                ret = -EIO;
                goto out;
        }

        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
                ret = -EIO;
                goto out;
        }

        /* Snapshot the current CRC count so we can detect a fresh CRC */
        test_crc_count = buf & DP_TEST_COUNT_MASK;

        do {
                if (drm_dp_dpcd_readb(&intel_dp->aux,
                                      DP_TEST_SINK_MISC, &buf) < 0) {
                        ret = -EIO;
                        goto out;
                }
                intel_wait_for_vblank(dev, intel_crtc->pipe);
        } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

        if (attempts == 0) {
                DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
                ret = -ETIMEDOUT;
                goto out;
        }

        if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
                ret = -EIO;
                goto out;
        }

        /* Stop CRC calculation again (read-modify-write) */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
                ret = -EIO;
                goto out;
        }
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                               buf & ~DP_TEST_SINK_START) < 0) {
                ret = -EIO;
                goto out;
        }
out:
        hsw_enable_ips(intel_crtc);
        return ret;
}
4120
4121 static bool
4122 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4123 {
4124         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4125                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4126                                        sink_irq_vector, 1) == 1;
4127 }
4128
4129 static bool
4130 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4131 {
4132         int ret;
4133
4134         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4135                                              DP_SINK_COUNT_ESI,
4136                                              sink_irq_vector, 14);
4137         if (ret != 14)
4138                 return false;
4139
4140         return true;
4141 }
4142
4143 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4144 {
4145         uint8_t test_result = DP_TEST_ACK;
4146         return test_result;
4147 }
4148
4149 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4150 {
4151         uint8_t test_result = DP_TEST_NAK;
4152         return test_result;
4153 }
4154
4155 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4156 {
4157         uint8_t test_result = DP_TEST_NAK;
4158         struct intel_connector *intel_connector = intel_dp->attached_connector;
4159         struct drm_connector *connector = &intel_connector->base;
4160
4161         if (intel_connector->detect_edid == NULL ||
4162             connector->edid_corrupt ||
4163             intel_dp->aux.i2c_defer_count > 6) {
4164                 /* Check EDID read for NACKs, DEFERs and corruption
4165                  * (DP CTS 1.2 Core r1.1)
4166                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4167                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4168                  *    4.2.2.6 : EDID corruption detected
4169                  * Use failsafe mode for all cases
4170                  */
4171                 if (intel_dp->aux.i2c_nack_count > 0 ||
4172                         intel_dp->aux.i2c_defer_count > 0)
4173                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4174                                       intel_dp->aux.i2c_nack_count,
4175                                       intel_dp->aux.i2c_defer_count);
4176                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4177         } else {
4178                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4179                                         DP_TEST_EDID_CHECKSUM,
4180                                         &intel_connector->detect_edid->checksum,
4181                                         1))
4182                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4183
4184                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4185                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4186         }
4187
4188         /* Set test active flag here so userspace doesn't interrupt things */
4189         intel_dp->compliance_test_active = 1;
4190
4191         return test_result;
4192 }
4193
4194 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4195 {
4196         uint8_t test_result = DP_TEST_NAK;
4197         return test_result;
4198 }
4199
/*
 * Dispatch a sink-initiated automated compliance test: read the test
 * request from DPCD, run the matching autotest handler, and write the
 * ACK/NAK response back to the sink.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
        uint8_t response = DP_TEST_NAK;
        uint8_t rxdata = 0;
        int status = 0;

        /* Reset compliance state before running a new test */
        intel_dp->compliance_test_active = 0;
        intel_dp->compliance_test_type = 0;
        intel_dp->compliance_test_data = 0;

        /* Clear aux error counters so the EDID test sees fresh counts */
        intel_dp->aux.i2c_nack_count = 0;
        intel_dp->aux.i2c_defer_count = 0;

        status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
        if (status <= 0) {
                DRM_DEBUG_KMS("Could not read test request from sink\n");
                goto update_status;
        }

        switch (rxdata) {
        case DP_TEST_LINK_TRAINING:
                DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
                intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
                response = intel_dp_autotest_link_training(intel_dp);
                break;
        case DP_TEST_LINK_VIDEO_PATTERN:
                DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
                intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
                response = intel_dp_autotest_video_pattern(intel_dp);
                break;
        case DP_TEST_LINK_EDID_READ:
                DRM_DEBUG_KMS("EDID test requested\n");
                intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
                response = intel_dp_autotest_edid(intel_dp);
                break;
        case DP_TEST_LINK_PHY_TEST_PATTERN:
                DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
                intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
                response = intel_dp_autotest_phy_pattern(intel_dp);
                break;
        default:
                DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
                break;
        }

update_status:
        /* Always tell the sink how the request was handled (NAK default) */
        status = drm_dp_dpcd_write(&intel_dp->aux,
                                   DP_TEST_RESPONSE,
                                   &response, 1);
        if (status <= 0)
                DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4252
/*
 * Service an MST hotplug/IRQ: read the ESI block, retrain the link if
 * channel EQ was lost, and hand the ESI to the MST topology manager.
 * Loops (via go_again) for as long as the manager reports events were
 * handled and a fresh ESI can be read.
 *
 * Returns the MST manager's result, 0 when nothing was handled, or
 * -EINVAL when not in MST mode / when the ESI read fails (in which case
 * MST is torn down and a hotplug event is sent).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
        bool bret;

        if (intel_dp->is_mst) {
                u8 esi[16] = { 0 };
                int ret = 0;
                int retry;
                bool handled;
                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
                if (bret == true) {

                        /* check link status - esi[10] = 0x200c */
                        if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                                DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
                                intel_dp_start_link_train(intel_dp);
                                intel_dp_complete_link_train(intel_dp);
                                intel_dp_stop_link_train(intel_dp);
                        }

                        DRM_DEBUG_KMS("got esi %3ph\n", esi);
                        ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

                        if (handled) {
                                /* Ack the serviced events back to the sink,
                                 * retrying the 3-byte write up to 3 times */
                                for (retry = 0; retry < 3; retry++) {
                                        int wret;
                                        wret = drm_dp_dpcd_write(&intel_dp->aux,
                                                                 DP_SINK_COUNT_ESI+1,
                                                                 &esi[1], 3);
                                        if (wret == 3) {
                                                break;
                                        }
                                }

                                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
                                if (bret == true) {
                                        DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
                                        goto go_again;
                                }
                        } else
                                ret = 0;

                        return ret;
                } else {
                        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
                        DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
                        /* send a hotplug event */
                        drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
                }
        }
        return -EINVAL;
}
4309
4310 /*
4311  * According to DP spec
4312  * 5.1.2:
4313  *  1. Read DPCD
4314  *  2. Configure link according to Receiver Capabilities
4315  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4316  *  4. Check link status on receipt of hot-plug interrupt
4317  */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
        u8 link_status[DP_LINK_STATUS_SIZE];

        /* Caller must hold the connection mutex */
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

        /* Only bother when the output is active on a live crtc */
        if (!intel_encoder->connectors_active)
                return;

        if (WARN_ON(!intel_encoder->base.crtc))
                return;

        if (!to_intel_crtc(intel_encoder->base.crtc)->active)
                return;

        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                return;
        }

        /* Now read the DPCD to see if it's actually running */
        if (!intel_dp_get_dpcd(intel_dp)) {
                return;
        }

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                   DP_DEVICE_SERVICE_IRQ_VECTOR,
                                   sink_irq_vector);

                /* Short-pulse path: test requests are only logged here */
                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

        /* Retrain if channel EQ was lost (DP spec 5.1.2 step 4) */
        if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
                              intel_encoder->base.name);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
                intel_dp_stop_link_train(intel_dp);
        }
}
4369
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connection status from the DPCD: handles native sinks,
 * HPD-aware branch devices (via SINK_COUNT), and legacy branch devices
 * (via a DDC probe, falling back to "unknown" for unreliable types).
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
        uint8_t *dpcd = intel_dp->dpcd;
        uint8_t type;

        if (!intel_dp_get_dpcd(intel_dp))
                return connector_status_disconnected;

        /* if there's no downstream port, we're done */
        if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
                return connector_status_connected;

        /* If we're HPD-aware, SINK_COUNT changes dynamically */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
                uint8_t reg;

                if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
                                            &reg, 1) < 0)
                        return connector_status_unknown;

                /* Non-zero sink count means something is attached downstream */
                return DP_GET_SINK_COUNT(reg) ? connector_status_connected
                                              : connector_status_disconnected;
        }

        /* If no HPD, poke DDC gently */
        if (drm_probe_ddc(&intel_dp->aux.ddc))
                return connector_status_connected;

        /* Well we tried, say unknown for unreliable port types */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
                type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
                if (type == DP_DS_PORT_TYPE_VGA ||
                    type == DP_DS_PORT_TYPE_NON_EDID)
                        return connector_status_unknown;
        } else {
                /* DPCD 1.0: only the coarse port-type field is available */
                type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
                        DP_DWN_STRM_PORT_TYPE_MASK;
                if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
                    type == DP_DWN_STRM_PORT_TYPE_OTHER)
                        return connector_status_unknown;
        }

        /* Anything else is out of spec, warn and ignore */
        DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
        return connector_status_disconnected;
}
4419
4420 static enum drm_connector_status
4421 edp_detect(struct intel_dp *intel_dp)
4422 {
4423         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4424         enum drm_connector_status status;
4425
4426         status = intel_panel_detect(dev);
4427         if (status == connector_status_unknown)
4428                 status = connector_status_connected;
4429
4430         return status;
4431 }
4432
4433 static enum drm_connector_status
4434 ironlake_dp_detect(struct intel_dp *intel_dp)
4435 {
4436         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4437         struct drm_i915_private *dev_priv = dev->dev_private;
4438         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4439
4440         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4441                 return connector_status_disconnected;
4442
4443         return intel_dp_detect_dpcd(intel_dp);
4444 }
4445
4446 static int g4x_digital_port_connected(struct drm_device *dev,
4447                                        struct intel_digital_port *intel_dig_port)
4448 {
4449         struct drm_i915_private *dev_priv = dev->dev_private;
4450         uint32_t bit;
4451
4452         if (IS_VALLEYVIEW(dev)) {
4453                 switch (intel_dig_port->port) {
4454                 case PORT_B:
4455                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4456                         break;
4457                 case PORT_C:
4458                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4459                         break;
4460                 case PORT_D:
4461                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4462                         break;
4463                 default:
4464                         return -EINVAL;
4465                 }
4466         } else {
4467                 switch (intel_dig_port->port) {
4468                 case PORT_B:
4469                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4470                         break;
4471                 case PORT_C:
4472                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4473                         break;
4474                 case PORT_D:
4475                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4476                         break;
4477                 default:
4478                         return -EINVAL;
4479                 }
4480         }
4481
4482         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4483                 return 0;
4484         return 1;
4485 }
4486
4487 static enum drm_connector_status
4488 g4x_dp_detect(struct intel_dp *intel_dp)
4489 {
4490         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4491         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4492         int ret;
4493
4494         /* Can't disconnect eDP, but you can close the lid... */
4495         if (is_edp(intel_dp)) {
4496                 enum drm_connector_status status;
4497
4498                 status = intel_panel_detect(dev);
4499                 if (status == connector_status_unknown)
4500                         status = connector_status_connected;
4501                 return status;
4502         }
4503
4504         ret = g4x_digital_port_connected(dev, intel_dig_port);
4505         if (ret == -EINVAL)
4506                 return connector_status_unknown;
4507         else if (ret == 0)
4508                 return connector_status_disconnected;
4509
4510         return intel_dp_detect_dpcd(intel_dp);
4511 }
4512
4513 static struct edid *
4514 intel_dp_get_edid(struct intel_dp *intel_dp)
4515 {
4516         struct intel_connector *intel_connector = intel_dp->attached_connector;
4517
4518         /* use cached edid if we have one */
4519         if (intel_connector->edid) {
4520                 /* invalid edid */
4521                 if (IS_ERR(intel_connector->edid))
4522                         return NULL;
4523
4524                 return drm_edid_duplicate(intel_connector->edid);
4525         } else
4526                 return drm_get_edid(&intel_connector->base,
4527                                     &intel_dp->aux.ddc);
4528 }
4529
4530 static void
4531 intel_dp_set_edid(struct intel_dp *intel_dp)
4532 {
4533         struct intel_connector *intel_connector = intel_dp->attached_connector;
4534         struct edid *edid;
4535
4536         edid = intel_dp_get_edid(intel_dp);
4537         intel_connector->detect_edid = edid;
4538
4539         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4540                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4541         else
4542                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4543 }
4544
4545 static void
4546 intel_dp_unset_edid(struct intel_dp *intel_dp)
4547 {
4548         struct intel_connector *intel_connector = intel_dp->attached_connector;
4549
4550         kfree(intel_connector->detect_edid);
4551         intel_connector->detect_edid = NULL;
4552
4553         intel_dp->has_audio = false;
4554 }
4555
4556 static enum intel_display_power_domain
4557 intel_dp_power_get(struct intel_dp *dp)
4558 {
4559         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4560         enum intel_display_power_domain power_domain;
4561
4562         power_domain = intel_display_port_power_domain(encoder);
4563         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4564
4565         return power_domain;
4566 }
4567
/* Release a power domain reference taken by intel_dp_power_get(). */
static void
intel_dp_power_put(struct intel_dp *dp,
                   enum intel_display_power_domain power_domain)
{
        struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
        intel_display_power_put(to_i915(encoder->base.dev), power_domain);
}
4575
/*
 * Connector ->detect() hook for DP: determines connection status,
 * probes OUI and MST capability, caches the EDID, and services any
 * pending sink IRQs (automated test requests included).
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
        bool ret;
        u8 sink_irq_vector;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        /* Invalidate any previously cached EDID before re-detecting */
        intel_dp_unset_edid(intel_dp);

        if (intel_dp->is_mst) {
                /* MST devices are disconnected from a monitor POV */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                return connector_status_disconnected;
        }

        power_domain = intel_dp_power_get(intel_dp);

        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (HAS_PCH_SPLIT(dev))
                status = ironlake_dp_detect(intel_dp);
        else
                status = g4x_dp_detect(intel_dp);
        if (status != connector_status_connected)
                goto out;

        intel_dp_probe_oui(intel_dp);

        ret = intel_dp_probe_mst(intel_dp);
        if (ret) {
                /* if we are in MST mode then this connector
                   won't appear connected or have anything with EDID on it */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                status = connector_status_disconnected;
                goto out;
        }

        intel_dp_set_edid(intel_dp);

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        status = connector_status_connected;

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                   DP_DEVICE_SERVICE_IRQ_VECTOR,
                                   sink_irq_vector);

                /* Long-pulse path: test requests ARE handled here */
                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        intel_dp_handle_test_request(intel_dp);
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

out:
        intel_dp_power_put(intel_dp, power_domain);
        return status;
}
4647
/*
 * ->force() connector hook: refresh the cached EDID for a connector
 * whose status user space has forced, without running a full detect
 * cycle.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	/* Nothing to re-read unless the connector is (forced) connected. */
	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4671
4672 static int intel_dp_get_modes(struct drm_connector *connector)
4673 {
4674         struct intel_connector *intel_connector = to_intel_connector(connector);
4675         struct edid *edid;
4676
4677         edid = intel_connector->detect_edid;
4678         if (edid) {
4679                 int ret = intel_connector_update_modes(connector, edid);
4680                 if (ret)
4681                         return ret;
4682         }
4683
4684         /* if eDP has no EDID, fall back to fixed mode */
4685         if (is_edp(intel_attached_dp(connector)) &&
4686             intel_connector->panel.fixed_mode) {
4687                 struct drm_display_mode *mode;
4688
4689                 mode = drm_mode_duplicate(connector->dev,
4690                                           intel_connector->panel.fixed_mode);
4691                 if (mode) {
4692                         drm_mode_probed_add(connector, mode);
4693                         return 1;
4694                 }
4695         }
4696
4697         return 0;
4698 }
4699
4700 static bool
4701 intel_dp_detect_audio(struct drm_connector *connector)
4702 {
4703         bool has_audio = false;
4704         struct edid *edid;
4705
4706         edid = to_intel_connector(connector)->detect_edid;
4707         if (edid)
4708                 has_audio = drm_detect_monitor_audio(edid);
4709
4710         return has_audio;
4711 }
4712
/*
 * ->set_property() hook: handle the DP-specific connector properties
 * (force_audio, Broadcast RGB and, for eDP, the scaling mode).  Any
 * accepted change that affects output triggers a modeset restore on the
 * attached CRTC.  Returns 0 on success or a negative errno.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op if the forced audio setting didn't change. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if the effective range didn't change. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new property by redoing the modeset on the live CRTC. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4800
/* Free the connector and everything it owns (cached EDIDs, eDP panel). */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR from a failed read, not just NULL. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4819
/*
 * Encoder ->destroy() hook: tear down the AUX channel and MST state,
 * force panel VDD off for eDP, and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4845
/* Encoder suspend hook: make sure eDP panel VDD is really off. */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4862
/*
 * If the BIOS left panel VDD enabled, adopt the corresponding power
 * domain reference into our state tracking and schedule a delayed VDD
 * off so the reference isn't held forever.  Caller holds pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4887
/*
 * Encoder ->reset() hook (driver load / resume).  Only eDP needs work:
 * re-read the power sequencer assignment (Valleyview) and sanitize any
 * VDD state left enabled by the BIOS.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4910
/* drm_connector hooks shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4922
/* Probe-helper hooks for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4928
/* drm_encoder hooks for the DP/eDP encoder. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4933
/*
 * Intentionally empty: DP hot plug is handled via intel_dp_hpd_pulse()
 * instead of this legacy encoder hook.
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
4939
/*
 * HPD interrupt handler for a DP digital port.
 *
 * Long pulses re-check the physical connection, re-read the DPCD and
 * re-probe MST; short pulses service MST sideband messages or check DP
 * link status for SST.  Ignores long pulses on eDP to avoid the
 * "vdd off -> long hpd -> vdd on" loop described below.  Returns
 * IRQ_HANDLED when the pulse was processed, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		/* Check the port is still physically connected. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5025
5026 /* Return which DP Port should be selected for Transcoder DP control */
5027 int
5028 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5029 {
5030         struct drm_device *dev = crtc->dev;
5031         struct intel_encoder *intel_encoder;
5032         struct intel_dp *intel_dp;
5033
5034         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5035                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5036
5037                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5038                     intel_encoder->type == INTEL_OUTPUT_EDP)
5039                         return intel_dp->output_reg;
5040         }
5041
5042         return -1;
5043 }
5044
/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	/* NOTE(review): only PORT_B..PORT_D have entries here; the lookup
	 * below would read out of bounds for other ports -- presumably
	 * callers never pass those (PORT_A is handled explicitly). TODO
	 * confirm against callers. */
	static const short port_mapping[] = {
		[PORT_B] = PORT_IDPB,
		[PORT_C] = PORT_IDPC,
		[PORT_D] = PORT_IDPD,
	};

	/* Port A is always eDP on the platforms handled here. */
	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	/* Scan the VBT child devices for an eDP entry on this port. */
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
5073
/*
 * Attach the DP-specific connector properties: force_audio, Broadcast
 * RGB and, for eDP panels, the scaling mode (defaulting to aspect).
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5092
/*
 * Initialize the panel power sequencing timestamps to "now" so the
 * first real power transition honours the full required delays.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5099
/*
 * Compute the panel power sequencer delays (intel_dp->pps_delays) as
 * the max of the current hardware register values and the VBT, falling
 * back to the eDP spec limits when both are unset, then derive the
 * software delay fields (in ms) from them.  Caller holds pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the PP register block for this platform/pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100usec hw units to ms for the sw delay fields. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5202
/*
 * Program the previously computed pps_delays into the panel power
 * sequencer registers, including the port select bits on platforms
 * that have them.  Caller holds pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the PP register block for this platform/pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
5268
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* The downclock mode's vrefresh marks the low-RR target. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* gen8+ (except CHV) switch RR via the two precomputed M/N sets;
	 * gen7/gen7.5 toggle the PIPECONF eDP RR mode switch bit instead. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5372
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	/* Only one eDP can have DRRS enabled at a time. */
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5405
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Tears down the DRRS state set up by intel_edp_drrs_enable(): restores
 * the panel's fixed (high) refresh rate if currently in low RR, clears
 * drrs.dp and cancels any pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS was never enabled; nothing to tear down. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Go back to the panel's fixed refresh rate before disabling. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Cancel outside the mutex: the downclock work itself takes
	 * drrs.mutex, so a synchronous cancel under the lock could
	 * deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5438
5439 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5440 {
5441         struct drm_i915_private *dev_priv =
5442                 container_of(work, typeof(*dev_priv), drrs.work.work);
5443         struct intel_dp *intel_dp;
5444
5445         mutex_lock(&dev_priv->drrs.mutex);
5446
5447         intel_dp = dev_priv->drrs.dp;
5448
5449         if (!intel_dp)
5450                 goto unlock;
5451
5452         /*
5453          * The delayed work can race with an invalidate hence we need to
5454          * recheck.
5455          */
5456
5457         if (dev_priv->drrs.busy_frontbuffer_bits)
5458                 goto unlock;
5459
5460         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5461                 intel_dp_set_drrs_state(dev_priv->dev,
5462                         intel_dp->attached_connector->panel.
5463                         downclock_mode->vrefresh);
5464
5465 unlock:
5466         mutex_unlock(&dev_priv->drrs.mutex);
5467 }
5468
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Stop any pending switch to low RR; the screen is busy again. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS not currently enabled on any pipe. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Bump back to the fixed (high) refresh rate immediately. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);
	}

	/* Only bits belonging to the DRRS pipe count as "busy". */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
5512
5513 /**
5514  * intel_edp_drrs_flush - Flush DRRS
5515  * @dev: DRM device
5516  * @frontbuffer_bits: frontbuffer plane tracking bits
5517  *
5518  * When there is no movement on screen, DRRS work can be scheduled.
5519  * This DRRS work is responsible for setting relevant registers after a
5520  * timeout of 1 second.
5521  *
5522  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5523  */
5524 void intel_edp_drrs_flush(struct drm_device *dev,
5525                 unsigned frontbuffer_bits)
5526 {
5527         struct drm_i915_private *dev_priv = dev->dev_private;
5528         struct drm_crtc *crtc;
5529         enum pipe pipe;
5530
5531         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5532                 return;
5533
5534         cancel_delayed_work(&dev_priv->drrs.work);
5535
5536         mutex_lock(&dev_priv->drrs.mutex);
5537         if (!dev_priv->drrs.dp) {
5538                 mutex_unlock(&dev_priv->drrs.mutex);
5539                 return;
5540         }
5541
5542         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5543         pipe = to_intel_crtc(crtc)->pipe;
5544         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5545
5546         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5547                         !dev_priv->drrs.busy_frontbuffer_bits)
5548                 schedule_delayed_work(&dev_priv->drrs.work,
5549                                 msecs_to_jiffies(1000));
5550         mutex_unlock(&dev_priv->drrs.mutex);
5551 }
5552
5553 /**
5554  * DOC: Display Refresh Rate Switching (DRRS)
5555  *
5556  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5558  * dynamically, based on the usage scenario. This feature is applicable
5559  * for internal panels.
5560  *
5561  * Indication that the panel supports DRRS is given by the panel EDID, which
5562  * would list multiple refresh rates for one resolution.
5563  *
5564  * DRRS is of 2 types - static and seamless.
5565  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5566  * (may appear as a blink on screen) and is used in dock-undock scenario.
5567  * Seamless DRRS involves changing RR without any visual effect to the user
5568  * and can be used during normal system usage. This is done by programming
5569  * certain registers.
5570  *
5571  * Support for static/seamless DRRS may be indicated in the VBT based on
5572  * inputs from the panel spec.
5573  *
5574  * DRRS saves power by switching to low RR based on usage scenarios.
5575  *
5576  * eDP DRRS:-
5577  *        The implementation is based on frontbuffer tracking implementation.
5578  * When there is a disturbance on the screen triggered by user activity or a
5579  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5580  * When there is no movement on screen, after a timeout of 1 second, a switch
5581  * to low RR is made.
5582  *        For integration with frontbuffer tracking code,
5583  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5584  *
5585  * DRRS can be further extended to support other internal panels and also
5586  * the scenario of video playback wherein RR is set based on the rate
5587  * requested by userspace.
5588  */
5589
5590 /**
5591  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5592  * @intel_connector: eDP connector
5593  * @fixed_mode: preferred mode of panel
5594  *
5595  * This function is  called only once at driver load to initialize basic
5596  * DRRS stuff.
5597  *
5598  * Returns:
5599  * Downclock mode if panel supports it, else return NULL.
5600  * DRRS support is determined by the presence of downclock mode (apart
5601  * from VBT setting).
5602  */
5603 static struct drm_display_mode *
5604 intel_dp_drrs_init(struct intel_connector *intel_connector,
5605                 struct drm_display_mode *fixed_mode)
5606 {
5607         struct drm_connector *connector = &intel_connector->base;
5608         struct drm_device *dev = connector->dev;
5609         struct drm_i915_private *dev_priv = dev->dev_private;
5610         struct drm_display_mode *downclock_mode = NULL;
5611
5612         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5613         mutex_init(&dev_priv->drrs.mutex);
5614
5615         if (INTEL_INFO(dev)->gen <= 6) {
5616                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5617                 return NULL;
5618         }
5619
5620         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5621                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5622                 return NULL;
5623         }
5624
5625         downclock_mode = intel_find_panel_downclock
5626                                         (dev, fixed_mode, connector);
5627
5628         if (!downclock_mode) {
5629                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5630                 return NULL;
5631         }
5632
5633         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5634
5635         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5636         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5637         return downclock_mode;
5638 }
5639
/*
 * Perform the eDP-specific half of connector init: sanitize the VDD
 * state, cache DPCD and EDID, pick the panel's fixed (and optional
 * downclock) mode, and set up the backlight.
 *
 * Returns true on success (trivially so for non-eDP), false when the
 * DPCD read fails and the panel is presumed to be a ghost.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	/* EDID read and mode-list walk must happen under mode_config.mutex. */
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but unusable; remember the error. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5746
/*
 * Initialize the DP/eDP connector attached to @intel_dig_port: pick the
 * per-generation AUX vfuncs, register the DRM connector, configure the
 * hotplug pin, set up the panel power sequencer for eDP, init MST where
 * supported, and hand eDP-specific setup to intel_edp_init_connector().
 *
 * Returns true on success; false for an invalid vlv/chv eDP port or
 * when the eDP panel turns out to be a ghost, in which case everything
 * registered here is unwound again.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs, chosen by hardware generation/platform. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* Panel power sequencer must be set up before any AUX/EDID access. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* Ghost eDP panel: unwind everything registered above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
5890
5891 void
5892 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5893 {
5894         struct drm_i915_private *dev_priv = dev->dev_private;
5895         struct intel_digital_port *intel_dig_port;
5896         struct intel_encoder *intel_encoder;
5897         struct drm_encoder *encoder;
5898         struct intel_connector *intel_connector;
5899
5900         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5901         if (!intel_dig_port)
5902                 return;
5903
5904         intel_connector = intel_connector_alloc();
5905         if (!intel_connector) {
5906                 kfree(intel_dig_port);
5907                 return;
5908         }
5909
5910         intel_encoder = &intel_dig_port->base;
5911         encoder = &intel_encoder->base;
5912
5913         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5914                          DRM_MODE_ENCODER_TMDS);
5915
5916         intel_encoder->compute_config = intel_dp_compute_config;
5917         intel_encoder->disable = intel_disable_dp;
5918         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5919         intel_encoder->get_config = intel_dp_get_config;
5920         intel_encoder->suspend = intel_dp_encoder_suspend;
5921         if (IS_CHERRYVIEW(dev)) {
5922                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5923                 intel_encoder->pre_enable = chv_pre_enable_dp;
5924                 intel_encoder->enable = vlv_enable_dp;
5925                 intel_encoder->post_disable = chv_post_disable_dp;
5926         } else if (IS_VALLEYVIEW(dev)) {
5927                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5928                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5929                 intel_encoder->enable = vlv_enable_dp;
5930                 intel_encoder->post_disable = vlv_post_disable_dp;
5931         } else {
5932                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5933                 intel_encoder->enable = g4x_enable_dp;
5934                 if (INTEL_INFO(dev)->gen >= 5)
5935                         intel_encoder->post_disable = ilk_post_disable_dp;
5936         }
5937
5938         intel_dig_port->port = port;
5939         intel_dig_port->dp.output_reg = output_reg;
5940
5941         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5942         if (IS_CHERRYVIEW(dev)) {
5943                 if (port == PORT_D)
5944                         intel_encoder->crtc_mask = 1 << 2;
5945                 else
5946                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5947         } else {
5948                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5949         }
5950         intel_encoder->cloneable = 0;
5951         intel_encoder->hot_plug = intel_dp_hot_plug;
5952
5953         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5954         dev_priv->hpd_irq_port[port] = intel_dig_port;
5955
5956         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5957                 drm_encoder_cleanup(encoder);
5958                 kfree(intel_dig_port);
5959                 kfree(intel_connector);
5960         }
5961 }
5962
5963 void intel_dp_mst_suspend(struct drm_device *dev)
5964 {
5965         struct drm_i915_private *dev_priv = dev->dev_private;
5966         int i;
5967
5968         /* disable MST */
5969         for (i = 0; i < I915_MAX_PORTS; i++) {
5970                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5971                 if (!intel_dig_port)
5972                         continue;
5973
5974                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5975                         if (!intel_dig_port->dp.can_mst)
5976                                 continue;
5977                         if (intel_dig_port->dp.is_mst)
5978                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5979                 }
5980         }
5981 }
5982
5983 void intel_dp_mst_resume(struct drm_device *dev)
5984 {
5985         struct drm_i915_private *dev_priv = dev->dev_private;
5986         int i;
5987
5988         for (i = 0; i < I915_MAX_PORTS; i++) {
5989                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5990                 if (!intel_dig_port)
5991                         continue;
5992                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5993                         int ret;
5994
5995                         if (!intel_dig_port->dp.can_mst)
5996                                 continue;
5997
5998                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5999                         if (ret != 0) {
6000                                 intel_dp_check_mst_status(&intel_dig_port->dp);
6001                         }
6002                 }
6003         }
6004 }