2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
/* Per-link-rate DPLL divider tables consumed by intel_dp_set_clock().
 * NOTE(review): the "{ DP_LINK_BW_x," key lines of the gen4/pch/vlv entries
 * are missing from this copy of the file -- confirm against upstream. */
49 static const struct dp_link_dpll gen4_dpll[] = {
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
/* PCH-split (ILK/SNB/IVB) divider values. */
56 static const struct dp_link_dpll pch_dpll[] = {
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
/* Valleyview divider values. */
63 static const struct dp_link_dpll vlv_dpll[] = {
65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
74 static const struct dp_link_dpll chv_dpll[] = {
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/* Source link rates in kHz: gen9 (Skylake) supports the full list,
 * older platforms only the three default RBR/HBR/HBR2 rates. */
87 /* Skylake supports following rates */
88 static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90 static const int default_rates[] = { 162000, 270000, 540000 };
93 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94 * @intel_dp: DP struct
96 * If a CPU or PCH DP output is attached to an eDP panel, this function
97 * will return true, and false otherwise.
99 static bool is_edp(struct intel_dp *intel_dp)
101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
103 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
/* Map an intel_dp back to its drm_device via the owning encoder. */
106 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110 return intel_dig_port->base.base.dev;
/* Fetch the intel_dp hanging off a connector's attached encoder. */
113 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
115 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
/* Forward declarations for helpers defined later in this file. */
118 static void intel_dp_link_down(struct intel_dp *intel_dp);
119 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
120 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
121 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
122 static void vlv_steal_power_sequencer(struct drm_device *dev,
/* intel_dp_max_link_bw - highest DP_LINK_BW_* code usable with this sink.
 * Starts from the sink's DP_MAX_LINK_RATE DPCD byte and clamps it:
 * HBR2 (5.4) is only kept on HSW non-ULX / gen8+ sinks with DPCD >= 1.2,
 * and is dropped on early SKL steppings (WaDisableHBR2:skl); unknown
 * values WARN and fall back to 1.62 Gbps.
 * NOTE(review): the "case DP_LINK_BW_2_7:", break statements and final
 * return line are missing from this copy -- confirm against upstream. */
126 intel_dp_max_link_bw(struct intel_dp *intel_dp)
128 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
129 struct drm_device *dev = intel_dp->attached_connector->base.dev;
131 switch (max_link_bw) {
132 case DP_LINK_BW_1_62:
135 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
136 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
137 /* WaDisableHBR2:skl */
138 max_link_bw = DP_LINK_BW_2_7;
139 else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
140 INTEL_INFO(dev)->gen >= 8) &&
141 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
142 max_link_bw = DP_LINK_BW_5_4;
144 max_link_bw = DP_LINK_BW_2_7;
147 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
149 max_link_bw = DP_LINK_BW_1_62;
/* Usable lane count: min of what the source port and the sink support.
 * NOTE(review): the lines assigning source_max (presumably 4, reduced to 2
 * for a DDI port A fused without DDI_A_4_LANES) are missing from this
 * copy -- confirm against upstream. */
155 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
157 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
158 struct drm_device *dev = intel_dig_port->base.base.dev;
159 u8 source_max, sink_max;
162 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
163 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
166 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
168 return min(source_max, sink_max);
172 * The units on the numbers in the next two are... bizarre. Examples will
173 * make it clearer; this one parallels an example in the eDP spec.
175 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
177 * 270000 * 1 * 8 / 10 == 216000
179 * The actual data capacity of that configuration is 2.16Gbit/s, so the
180 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
181 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182 * 119000. At 18bpp that's 2142000 kilobits per second.
184 * Thus the strange-looking division by 10 in intel_dp_link_required, to
185 * get the result in decakilobits instead of kilobits.
/*
 * intel_dp_link_required - required link bandwidth for a mode, in
 * decakilobits/s (see the unit discussion in the comment above).
 * @pixel_clock: mode clock in kHz
 * @bpp: bits per pixel
 *
 * Rounds up (the "+ 9") so a mode is never accepted on a link that is
 * fractionally too slow.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
/*
 * intel_dp_max_data_rate - max payload capacity of a link configuration,
 * in decakilobits/s (same units as intel_dp_link_required()).
 * @max_link_clock: per-lane link rate in kHz
 * @max_lanes: number of lanes
 *
 * The 8/10 factor accounts for DP's 8b/10b channel coding: each 10-bit
 * symbol on the wire carries 8 data bits.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
/* .mode_valid connector hook: reject modes that exceed the fixed eDP panel
 * size or the available link bandwidth, assuming the minimum 18 bpp.
 * NOTE(review): the MODE_PANEL returns for the oversize checks and the
 * final "return MODE_OK;" are missing from this copy. */
200 static enum drm_mode_status
201 intel_dp_mode_valid(struct drm_connector *connector,
202 struct drm_display_mode *mode)
204 struct intel_dp *intel_dp = intel_attached_dp(connector);
205 struct intel_connector *intel_connector = to_intel_connector(connector);
206 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
207 int target_clock = mode->clock;
208 int max_rate, mode_rate, max_lanes, max_link_clock;
/* eDP panels are validated against their fixed native mode. */
210 if (is_edp(intel_dp) && fixed_mode) {
211 if (mode->hdisplay > fixed_mode->hdisplay)
214 if (mode->vdisplay > fixed_mode->vdisplay)
217 target_clock = fixed_mode->clock;
220 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
221 max_lanes = intel_dp_max_lane_count(intel_dp);
223 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
/* 18 bpp is the lowest supported depth, i.e. the most permissive check. */
224 mode_rate = intel_dp_link_required(target_clock, 18);
226 if (mode_rate > max_rate)
227 return MODE_CLOCK_HIGH;
229 if (mode->clock < 10000)
230 return MODE_CLOCK_LOW;
232 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
233 return MODE_H_ILLEGAL;
/*
 * intel_dp_pack_aux - pack up to 4 bytes into a big-endian AUX data word.
 * @src: source bytes
 * @src_bytes: number of valid bytes in @src (clamped to 4)
 *
 * The AUX channel data registers hold 4 bytes each, MSB first; byte 0
 * lands in bits 31:24.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * intel_dp_unpack_aux - unpack a big-endian AUX data word into bytes.
 * @src: 32-bit AUX data register value
 * @dst: destination buffer
 * @dst_bytes: number of bytes to extract (clamped to 4)
 *
 * Inverse of intel_dp_pack_aux(): bits 31:24 become dst[0].
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
259 /* hrawclock is 1/4 the FSB frequency */
/* Return the hrawclk frequency in MHz, decoded from the CLKCFG FSB strap.
 * NOTE(review): the per-case "return N;" statements (and a default) are
 * missing from this copy -- confirm the values against upstream. */
261 intel_hrawclk(struct drm_device *dev)
263 struct drm_i915_private *dev_priv = dev->dev_private;
266 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
267 if (IS_VALLEYVIEW(dev))
270 clkcfg = I915_READ(CLKCFG);
271 switch (clkcfg & CLKCFG_FSB_MASK) {
280 case CLKCFG_FSB_1067:
282 case CLKCFG_FSB_1333:
284 /* these two are just a guess; one of them might be right */
285 case CLKCFG_FSB_1600:
286 case CLKCFG_FSB_1600_ALT:
/* Forward declarations: the PPS register programming lives further down. */
294 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
295 struct intel_dp *intel_dp);
297 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
298 struct intel_dp *intel_dp);
/* Acquire pps_mutex, taking a power domain reference first (the PPS
 * registers need the display power well up; see the ordering comment in
 * vlv_power_sequencer_reset()). */
300 static void pps_lock(struct intel_dp *intel_dp)
302 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
303 struct intel_encoder *encoder = &intel_dig_port->base;
304 struct drm_device *dev = encoder->base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum intel_display_power_domain power_domain;
309 * See vlv_power_sequencer_reset() why we need
310 * a power domain reference here.
312 power_domain = intel_display_port_power_domain(encoder);
313 intel_display_power_get(dev_priv, power_domain);
315 mutex_lock(&dev_priv->pps_mutex);
/* Counterpart of pps_lock(): drop the mutex, then the power reference. */
318 static void pps_unlock(struct intel_dp *intel_dp)
320 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
321 struct intel_encoder *encoder = &intel_dig_port->base;
322 struct drm_device *dev = encoder->base.dev;
323 struct drm_i915_private *dev_priv = dev->dev_private;
324 enum intel_display_power_domain power_domain;
326 mutex_unlock(&dev_priv->pps_mutex);
328 power_domain = intel_display_port_power_domain(encoder);
329 intel_display_power_put(dev_priv, power_domain);
/* Briefly enable+disable the DP port on intel_dp->pps_pipe so the VLV/CHV
 * panel power sequencer locks onto that port; needed before even the VDD
 * force bit works.  Temporarily forces the pipe's PLL on if required.
 * NOTE(review): declarations of DP and pll_enabled, the early return after
 * the WARN, and the trailing pll_enabled check around vlv_force_pll_off()
 * are missing from this copy. */
333 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
335 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
336 struct drm_device *dev = intel_dig_port->base.base.dev;
337 struct drm_i915_private *dev_priv = dev->dev_private;
338 enum pipe pipe = intel_dp->pps_pipe;
/* Refuse to kick while the port is live -- that would glitch the link. */
342 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
343 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
344 pipe_name(pipe), port_name(intel_dig_port->port)))
347 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
348 pipe_name(pipe), port_name(intel_dig_port->port));
350 /* Preserve the BIOS-computed detected bit. This is
351 * supposed to be read-only.
353 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
354 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
355 DP |= DP_PORT_WIDTH(1);
356 DP |= DP_LINK_TRAIN_PAT_1;
358 if (IS_CHERRYVIEW(dev))
359 DP |= DP_PIPE_SELECT_CHV(pipe);
360 else if (pipe == PIPE_B)
361 DP |= DP_PIPEB_SELECT;
363 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
366 * The DPLL for the pipe must be enabled for this to work.
367 * So enable temporarily it if it's not already enabled.
370 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
371 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
374 * Similar magic as in intel_dp_enable_port().
375 * We _must_ do this port enable + disable trick
376 * to make this power seqeuencer lock onto the port.
377 * Otherwise even VDD force bit won't work.
379 I915_WRITE(intel_dp->output_reg, DP);
380 POSTING_READ(intel_dp->output_reg);
382 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
383 POSTING_READ(intel_dp->output_reg);
385 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
386 POSTING_READ(intel_dp->output_reg);
389 vlv_force_pll_off(dev, pipe);
/* Pick (and claim) a panel power sequencer pipe for this eDP port.
 * Returns the cached pps_pipe if already assigned; otherwise selects a
 * pipe not used by any other eDP port, steals it if necessary, programs
 * the PPS registers, and kicks the sequencer so it locks onto the port.
 * Must be called with pps_mutex held.
 * NOTE(review): the "enum pipe pipe;" declaration and the fallback after
 * the WARN_ON(pipes == 0) are missing from this copy. */
393 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
395 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
396 struct drm_device *dev = intel_dig_port->base.base.dev;
397 struct drm_i915_private *dev_priv = dev->dev_private;
398 struct intel_encoder *encoder;
399 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
402 lockdep_assert_held(&dev_priv->pps_mutex);
404 /* We should never land here with regular DP ports */
405 WARN_ON(!is_edp(intel_dp));
407 if (intel_dp->pps_pipe != INVALID_PIPE)
408 return intel_dp->pps_pipe;
411 * We don't have power sequencer currently.
412 * Pick one that's not used by other ports.
414 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
416 struct intel_dp *tmp;
418 if (encoder->type != INTEL_OUTPUT_EDP)
421 tmp = enc_to_intel_dp(&encoder->base);
423 if (tmp->pps_pipe != INVALID_PIPE)
424 pipes &= ~(1 << tmp->pps_pipe);
428 * Didn't find one. This should not happen since there
429 * are two power sequencers and up to two eDP ports.
431 if (WARN_ON(pipes == 0))
434 pipe = ffs(pipes) - 1;
436 vlv_steal_power_sequencer(dev, pipe);
437 intel_dp->pps_pipe = pipe;
439 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
440 pipe_name(intel_dp->pps_pipe),
441 port_name(intel_dig_port->port));
443 /* init power sequencer on this pipe and port */
444 intel_dp_init_panel_power_sequencer(dev, intel_dp);
445 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
448 * Even vdd force doesn't work until we've made
449 * the power sequencer lock in on the port.
451 vlv_power_sequencer_kick(intel_dp);
453 return intel_dp->pps_pipe;
/* Predicate type used by vlv_initial_pps_pipe() to qualify a PPS pipe. */
456 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
/* True if the pipe's panel power sequencer reports panel power on. */
459 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
462 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
/* True if the pipe's PPS has the VDD force bit set. */
465 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
468 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
/* Always-true predicate: accept any pipe with the right port selected.
 * NOTE(review): its "return true;" body line is missing from this copy. */
471 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
/* Scan pipes A/B for one whose PPS port-select matches @port and which
 * satisfies @pipe_check; INVALID_PIPE return is presumably handled by
 * the missing tail of this function -- confirm against upstream. */
478 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
480 vlv_pipe_check pipe_check)
484 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
485 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
486 PANEL_PORT_SELECT_MASK;
488 if (port_sel != PANEL_PORT_SELECT_VLV(port))
491 if (!pipe_check(dev_priv, pipe))
/* At driver load, recover which PPS pipe the BIOS left driving this eDP
 * port.  Preference order: a pipe with the panel powered on, then one
 * with VDD forced on, then any pipe merely selecting this port; if none
 * match, defer to vlv_power_sequencer_pipe() at first use.
 * Must be called with pps_mutex held. */
501 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
503 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
504 struct drm_device *dev = intel_dig_port->base.base.dev;
505 struct drm_i915_private *dev_priv = dev->dev_private;
506 enum port port = intel_dig_port->port;
508 lockdep_assert_held(&dev_priv->pps_mutex);
510 /* try to find a pipe with this port selected */
511 /* first pick one where the panel is on */
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
514 /* didn't find one? pick one where vdd is on */
515 if (intel_dp->pps_pipe == INVALID_PIPE)
516 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
517 vlv_pipe_has_vdd_on);
518 /* didn't find one? pick one with just the correct port */
519 if (intel_dp->pps_pipe == INVALID_PIPE)
520 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
523 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
524 if (intel_dp->pps_pipe == INVALID_PIPE) {
525 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
530 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
531 port_name(port), pipe_name(intel_dp->pps_pipe));
533 intel_dp_init_panel_power_sequencer(dev, intel_dp);
534 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/* Invalidate every eDP port's cached pps_pipe (e.g. across a power well
 * toggle), forcing re-selection on next use.  Deliberately does NOT take
 * pps_mutex -- see the lock-ordering comment below. */
537 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
539 struct drm_device *dev = dev_priv->dev;
540 struct intel_encoder *encoder;
542 if (WARN_ON(!IS_VALLEYVIEW(dev)))
546 * We can't grab pps_mutex here due to deadlock with power_domain
547 * mutex when power_domain functions are called while holding pps_mutex.
548 * That also means that in order to use pps_pipe the code needs to
549 * hold both a power domain reference and pps_mutex, and the power domain
550 * reference get/put must be done while _not_ holding pps_mutex.
551 * pps_{lock,unlock}() do these steps in the correct order, so one
552 * should use them always.
555 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
556 struct intel_dp *intel_dp;
558 if (encoder->type != INTEL_OUTPUT_EDP)
561 intel_dp = enc_to_intel_dp(&encoder->base);
562 intel_dp->pps_pipe = INVALID_PIPE;
/* Panel power CONTROL register for this port: fixed PCH register on
 * PCH-split platforms, per-PPS-pipe register on VLV/CHV. */
566 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
568 struct drm_device *dev = intel_dp_to_dev(intel_dp);
570 if (HAS_PCH_SPLIT(dev))
571 return PCH_PP_CONTROL;
573 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
/* Panel power STATUS register, same platform split as _pp_ctrl_reg(). */
576 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
578 struct drm_device *dev = intel_dp_to_dev(intel_dp);
580 if (HAS_PCH_SPLIT(dev))
581 return PCH_PP_STATUS;
583 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
586 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
587 This function only applicable when panel PM state is not to be tracked */
/* Registered via intel_dp->edp_notifier; on SYS_RESTART it forces the
 * panel off and waits panel_power_cycle_delay so the eDP T12 minimum
 * off-time is honored across the reboot.
 * NOTE(review): the pps_lock() call matching the pps_unlock() below and
 * the final return NOTIFY_* line are missing from this copy. */
588 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
591 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
593 struct drm_device *dev = intel_dp_to_dev(intel_dp);
594 struct drm_i915_private *dev_priv = dev->dev_private;
596 u32 pp_ctrl_reg, pp_div_reg;
598 if (!is_edp(intel_dp) || code != SYS_RESTART)
603 if (IS_VALLEYVIEW(dev)) {
604 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
606 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
607 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
608 pp_div = I915_READ(pp_div_reg);
609 pp_div &= PP_REFERENCE_DIVIDER_MASK;
611 /* 0x1F write to PP_DIV_REG sets max cycle delay */
612 I915_WRITE(pp_div_reg, pp_div | 0x1F);
613 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
614 msleep(intel_dp->panel_power_cycle_delay);
617 pps_unlock(intel_dp);
/* True if the PPS reports panel power on.  On VLV an unassigned pps_pipe
 * means we can't consult any PPS registers (the missing line presumably
 * returns false in that case -- confirm against upstream). */
622 static bool edp_have_panel_power(struct intel_dp *intel_dp)
624 struct drm_device *dev = intel_dp_to_dev(intel_dp);
625 struct drm_i915_private *dev_priv = dev->dev_private;
627 lockdep_assert_held(&dev_priv->pps_mutex);
629 if (IS_VALLEYVIEW(dev) &&
630 intel_dp->pps_pipe == INVALID_PIPE)
633 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
/* True if the VDD force bit is set; same VLV INVALID_PIPE caveat. */
636 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
638 struct drm_device *dev = intel_dp_to_dev(intel_dp);
639 struct drm_i915_private *dev_priv = dev->dev_private;
641 lockdep_assert_held(&dev_priv->pps_mutex);
643 if (IS_VALLEYVIEW(dev) &&
644 intel_dp->pps_pipe == INVALID_PIPE)
647 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
/* Debug aid: WARN if AUX traffic is attempted to an eDP panel that has
 * neither panel power nor VDD force -- the transfer would time out. */
651 intel_dp_check_edp(struct intel_dp *intel_dp)
653 struct drm_device *dev = intel_dp_to_dev(intel_dp);
654 struct drm_i915_private *dev_priv = dev->dev_private;
656 if (!is_edp(intel_dp))
659 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
660 WARN(1, "eDP powered off while attempting aux channel communication.\n");
661 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
662 I915_READ(_pp_stat_reg(intel_dp)),
663 I915_READ(_pp_ctrl_reg(intel_dp)));
/* Wait (10 ms budget) for the AUX SEND_BUSY bit to clear, either via the
 * gmbus wait queue (irq-driven) or by atomic polling; returns the final
 * AUX_CH_CTL status word.
 * NOTE(review): the has_aux_irq if/else around the two wait variants,
 * the "if (!done)" before DRM_ERROR, and the return are missing here. */
668 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
670 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
671 struct drm_device *dev = intel_dig_port->base.base.dev;
672 struct drm_i915_private *dev_priv = dev->dev_private;
673 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
/* C re-reads status each evaluation; true once SEND_BUSY deasserts. */
677 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
679 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
680 msecs_to_jiffies_timeout(10));
682 done = wait_for_atomic(C, 10) == 0;
684 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
/* AUX clock divider for pre-ILK: hrawclk / 2 targets a 2 MHz AUX clock.
 * Non-zero index means "no more dividers to try". */
691 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
694 struct drm_device *dev = intel_dig_port->base.base.dev;
697 * The clock divider is based off the hrawclk, and would like to run at
698 * 2MHz. So, take the hrawclk value and divide by 2 and use that
700 return index ? 0 : intel_hrawclk(dev) / 2;
/* ILK..IVB divider: port A runs off the CPU eDP input clock (400/450 MHz
 * halved), other ports off the PCH rawclk.
 * NOTE(review): the "if (index)" guard returning 0 is missing here. */
703 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
711 if (intel_dig_port->port == PORT_A) {
712 if (IS_GEN6(dev) || IS_GEN7(dev))
713 return 200; /* SNB & IVB eDP input clock at 400Mhz */
715 return 225; /* eDP input clock at 450Mhz */
717 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/* HSW/BDW divider: port A derives from CDCLK; LPT:H gets a two-entry
 * workaround table (the second divider value lines are missing here). */
721 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
723 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
724 struct drm_device *dev = intel_dig_port->base.base.dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
727 if (intel_dig_port->port == PORT_A) {
730 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
731 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
732 /* Workaround for non-ULT HSW */
739 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/*
 * vlv_get_aux_clock_divider - AUX clock divider for Valleyview.
 * VLV hrawclk is a fixed 200 MHz (see intel_hrawclk()), so 200/2 = 100
 * gives the 2 MHz AUX clock the other platforms also target.  Only
 * index 0 is valid; non-zero index ends the caller's divider loop.
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code: a
 * dummy non-zero value for index 0 makes the caller's loop run once,
 * and 0 for any other index terminates it.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 1;
}
/* Build the AUX_CH_CTL value for a send on pre-SKL hardware: busy bit,
 * done/error interrupt enables, timeout, message size, precharge and the
 * 2x bit-clock divider.
 * NOTE(review): the send_bytes/has_aux_irq parameter lines, the precharge
 * assignment, and the TIME_OUT field in the returned word are missing
 * from this copy -- confirm against upstream. */
758 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
761 uint32_t aux_clock_divider)
763 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
764 struct drm_device *dev = intel_dig_port->base.base.dev;
765 uint32_t precharge, timeout;
/* BDW port A wants the longer 600us timeout. */
772 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
773 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
775 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
777 return DP_AUX_CH_CTL_SEND_BUSY |
779 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
780 DP_AUX_CH_CTL_TIME_OUT_ERROR |
782 DP_AUX_CH_CTL_RECEIVE_ERROR |
783 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
784 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
785 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
/* SKL variant: no clock divider field, fixed 1600us timeout, explicit
 * sync pulse count (parameter lines missing from this copy). */
788 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
793 return DP_AUX_CH_CTL_SEND_BUSY |
795 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
796 DP_AUX_CH_CTL_TIME_OUT_ERROR |
797 DP_AUX_CH_CTL_TIME_OUT_1600us |
798 DP_AUX_CH_CTL_RECEIVE_ERROR |
799 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
800 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
/* Core AUX channel transfer: write up to 20 bytes (5 data registers) into
 * the AUX data regs, fire the transaction, retry per the DP spec across
 * all available clock dividers, then unpack the reply into @recv.
 * Returns the number of bytes received or a negative errno (the error
 * labels / "ret = -E*" lines and several declarations are missing from
 * this copy -- confirm control flow against upstream).
 * VDD/panel power, pm_qos and runtime-pm references bracket the I/O. */
804 intel_dp_aux_ch(struct intel_dp *intel_dp,
805 const uint8_t *send, int send_bytes,
806 uint8_t *recv, int recv_size)
808 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
809 struct drm_device *dev = intel_dig_port->base.base.dev;
810 struct drm_i915_private *dev_priv = dev->dev_private;
811 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
/* data registers immediately follow the control register */
812 uint32_t ch_data = ch_ctl + 4;
813 uint32_t aux_clock_divider;
814 int i, ret, recv_bytes;
817 bool has_aux_irq = HAS_AUX_IRQ(dev);
823 * We will be called with VDD already enabled for dpcd/edid/oui reads.
824 * In such cases we want to leave VDD enabled and it's up to upper layers
825 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
828 vdd = edp_panel_vdd_on(intel_dp);
830 /* dp aux is extremely sensitive to irq latency, hence request the
831 * lowest possible wakeup latency and so prevent the cpu from going into
834 pm_qos_update_request(&dev_priv->pm_qos, 0);
836 intel_dp_check_edp(intel_dp);
838 intel_aux_display_runtime_get(dev_priv);
840 /* Try to wait for any previous AUX channel activity */
841 for (try = 0; try < 3; try++) {
842 status = I915_READ_NOTRACE(ch_ctl);
843 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
849 WARN(1, "dp_aux_ch not started status 0x%08x\n",
855 /* Only 5 data registers! */
856 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
861 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
862 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
867 /* Must try at least 3 times according to DP spec */
868 for (try = 0; try < 5; try++) {
869 /* Load the send data into the aux channel data registers */
870 for (i = 0; i < send_bytes; i += 4)
871 I915_WRITE(ch_data + i,
872 intel_dp_pack_aux(send + i,
875 /* Send the command and wait for it to complete */
876 I915_WRITE(ch_ctl, send_ctl);
878 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
880 /* Clear done status and any errors */
884 DP_AUX_CH_CTL_TIME_OUT_ERROR |
885 DP_AUX_CH_CTL_RECEIVE_ERROR);
887 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
888 DP_AUX_CH_CTL_RECEIVE_ERROR))
890 if (status & DP_AUX_CH_CTL_DONE)
893 if (status & DP_AUX_CH_CTL_DONE)
897 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
898 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
903 /* Check for timeout or receive error.
904 * Timeouts occur when the sink is not connected
906 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
907 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
912 /* Timeouts occur when the device isn't connected, so they're
913 * "normal" -- don't fill the kernel log with these */
914 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
915 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
920 /* Unload any bytes sent back from the other side */
921 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
922 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
923 if (recv_bytes > recv_size)
924 recv_bytes = recv_size;
926 for (i = 0; i < recv_bytes; i += 4)
927 intel_dp_unpack_aux(I915_READ(ch_data + i),
928 recv + i, recv_bytes - i);
/* unwind pm_qos / runtime-pm / VDD in reverse order of acquisition */
932 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
933 intel_aux_display_runtime_put(dev_priv);
936 edp_panel_vdd_off(intel_dp, false);
938 pps_unlock(intel_dp);
/* AUX message header: 3 address/request bytes, plus 1 length byte when a
 * payload is present. */
943 #define BARE_ADDRESS_SIZE 3
944 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
/* drm_dp_aux .transfer hook: marshal a drm_dp_aux_msg into the raw
 * header + payload format intel_dp_aux_ch() expects, and demarshal the
 * reply byte / payload on the way back.
 * NOTE(review): the "if (ret > 0)" guards around the reply handling, the
 * default: -EINVAL case and the final return are missing from this copy. */
946 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
948 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
949 uint8_t txbuf[20], rxbuf[20];
950 size_t txsize, rxsize;
953 txbuf[0] = msg->request << 4;
954 txbuf[1] = msg->address >> 8;
955 txbuf[2] = msg->address & 0xff;
956 txbuf[3] = msg->size - 1;
958 switch (msg->request & ~DP_AUX_I2C_MOT) {
959 case DP_AUX_NATIVE_WRITE:
960 case DP_AUX_I2C_WRITE:
/* zero-size writes use the bare 3-byte address-only header */
961 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
964 if (WARN_ON(txsize > 20))
967 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
969 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
971 msg->reply = rxbuf[0] >> 4;
973 /* Return payload size. */
978 case DP_AUX_NATIVE_READ:
979 case DP_AUX_I2C_READ:
980 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
/* +1 for the reply byte that precedes the read-back payload */
981 rxsize = msg->size + 1;
983 if (WARN_ON(rxsize > 20))
986 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
988 msg->reply = rxbuf[0] >> 4;
990 * Assume happy day, and copy the data. The caller is
991 * expected to check msg->reply before touching it.
993 * Return payload size.
996 memcpy(msg->buffer, rxbuf + 1, ret);
/* One-time AUX setup for a port: pick the AUX_CH_CTL register and channel
 * name by port, register the drm_dp_aux, and link it into sysfs under the
 * connector.
 * NOTE(review): the per-port "name = ..." assignments, the case labels,
 * and the returns after the error paths are missing from this copy. */
1009 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1011 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1012 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1013 enum port port = intel_dig_port->port;
1014 const char *name = NULL;
1019 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1023 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1027 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1031 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1039 * The AUX_CTL register is usually DP_CTL + 0x10.
1041 * On Haswell and Broadwell though:
1042 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1043 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1045 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1047 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1048 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1050 intel_dp->aux.name = name;
1051 intel_dp->aux.dev = dev->dev;
1052 intel_dp->aux.transfer = intel_dp_aux_transfer;
1054 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1055 connector->base.kdev->kobj.name);
1057 ret = drm_dp_aux_register(&intel_dp->aux);
1059 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
/* expose the i2c-over-AUX adapter under the connector's sysfs node */
1064 ret = sysfs_create_link(&connector->base.kdev->kobj,
1065 &intel_dp->aux.ddc.dev.kobj,
1066 intel_dp->aux.ddc.dev.kobj.name);
1068 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1069 drm_dp_aux_unregister(&intel_dp->aux);
/* Connector unregister hook: drop the sysfs DDC link created by
 * intel_dp_aux_init() (MST ports never had one), then chain to the
 * generic connector unregister. */
1074 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1076 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1078 if (!intel_connector->mst_port)
1079 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1080 intel_dp->aux.ddc.dev.kobj.name);
1081 intel_connector_unregister(intel_connector);
/* Program pipe_config to drive eDP from SKL DPLL0 at the requested link
 * clock, mapping link_clock/2 onto the DPLL_CRTL1_LINK_RATE_* codes.
 * NOTE(review): the "case NNN:" value lines and breaks of this switch are
 * missing from this copy -- confirm the rate mapping against upstream. */
1085 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1089 pipe_config->ddi_pll_sel = SKL_DPLL0;
1090 pipe_config->dpll_hw_state.cfgcr1 = 0;
1091 pipe_config->dpll_hw_state.cfgcr2 = 0;
1093 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1094 switch (link_clock / 2) {
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1100 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1108 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1111 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1112 results in CDCLK change. Need to handle the change of CDCLK by
1113 disabling pipes and re-enabling them */
1115 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1119 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1124 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
/* HSW/BDW: select the fixed LCPLL output matching the DP link bandwidth
 * code (810/1350/2700 MHz for 1.62/2.7/5.4 Gbps). */
1128 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1131 case DP_LINK_BW_1_62:
1132 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1134 case DP_LINK_BW_2_7:
1135 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1137 case DP_LINK_BW_5_4:
1138 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
/* Report the sink's supported link rates: the DPCD-advertised table for
 * eDP 1.4 sinks, otherwise default_rates truncated to the sink's max
 * link-bw code (the BW code / 8 + 1 maps 0x06/0x0a/0x14 to 1/2/3). */
1144 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1146 if (intel_dp->num_supported_rates) {
1147 *sink_rates = intel_dp->supported_rates;
1148 return intel_dp->num_supported_rates;
1151 *sink_rates = default_rates;
1153 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
/* Report the source's supported link rates: the full gen9 table on SKL+,
 * otherwise default_rates truncated the same way as for the sink. */
1157 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1159 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1161 if (INTEL_INFO(dev)->gen >= 9) {
1162 *source_rates = gen9_rates;
1163 return ARRAY_SIZE(gen9_rates);
1166 *source_rates = default_rates;
1168 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
/* Copy the platform's precomputed DPLL dividers for @link_bw into
 * pipe_config (tables at the top of this file) and mark the clock as set.
 * NOTE(review): the IS_G4X branch condition line and the divisor
 * assignments for pch/chv/vlv are missing from this copy. */
1172 intel_dp_set_clock(struct intel_encoder *encoder,
1173 struct intel_crtc_state *pipe_config, int link_bw)
1175 struct drm_device *dev = encoder->base.dev;
1176 const struct dp_link_dpll *divisor = NULL;
1180 divisor = gen4_dpll;
1181 count = ARRAY_SIZE(gen4_dpll);
1182 } else if (HAS_PCH_SPLIT(dev)) {
1184 count = ARRAY_SIZE(pch_dpll);
1185 } else if (IS_CHERRYVIEW(dev)) {
1187 count = ARRAY_SIZE(chv_dpll);
1188 } else if (IS_VALLEYVIEW(dev)) {
1190 count = ARRAY_SIZE(vlv_dpll);
1193 if (divisor && count) {
1194 for (i = 0; i < count; i++) {
1195 if (link_bw == divisor[i].link_bw) {
1196 pipe_config->dpll = divisor[i].dpll;
1197 pipe_config->clock_set = true;
/*
 * intel_supported_rates - intersect source and sink link-rate tables.
 * @source_rates: ascending source rates (kHz)
 * @source_len: entries in @source_rates
 * @sink_rates: ascending sink rates (kHz), may be empty
 * @sink_len: entries in @sink_rates (0 for pre-eDP-1.4 panels)
 * @supported_rates: out: common rates, ascending
 *
 * Returns the number of entries written to @supported_rates.  Both input
 * tables must be sorted ascending (they are; see gen9_rates /
 * default_rates and the DPCD-supplied sink table).
 */
static int intel_supported_rates(const int *source_rates, int source_len,
				 const int *sink_rates, int sink_len,
				 int *supported_rates)
{
	int i = 0, j = 0, k = 0;

	/* For panels with edp version less than 1.4 */
	if (sink_len == 0) {
		for (i = 0; i < source_len; ++i)
			supported_rates[i] = source_rates[i];
		return source_len;
	}

	/* For edp1.4 panels, find the common rates between source and sink */
	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			supported_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}

	return k;
}
1233 static int rate_to_index(int find, const int *rates)
1237 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1238 if (find == rates[i])
/*
 * Compute the DP link configuration for @pipe_config: pick a pipe bpp,
 * lane count and link rate that can carry the adjusted mode, then derive
 * the m/n dividers and (per-platform) the PLL settings from the choice.
 */
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int link_avail, link_clock;
	const int *sink_rates;
	int supported_rates[8] = {0};
	const int *source_rates;
	int source_len, sink_len, supported_len;

	/* Build the set of link rates both ends can use. */
	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	supported_len = intel_supported_rates(source_rates, source_len,
			sink_rates, sink_len, supported_rates);

	/* No common link rates between source and sink */
	WARN_ON(supported_len <= 0);

	/* Rates are indexed; the last entry is the fastest usable one. */
	max_clock = supported_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		/* eDP: force the panel's fixed mode and apply panel fitting. */
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);

	/* Double-clocked modes are not supported on DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, supported_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;

	/* Search: lowest bpp step that still fits, scanning clock/lanes. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
				link_clock = supported_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,

				if (mode_rate <= link_avail) {

	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			intel_dp->color_range = 0;

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;
		drm_dp_link_rate_to_bw_code(supported_rates[clock]);

	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
		/* Gen9+: select the rate by index, not by BW code. */
		intel_dp->rate_select =
			rate_to_index(supported_rates[clock], sink_rates);
		intel_dp->link_bw = 0;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = supported_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			/* Second set of m/n for the downclocked (DRRS) mode. */
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);

	/* Per-platform PLL selection for the chosen link rate. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
/*
 * Program the CPU eDP (DP_A) PLL frequency select to match the crtc's
 * configured port clock (162 MHz vs 270 MHz link).
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		/* Any other rate uses the 270 MHz PLL setting. */
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, dpa_ctl);
/*
 * Build the DP port register value (intel_dp->DP) for the current crtc
 * config: voltage/pre-emphasis defaults, lane count, audio enable, sync
 * polarity, enhanced framing and pipe select — split per register layout
 * (IBX/CPU vs CPT vs CHV).
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* Gen7 CPU eDP: CPT-style train-off and pipe-select bits. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		/* Pipe select lives at bit 29 in this layout. */
		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
			/* CHV has a dedicated pipe-select field. */
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		/* CPT: training is driven via TRANS_DP_CTL elsewhere. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
/*
 * Mask/value pairs matched against PP_STATUS by wait_panel_status() to
 * detect the panel power sequencer reaching the on, off, or
 * power-cycle-idle state. The literal 0 placeholders keep the four
 * bitfield positions visually aligned across the definitions.
 */
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll PP_STATUS until (status & mask) == value, or time out after 5s.
 * Caller must hold pps_mutex. A timeout only logs an error; it does not
 * propagate failure.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	/* 5000 ms total, polling every 10 ms. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
/* Block until the power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * Honour the panel's mandatory power-cycle delay before powering it on
 * again: first the software-tracked remainder since the last power
 * cycle, then the hardware sequencer's own idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
/* Wait out the panel's power-on → backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
/* Wait out the backlight-off → panel-power-down delay (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
/* Read the current pp_control value, unlocking the register if it
 * is locked: the write-protect key bits are forced to the unlock
 * value so a subsequent write of the returned value takes effect.
 * Caller must hold pps_mutex.
 */
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
/*
 * Force eDP panel VDD on so the AUX channel can be used.
 *
 * Returns true if VDD was not already requested (i.e. the caller is
 * responsible for a matching off call).
 *
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))

	/* Keep the delayed off work from racing with this request. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* Hardware already has VDD up: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);

	return need_to_disable;
/*
 * Public, self-locking variant of edp_panel_vdd_on(); warns if VDD was
 * already requested (non-nestable by design).
 *
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
	if (!is_edp(intel_dp))

	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
/*
 * Synchronously turn panel VDD off and drop the power-domain reference
 * taken when it was turned on. Caller must hold pps_mutex; VDD must no
 * longer be wanted (want_panel_vdd == false).
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with panel power off starts a power cycle. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work callback: drop VDD if nobody re-requested it in the
 * meantime. Takes/releases the pps lock itself.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
/*
 * Schedule the deferred VDD-off work well after the power-down delay,
 * so bursts of AUX traffic don't thrash panel power.
 */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
/*
 * Release a VDD request: either synchronously (sync == true) or by
 * scheduling the delayed off work.
 *
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

		edp_panel_vdd_off_sync(intel_dp);
		edp_panel_vdd_schedule_off(intel_dp);
/*
 * Turn eDP panel power on via the power sequencer. Caller must hold
 * pps_mutex. No-op (with a WARN) if panel power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);

	/* ILK workaround: disable reset around power sequence */
	pp &= ~PANEL_POWER_RESET;
	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pp |= POWER_TARGET_ON;
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
/* Public, self-locking wrapper around edp_panel_on(). */
void intel_edp_panel_on(struct intel_dp *intel_dp)
	if (!is_edp(intel_dp))

	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
/*
 * Turn eDP panel power off. Requires a live VDD request (the panel
 * power and forced VDD are dropped together) and drops the power-domain
 * reference that VDD-on took. Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
/* Public, self-locking wrapper around edp_panel_off(). */
void intel_edp_panel_off(struct intel_dp *intel_dp)
	if (!is_edp(intel_dp))

	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
	if (!is_edp(intel_dp))

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the PP control backlight enable bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Start the T9 backlight-off delay clock. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
	if (!is_edp(intel_dp))

	DRM_DEBUG_KMS("\n");

	/* Reverse order of enable: PP control bit first, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);

	/* Read current state under the pps lock, then act only on change. */
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	if (is_enabled == enable)

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

		_intel_edp_backlight_on(intel_dp);
		_intel_edp_backlight_off(intel_dp);
/*
 * Enable the CPU eDP PLL in DP_A. Requires the pipe to be disabled and
 * both the PLL and the port to currently be off (WARNed otherwise).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
/*
 * Disable the CPU eDP PLL in DP_A. Requires the pipe to be disabled,
 * the PLL currently on, and the port off (WARNed otherwise).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)

	if (mode != DRM_MODE_DPMS_ON) {
		/* Any non-ON DPMS mode puts the sink to sleep (D3). */
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,

		/* Best-effort: failure is only logged, not propagated. */
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * Read back whether this DP port is enabled in hardware and, if so,
 * which pipe drives it. On CPT the pipe must be found by scanning the
 * TRANS_DP_CTL port-select fields.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))

	/* Decode the pipe-select field per register layout. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);

		/* CPT: pipe select lives in TRANS_DP_CTL, keyed by port. */
		switch (intel_dp->output_reg) {
			trans_sel = TRANS_DP_PORT_SEL_B;
			trans_sel = TRANS_DP_PORT_SEL_C;
			trans_sel = TRANS_DP_PORT_SEL_D;

		for_each_pipe(dev_priv, i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
/*
 * Reconstruct @pipe_config from the hardware state of this DP port:
 * audio enable, sync polarity flags, color range, m/n values, port
 * clock and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	tmp = I915_READ(intel_dp->output_reg);
	if (tmp & DP_AUDIO_OUTPUT_ENABLE)
		pipe_config->has_audio = true;

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		/* Sync polarity is in the port register itself. */
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
			flags |= DRM_MODE_FLAG_NVSYNC;
		/* CPT: sync polarity moved to TRANS_DP_CTL. */
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
			flags |= DRM_MODE_FLAG_NVSYNC;

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* CPU eDP: derive link rate from the DP_A PLL freq select. */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
			pipe_config->port_clock = 270000;

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
/*
 * Encoder disable hook: tear down audio/PSR, put the sink to sleep and
 * power the eDP panel off (under VDD), and on gen<5 take the link down
 * before the pipe is disabled.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
/*
 * ILK+ post-disable: take the link down after the pipe is off, and
 * for CPU eDP (port A) also shut the eDP PLL down.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
/* VLV post-disable: just take the DP link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
/*
 * CHV post-disable: take the link down, then assert the PHY data-lane
 * resets through the DPIO PCS registers (both lane pairs of the
 * channel), serialized by dpio_lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->dpio_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
/*
 * Translate a DPCD training pattern (@dp_train_pat) into the
 * platform-specific link-train bits: DP_TP_CTL on DDI hardware, the
 * *_CPT field layout on CPT PCH, or the legacy/CHV port register
 * layout otherwise. Updates the caller's shadow register value.
 */
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint8_t dp_train_pat)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

		/* DDI path: program DP_TP_CTL directly. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		case DP_TRAINING_PATTERN_3:
			/* CPT hardware lacks pattern 3: fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;

		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
				/* Only CHV supports pattern 3 in this layout. */
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
/*
 * Write the port register twice: once with training pattern 1 set up
 * but the port still disabled, then again with DP_PORT_EN — the
 * two-step write is required on VLV/CHV (see comment below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
/*
 * Common DP enable: bring the port up, power the eDP panel (under a
 * temporary VDD request), wake the sink, run link training, and enable
 * audio if configured. WARNs if the port is already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))

	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Panel power on bracketed by a VDD request. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
2442 static void g4x_enable_dp(struct intel_encoder *encoder)
2444 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2446 intel_enable_dp(encoder);
2447 intel_edp_backlight_on(intel_dp);
2450 static void vlv_enable_dp(struct intel_encoder *encoder)
2452 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2454 intel_edp_backlight_on(intel_dp);
2455 intel_psr_enable(intel_dp);
2458 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2460 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2461 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2463 intel_dp_prepare(encoder);
2465 /* Only ilk+ has port A */
2466 if (dport->port == PORT_A) {
2467 ironlake_set_pll_cpu_edp(intel_dp);
2468 ironlake_edp_pll_on(intel_dp);
2472 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2474 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2475 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2476 enum pipe pipe = intel_dp->pps_pipe;
2477 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2479 edp_panel_vdd_off_sync(intel_dp);
2482 * VLV seems to get confused when multiple power seqeuencers
2483 * have the same port selected (even if only one has power/vdd
2484 * enabled). The failure manifests as vlv_wait_port_ready() failing
2485 * CHV on the other hand doesn't seem to mind having the same port
2486 * selected in multiple power seqeuencers, but let's clear the
2487 * port select always when logically disconnecting a power sequencer
2490 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2491 pipe_name(pipe), port_name(intel_dig_port->port));
2492 I915_WRITE(pp_on_reg, 0);
2493 POSTING_READ(pp_on_reg);
2495 intel_dp->pps_pipe = INVALID_PIPE;
2498 static void vlv_steal_power_sequencer(struct drm_device *dev,
2501 struct drm_i915_private *dev_priv = dev->dev_private;
2502 struct intel_encoder *encoder;
2504 lockdep_assert_held(&dev_priv->pps_mutex);
2506 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2509 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2511 struct intel_dp *intel_dp;
2514 if (encoder->type != INTEL_OUTPUT_EDP)
2517 intel_dp = enc_to_intel_dp(&encoder->base);
2518 port = dp_to_dig_port(intel_dp)->port;
2520 if (intel_dp->pps_pipe != pipe)
2523 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2524 pipe_name(pipe), port_name(port));
2526 WARN(encoder->connectors_active,
2527 "stealing pipe %c power sequencer from active eDP port %c\n",
2528 pipe_name(pipe), port_name(port));
2530 /* make sure vdd is off before we steal it */
2531 vlv_detach_power_sequencer(intel_dp);
2535 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2537 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2538 struct intel_encoder *encoder = &intel_dig_port->base;
2539 struct drm_device *dev = encoder->base.dev;
2540 struct drm_i915_private *dev_priv = dev->dev_private;
2541 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2543 lockdep_assert_held(&dev_priv->pps_mutex);
2545 if (!is_edp(intel_dp))
2548 if (intel_dp->pps_pipe == crtc->pipe)
2552 * If another power sequencer was being used on this
2553 * port previously make sure to turn off vdd there while
2554 * we still have control of it.
2556 if (intel_dp->pps_pipe != INVALID_PIPE)
2557 vlv_detach_power_sequencer(intel_dp);
2560 * We may be stealing the power
2561 * sequencer from another port.
2563 vlv_steal_power_sequencer(dev, crtc->pipe);
2565 /* now it's all ours */
2566 intel_dp->pps_pipe = crtc->pipe;
2568 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2569 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2571 /* init power sequencer on this pipe and port */
2572 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2573 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2576 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2578 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2579 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2580 struct drm_device *dev = encoder->base.dev;
2581 struct drm_i915_private *dev_priv = dev->dev_private;
2582 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2583 enum dpio_channel port = vlv_dport_to_channel(dport);
2584 int pipe = intel_crtc->pipe;
2587 mutex_lock(&dev_priv->dpio_lock);
2589 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2596 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2597 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2598 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2600 mutex_unlock(&dev_priv->dpio_lock);
2602 intel_enable_dp(encoder);
2605 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2607 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2608 struct drm_device *dev = encoder->base.dev;
2609 struct drm_i915_private *dev_priv = dev->dev_private;
2610 struct intel_crtc *intel_crtc =
2611 to_intel_crtc(encoder->base.crtc);
2612 enum dpio_channel port = vlv_dport_to_channel(dport);
2613 int pipe = intel_crtc->pipe;
2615 intel_dp_prepare(encoder);
2617 /* Program Tx lane resets to default */
2618 mutex_lock(&dev_priv->dpio_lock);
2619 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2620 DPIO_PCS_TX_LANE2_RESET |
2621 DPIO_PCS_TX_LANE1_RESET);
2622 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2623 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2624 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2625 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2626 DPIO_PCS_CLK_SOFT_RESET);
2628 /* Fix up inter-pair skew failure */
2629 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2630 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2631 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2632 mutex_unlock(&dev_priv->dpio_lock);
2635 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2637 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2638 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2639 struct drm_device *dev = encoder->base.dev;
2640 struct drm_i915_private *dev_priv = dev->dev_private;
2641 struct intel_crtc *intel_crtc =
2642 to_intel_crtc(encoder->base.crtc);
2643 enum dpio_channel ch = vlv_dport_to_channel(dport);
2644 int pipe = intel_crtc->pipe;
2648 mutex_lock(&dev_priv->dpio_lock);
2650 /* allow hardware to manage TX FIFO reset source */
2651 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2652 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2653 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2655 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2656 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2657 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2659 /* Deassert soft data lane reset*/
2660 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2661 val |= CHV_PCS_REQ_SOFTRESET_EN;
2662 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2664 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2665 val |= CHV_PCS_REQ_SOFTRESET_EN;
2666 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2668 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2669 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2670 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2672 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2673 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2674 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2676 /* Program Tx lane latency optimal setting*/
2677 for (i = 0; i < 4; i++) {
2678 /* Set the latency optimal bit */
2679 data = (i == 1) ? 0x0 : 0x6;
2680 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2681 data << DPIO_FRC_LATENCY_SHFIT);
2683 /* Set the upar bit */
2684 data = (i == 1) ? 0x0 : 0x1;
2685 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2686 data << DPIO_UPAR_SHIFT);
2689 /* Data lane stagger programming */
2690 /* FIXME: Fix up value only after power analysis */
2692 mutex_unlock(&dev_priv->dpio_lock);
2694 intel_enable_dp(encoder);
2697 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2699 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2700 struct drm_device *dev = encoder->base.dev;
2701 struct drm_i915_private *dev_priv = dev->dev_private;
2702 struct intel_crtc *intel_crtc =
2703 to_intel_crtc(encoder->base.crtc);
2704 enum dpio_channel ch = vlv_dport_to_channel(dport);
2705 enum pipe pipe = intel_crtc->pipe;
2708 intel_dp_prepare(encoder);
2710 mutex_lock(&dev_priv->dpio_lock);
2712 /* program left/right clock distribution */
2713 if (pipe != PIPE_B) {
2714 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2715 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2717 val |= CHV_BUFLEFTENA1_FORCE;
2719 val |= CHV_BUFRIGHTENA1_FORCE;
2720 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2722 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2723 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2725 val |= CHV_BUFLEFTENA2_FORCE;
2727 val |= CHV_BUFRIGHTENA2_FORCE;
2728 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2731 /* program clock channel usage */
2732 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2733 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2735 val &= ~CHV_PCS_USEDCLKCHANNEL;
2737 val |= CHV_PCS_USEDCLKCHANNEL;
2738 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2740 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2741 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2743 val &= ~CHV_PCS_USEDCLKCHANNEL;
2745 val |= CHV_PCS_USEDCLKCHANNEL;
2746 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2749 * This a a bit weird since generally CL
2750 * matches the pipe, but here we need to
2751 * pick the CL based on the port.
2753 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2755 val &= ~CHV_CMN_USEDCLKCHANNEL;
2757 val |= CHV_CMN_USEDCLKCHANNEL;
2758 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2760 mutex_unlock(&dev_priv->dpio_lock);
2764 * Native read with retry for link status and receiver capability reads for
2765 * cases where the sink may still be asleep.
2767 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2768 * supposed to retry 3 times per the spec.
2771 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2772 void *buffer, size_t size)
2778 * Sometime we just get the same incorrect byte repeated
2779 * over the entire buffer. Doing just one throw away read
2780 * initially seems to "solve" it.
2782 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2784 for (i = 0; i < 3; i++) {
2785 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2795 * Fetch AUX CH registers 0x202 - 0x207 which contain
2796 * link status information
2799 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2801 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2804 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2807 /* These are source-specific values. */
2809 intel_dp_voltage_max(struct intel_dp *intel_dp)
2811 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2812 struct drm_i915_private *dev_priv = dev->dev_private;
2813 enum port port = dp_to_dig_port(intel_dp)->port;
2815 if (INTEL_INFO(dev)->gen >= 9) {
2816 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2817 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2818 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2819 } else if (IS_VALLEYVIEW(dev))
2820 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2821 else if (IS_GEN7(dev) && port == PORT_A)
2822 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2823 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2824 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2826 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2830 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2832 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2833 enum port port = dp_to_dig_port(intel_dp)->port;
2835 if (INTEL_INFO(dev)->gen >= 9) {
2836 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2837 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2838 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2839 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2840 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2841 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2842 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2843 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2844 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2846 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2848 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2849 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2851 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2853 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2855 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2858 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2860 } else if (IS_VALLEYVIEW(dev)) {
2861 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2863 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2864 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2865 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2866 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2867 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2868 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2870 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2872 } else if (IS_GEN7(dev) && port == PORT_A) {
2873 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2874 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2875 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2876 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2878 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2880 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2883 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2884 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2885 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2886 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2887 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2888 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2889 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2890 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2892 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2897 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2899 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2900 struct drm_i915_private *dev_priv = dev->dev_private;
2901 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2902 struct intel_crtc *intel_crtc =
2903 to_intel_crtc(dport->base.base.crtc);
2904 unsigned long demph_reg_value, preemph_reg_value,
2905 uniqtranscale_reg_value;
2906 uint8_t train_set = intel_dp->train_set[0];
2907 enum dpio_channel port = vlv_dport_to_channel(dport);
2908 int pipe = intel_crtc->pipe;
2910 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2911 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2912 preemph_reg_value = 0x0004000;
2913 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2914 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2915 demph_reg_value = 0x2B405555;
2916 uniqtranscale_reg_value = 0x552AB83A;
2918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2919 demph_reg_value = 0x2B404040;
2920 uniqtranscale_reg_value = 0x5548B83A;
2922 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2923 demph_reg_value = 0x2B245555;
2924 uniqtranscale_reg_value = 0x5560B83A;
2926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2927 demph_reg_value = 0x2B405555;
2928 uniqtranscale_reg_value = 0x5598DA3A;
2934 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2935 preemph_reg_value = 0x0002000;
2936 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2938 demph_reg_value = 0x2B404040;
2939 uniqtranscale_reg_value = 0x5552B83A;
2941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2942 demph_reg_value = 0x2B404848;
2943 uniqtranscale_reg_value = 0x5580B83A;
2945 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2946 demph_reg_value = 0x2B404040;
2947 uniqtranscale_reg_value = 0x55ADDA3A;
2953 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2954 preemph_reg_value = 0x0000000;
2955 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2956 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2957 demph_reg_value = 0x2B305555;
2958 uniqtranscale_reg_value = 0x5570B83A;
2960 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2961 demph_reg_value = 0x2B2B4040;
2962 uniqtranscale_reg_value = 0x55ADDA3A;
2968 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2969 preemph_reg_value = 0x0006000;
2970 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2971 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2972 demph_reg_value = 0x1B405555;
2973 uniqtranscale_reg_value = 0x55ADDA3A;
2983 mutex_lock(&dev_priv->dpio_lock);
2984 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2985 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2986 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
2987 uniqtranscale_reg_value);
2988 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2989 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2990 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2991 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
2992 mutex_unlock(&dev_priv->dpio_lock);
2997 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2999 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3000 struct drm_i915_private *dev_priv = dev->dev_private;
3001 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3002 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3003 u32 deemph_reg_value, margin_reg_value, val;
3004 uint8_t train_set = intel_dp->train_set[0];
3005 enum dpio_channel ch = vlv_dport_to_channel(dport);
3006 enum pipe pipe = intel_crtc->pipe;
3009 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3010 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3011 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3013 deemph_reg_value = 128;
3014 margin_reg_value = 52;
3016 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3017 deemph_reg_value = 128;
3018 margin_reg_value = 77;
3020 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3021 deemph_reg_value = 128;
3022 margin_reg_value = 102;
3024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3025 deemph_reg_value = 128;
3026 margin_reg_value = 154;
3027 /* FIXME extra to set for 1200 */
3033 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3034 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3036 deemph_reg_value = 85;
3037 margin_reg_value = 78;
3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3040 deemph_reg_value = 85;
3041 margin_reg_value = 116;
3043 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3044 deemph_reg_value = 85;
3045 margin_reg_value = 154;
3051 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3052 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3053 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3054 deemph_reg_value = 64;
3055 margin_reg_value = 104;
3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3058 deemph_reg_value = 64;
3059 margin_reg_value = 154;
3065 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3066 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3067 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3068 deemph_reg_value = 43;
3069 margin_reg_value = 154;
3079 mutex_lock(&dev_priv->dpio_lock);
3081 /* Clear calc init */
3082 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3083 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3084 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3085 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3086 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3088 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3089 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3090 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3091 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3092 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3094 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3095 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3096 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3097 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3099 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3100 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3101 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3102 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3104 /* Program swing deemph */
3105 for (i = 0; i < 4; i++) {
3106 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3107 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3108 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3109 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3112 /* Program swing margin */
3113 for (i = 0; i < 4; i++) {
3114 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3115 val &= ~DPIO_SWING_MARGIN000_MASK;
3116 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3117 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3120 /* Disable unique transition scale */
3121 for (i = 0; i < 4; i++) {
3122 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3123 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3124 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3127 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3128 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3129 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3130 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3133 * The document said it needs to set bit 27 for ch0 and bit 26
3134 * for ch1. Might be a typo in the doc.
3135 * For now, for this unique transition scale selection, set bit
3136 * 27 for ch0 and ch1.
3138 for (i = 0; i < 4; i++) {
3139 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3140 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3141 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3144 for (i = 0; i < 4; i++) {
3145 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3146 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3147 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3148 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3152 /* Start swing calculation */
3153 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3154 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3155 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3157 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3158 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3159 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3162 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3163 val |= DPIO_LRC_BYPASS;
3164 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3166 mutex_unlock(&dev_priv->dpio_lock);
3172 intel_get_adjust_train(struct intel_dp *intel_dp,
3173 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3178 uint8_t voltage_max;
3179 uint8_t preemph_max;
3181 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3182 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3183 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3191 voltage_max = intel_dp_voltage_max(intel_dp);
3192 if (v >= voltage_max)
3193 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3195 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3196 if (p >= preemph_max)
3197 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3199 for (lane = 0; lane < 4; lane++)
3200 intel_dp->train_set[lane] = v | p;
3204 intel_gen4_signal_levels(uint8_t train_set)
3206 uint32_t signal_levels = 0;
3208 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3211 signal_levels |= DP_VOLTAGE_0_4;
3213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3214 signal_levels |= DP_VOLTAGE_0_6;
3216 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3217 signal_levels |= DP_VOLTAGE_0_8;
3219 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3220 signal_levels |= DP_VOLTAGE_1_2;
3223 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3224 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3226 signal_levels |= DP_PRE_EMPHASIS_0;
3228 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3229 signal_levels |= DP_PRE_EMPHASIS_3_5;
3231 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3232 signal_levels |= DP_PRE_EMPHASIS_6;
3234 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3235 signal_levels |= DP_PRE_EMPHASIS_9_5;
3238 return signal_levels;
3241 /* Gen6's DP voltage swing and pre-emphasis control */
3243 intel_gen6_edp_signal_levels(uint8_t train_set)
3245 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3246 DP_TRAIN_PRE_EMPHASIS_MASK);
3247 switch (signal_levels) {
3248 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3250 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3252 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3255 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3258 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3261 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3263 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3264 "0x%x\n", signal_levels);
3265 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3269 /* Gen7's DP voltage swing and pre-emphasis control */
3271 intel_gen7_edp_signal_levels(uint8_t train_set)
3273 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3274 DP_TRAIN_PRE_EMPHASIS_MASK);
3275 switch (signal_levels) {
3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3277 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3279 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3281 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3284 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3285 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3286 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3288 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3289 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3291 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3294 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3295 "0x%x\n", signal_levels);
3296 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3300 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3302 intel_hsw_signal_levels(uint8_t train_set)
3304 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3305 DP_TRAIN_PRE_EMPHASIS_MASK);
3306 switch (signal_levels) {
3307 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3308 return DDI_BUF_TRANS_SELECT(0);
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3310 return DDI_BUF_TRANS_SELECT(1);
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3312 return DDI_BUF_TRANS_SELECT(2);
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3314 return DDI_BUF_TRANS_SELECT(3);
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3317 return DDI_BUF_TRANS_SELECT(4);
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319 return DDI_BUF_TRANS_SELECT(5);
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3321 return DDI_BUF_TRANS_SELECT(6);
3323 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3324 return DDI_BUF_TRANS_SELECT(7);
3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3326 return DDI_BUF_TRANS_SELECT(8);
3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3329 return DDI_BUF_TRANS_SELECT(9);
3331 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3332 "0x%x\n", signal_levels);
3333 return DDI_BUF_TRANS_SELECT(0);
3337 /* Properly updates "DP" with the correct signal levels. */
3339 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3341 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3342 enum port port = intel_dig_port->port;
3343 struct drm_device *dev = intel_dig_port->base.base.dev;
3344 uint32_t signal_levels, mask;
3345 uint8_t train_set = intel_dp->train_set[0];
3347 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3348 signal_levels = intel_hsw_signal_levels(train_set);
3349 mask = DDI_BUF_EMP_MASK;
3350 } else if (IS_CHERRYVIEW(dev)) {
3351 signal_levels = intel_chv_signal_levels(intel_dp);
3353 } else if (IS_VALLEYVIEW(dev)) {
3354 signal_levels = intel_vlv_signal_levels(intel_dp);
3356 } else if (IS_GEN7(dev) && port == PORT_A) {
3357 signal_levels = intel_gen7_edp_signal_levels(train_set);
3358 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3359 } else if (IS_GEN6(dev) && port == PORT_A) {
3360 signal_levels = intel_gen6_edp_signal_levels(train_set);
3361 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3363 signal_levels = intel_gen4_signal_levels(train_set);
3364 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3367 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3369 *DP = (*DP & ~mask) | signal_levels;
3373 intel_dp_set_link_train(struct intel_dp *intel_dp,
3375 uint8_t dp_train_pat)
3377 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3378 struct drm_device *dev = intel_dig_port->base.base.dev;
3379 struct drm_i915_private *dev_priv = dev->dev_private;
3380 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3383 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3385 I915_WRITE(intel_dp->output_reg, *DP);
3386 POSTING_READ(intel_dp->output_reg);
3388 buf[0] = dp_train_pat;
3389 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3390 DP_TRAINING_PATTERN_DISABLE) {
3391 /* don't write DP_TRAINING_LANEx_SET on disable */
3394 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3395 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3396 len = intel_dp->lane_count + 1;
3399 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3406 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3407 uint8_t dp_train_pat)
3409 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3410 intel_dp_set_signal_levels(intel_dp, DP);
3411 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3415 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3416 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3418 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3419 struct drm_device *dev = intel_dig_port->base.base.dev;
3420 struct drm_i915_private *dev_priv = dev->dev_private;
3423 intel_get_adjust_train(intel_dp, link_status);
3424 intel_dp_set_signal_levels(intel_dp, DP);
3426 I915_WRITE(intel_dp->output_reg, *DP);
3427 POSTING_READ(intel_dp->output_reg);
3429 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3430 intel_dp->train_set, intel_dp->lane_count);
3432 return ret == intel_dp->lane_count;
/*
 * Switch the DDI transport control into idle-pattern transmission mode.
 * NOTE(review): lines are missing from this fragment; the early-out for
 * non-HSW+/PORT_A cases is not visible here.
 */
3435 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3437 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3438 	struct drm_device *dev = intel_dig_port->base.base.dev;
3439 	struct drm_i915_private *dev_priv = dev->dev_private;
3440 	enum port port = intel_dig_port->port;
3446 	val = I915_READ(DP_TP_CTL(port));
3447 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3448 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3449 	I915_WRITE(DP_TP_CTL(port), val);
3452 	 * On PORT_A we can have only eDP in SST mode. There the only reason
3453 	 * we need to set idle transmission mode is to work around a HW issue
3454 	 * where we enable the pipe while not in idle link-training mode.
3455 	 * In this case there is requirement to wait for a minimum number of
3456 	 * idle patterns to be sent.
	/* Wait for the hardware to report the idle pattern was actually sent. */
3461 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3463 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3466 /* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training: write link bandwidth/lane
 * count (and, on gen9+ with a supported-rates table, the rate-select
 * byte) to the sink, then loop on training pattern 1 adjusting voltage
 * swing until clock recovery succeeds or the retry budgets (5 voltage
 * tries, 5 full-reset tries) are exhausted.
 */
3468 intel_dp_start_link_train(struct intel_dp *intel_dp)
3470 	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3471 	struct drm_device *dev = encoder->dev;
3474 	int voltage_tries, loop_tries;
3475 	uint32_t DP = intel_dp->DP;
3476 	uint8_t link_config[2];
3479 		intel_ddi_prepare_link_retrain(encoder);
3481 	/* Write the link configuration data */
3482 	link_config[0] = intel_dp->link_bw;
3483 	link_config[1] = intel_dp->lane_count;
3484 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3485 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3486 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3487 	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
3488 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3489 				&intel_dp->rate_select, 1);
3492 	link_config[1] = DP_SET_ANSI_8B10B;
3493 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3497 	/* clock recovery */
3498 	if (!intel_dp_reset_link_train(intel_dp, &DP,
3499 				       DP_TRAINING_PATTERN_1 |
3500 				       DP_LINK_SCRAMBLING_DISABLE)) {
3501 		DRM_ERROR("failed to enable link training\n");
3509 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3511 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3512 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3513 			DRM_ERROR("failed to get link status\n");
3517 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3518 			DRM_DEBUG_KMS("clock recovery OK\n");
3522 		/* Check to see if we've tried the max voltage */
3523 		for (i = 0; i < intel_dp->lane_count; i++)
3524 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
		/* All lanes hit max swing: reset and retry from scratch. */
3526 		if (i == intel_dp->lane_count) {
3528 			if (loop_tries == 5) {
3529 				DRM_ERROR("too many full retries, give up\n");
3532 			intel_dp_reset_link_train(intel_dp, &DP,
3533 						  DP_TRAINING_PATTERN_1 |
3534 						  DP_LINK_SCRAMBLING_DISABLE);
3539 		/* Check to see if we've tried the same voltage 5 times */
3540 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3542 			if (voltage_tries == 5) {
3543 				DRM_ERROR("too many voltage retries, give up\n");
3548 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3550 		/* Update training set as requested by target */
3551 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3552 			DRM_ERROR("failed to update link training\n");
/*
 * Channel-equalization phase of DP link training: run training pattern
 * 2 (or 3 for HBR2/TPS3-capable sinks), re-running clock recovery when
 * it drops, until channel EQ succeeds or the retry budgets run out.
 */
3561 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3563 	bool channel_eq = false;
3564 	int tries, cr_tries;
3565 	uint32_t DP = intel_dp->DP;
3566 	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3568 	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3569 	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3570 		training_pattern = DP_TRAINING_PATTERN_3;
3572 	/* channel equalization */
3573 	if (!intel_dp_set_link_train(intel_dp, &DP,
3575 				     DP_LINK_SCRAMBLING_DISABLE)) {
3576 		DRM_ERROR("failed to start channel equalization\n");
3584 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3587 			DRM_ERROR("failed to train DP, aborting\n");
3591 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3592 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3593 			DRM_ERROR("failed to get link status\n");
3597 		/* Make sure clock is still ok */
3598 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3599 			intel_dp_start_link_train(intel_dp);
3600 			intel_dp_set_link_train(intel_dp, &DP,
3602 						DP_LINK_SCRAMBLING_DISABLE);
3607 		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3612 		/* Try 5 times, then try clock recovery if that fails */
3614 			intel_dp_start_link_train(intel_dp);
3615 			intel_dp_set_link_train(intel_dp, &DP,
3617 						DP_LINK_SCRAMBLING_DISABLE);
3623 		/* Update training set as requested by target */
3624 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3625 			DRM_ERROR("failed to update link training\n");
3631 	intel_dp_set_idle_link_train(intel_dp);
3636 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/* End link training by writing DP_TRAINING_PATTERN_DISABLE to the sink. */
3640 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3642 	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3643 				DP_TRAINING_PATTERN_DISABLE);
/*
 * Tear the DP link down on pre-DDI platforms: put the port into the idle
 * training pattern, apply the IBX transcoder-B-select workaround, then
 * disable audio and the port itself, and honour the panel power-down
 * delay.  WARNs (and is a no-op) on DDI platforms or if the port is
 * already disabled.
 */
3647 intel_dp_link_down(struct intel_dp *intel_dp)
3649 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3650 	enum port port = intel_dig_port->port;
3651 	struct drm_device *dev = intel_dig_port->base.base.dev;
3652 	struct drm_i915_private *dev_priv = dev->dev_private;
3653 	uint32_t DP = intel_dp->DP;
3655 	if (WARN_ON(HAS_DDI(dev)))
3658 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3661 	DRM_DEBUG_KMS("\n");
3663 	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3664 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3665 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3667 		if (IS_CHERRYVIEW(dev))
3668 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3670 			DP &= ~DP_LINK_TRAIN_MASK;
3671 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3673 	POSTING_READ(intel_dp->output_reg);
3675 	if (HAS_PCH_IBX(dev) &&
3676 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3677 		/* Hardware workaround: leaving our transcoder select
3678 		 * set to transcoder B while it's off will prevent the
3679 		 * corresponding HDMI output on transcoder A.
3681 		 * Combine this with another hardware workaround:
3682 		 * transcoder select bit can only be cleared while the
3685 		DP &= ~DP_PIPEB_SELECT;
3686 		I915_WRITE(intel_dp->output_reg, DP);
3687 		POSTING_READ(intel_dp->output_reg);
3690 	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3691 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3692 	POSTING_READ(intel_dp->output_reg);
3693 	msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver capabilities and derive
 * source-side state from them: PSR support (eDP), TPS3 usability,
 * the eDP 1.4 supported-link-rates table, and downstream port info.
 *
 * Returns false if the AUX transfer fails, no DPCD is present, or the
 * downstream-port status fetch fails; true otherwise (native sink or
 * branch device with usable info).
 */
3697 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3699 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3700 	struct drm_device *dev = dig_port->base.base.dev;
3701 	struct drm_i915_private *dev_priv = dev->dev_private;
3704 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3705 				    sizeof(intel_dp->dpcd)) < 0)
3706 		return false; /* aux transfer failed */
3708 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3710 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3711 		return false; /* DPCD not present */
3713 	/* Check if the panel supports PSR */
3714 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3715 	if (is_edp(intel_dp)) {
3716 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3718 					sizeof(intel_dp->psr_dpcd));
3719 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3720 			dev_priv->psr.sink_support = true;
3721 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3725 	/* Training Pattern 3 support, both source and sink */
3726 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3727 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3728 	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3729 		intel_dp->use_tps3 = true;
3730 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3732 		intel_dp->use_tps3 = false;
3734 	/* Intermediate frequency support */
3735 	if (is_edp(intel_dp) &&
3736 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3737 	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3738 	    (rev >= 0x03)) { /* eDp v1.4 or higher */
3739 		__le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3742 		intel_dp_dpcd_read_wake(&intel_dp->aux,
3743 				DP_SUPPORTED_LINK_RATES,
3745 				sizeof(supported_rates));
3747 		for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3748 			int val = le16_to_cpu(supported_rates[i]);
			/* DPCD stores rates in 200 kHz units; cache in kHz. */
3753 			intel_dp->supported_rates[i] = val * 200;
3755 		intel_dp->num_supported_rates = i;
3757 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3758 	      DP_DWN_STRM_PORT_PRESENT))
3759 		return true; /* native DP sink */
3761 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3762 		return true; /* no per-port downstream info */
3764 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3765 				    intel_dp->downstream_ports,
3766 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3767 		return false; /* downstream port status fetch failed */
/*
 * Best-effort debug read of the sink and branch IEEE OUIs; only logs,
 * never fails.  Skipped when the sink does not advertise OUI support.
 */
3773 intel_dp_probe_oui(struct intel_dp *intel_dp)
3777 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3780 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3781 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3782 			      buf[0], buf[1], buf[2]);
3784 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3785 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3786 			      buf[0], buf[1], buf[2]);
/*
 * Determine whether the sink is MST-capable (requires DPCD rev >= 1.2
 * and source-side can_mst), update intel_dp->is_mst accordingly, and
 * enable/disable the MST topology manager to match.  Returns the final
 * is_mst state.
 */
3790 intel_dp_probe_mst(struct intel_dp *intel_dp)
3794 	if (!intel_dp->can_mst)
3797 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3800 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3801 		if (buf[0] & DP_MST_CAP) {
3802 			DRM_DEBUG_KMS("Sink is MST capable\n");
3803 			intel_dp->is_mst = true;
3805 			DRM_DEBUG_KMS("Sink is not MST capable\n");
3806 			intel_dp->is_mst = false;
3810 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3811 	return intel_dp->is_mst;
/*
 * Ask a CRC-capable sink to compute a test CRC: start sink CRC
 * generation, poll DP_TEST_SINK_MISC across vblanks until the CRC count
 * changes (up to the attempt budget), read the six CRC bytes into @crc,
 * and stop CRC generation again.
 * NOTE(review): error-return lines are missing from this fragment;
 * presumably returns 0 on success and a negative errno on failure.
 */
3814 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3816 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3817 	struct drm_device *dev = intel_dig_port->base.base.dev;
3818 	struct intel_crtc *intel_crtc =
3819 		to_intel_crtc(intel_dig_port->base.base.crtc);
3824 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3827 	if (!(buf & DP_TEST_CRC_SUPPORTED))
3830 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3833 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3834 			       buf | DP_TEST_SINK_START) < 0)
3837 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3839 	test_crc_count = buf & DP_TEST_COUNT_MASK;
3842 		if (drm_dp_dpcd_readb(&intel_dp->aux,
3843 				      DP_TEST_SINK_MISC, &buf) < 0)
3845 		intel_wait_for_vblank(dev, intel_crtc->pipe);
3846 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3848 	if (attempts == 0) {
3849 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3853 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3856 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3858 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3859 			       buf & ~DP_TEST_SINK_START) < 0)
/* Read the one-byte device service IRQ vector; true on a full 1-byte read. */
3866 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3868 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3869 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
3870 				       sink_irq_vector, 1) == 1;
/*
 * Read the 14-byte ESI (event status indicator) block used in MST mode.
 * NOTE(review): the DPCD start address argument and the return-value
 * check are on lines missing from this fragment.
 */
3874 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3878 	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3880 				      sink_irq_vector, 14);
/* Automated-test requests are not implemented: always reply DP_TEST_NAK. */
3888 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3890 	/* NAK by default */
3891 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain the
 * link if channel EQ dropped, hand the event to the MST helper, and ack
 * the ESI bits (retrying the ack up to 3 times).  On ESI read failure
 * the device is assumed gone: MST mode is torn down and a hotplug event
 * is sent.
 * NOTE(review): the return value and loop structure are partly on lines
 * missing from this fragment.
 */
3895 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3899 	if (intel_dp->is_mst) {
3904 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3908 		/* check link status - esi[10] = 0x200c */
3909 		if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3910 			DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3911 			intel_dp_start_link_train(intel_dp);
3912 			intel_dp_complete_link_train(intel_dp);
3913 			intel_dp_stop_link_train(intel_dp);
3916 		DRM_DEBUG_KMS("got esi %3ph\n", esi);
3917 		ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3920 			for (retry = 0; retry < 3; retry++) {
3922 				wret = drm_dp_dpcd_write(&intel_dp->aux,
3923 							 DP_SINK_COUNT_ESI+1,
3930 			bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3932 				DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
3940 		struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3941 		DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3942 		intel_dp->is_mst = false;
3943 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3944 		/* send a hotplug event */
3945 		drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3952  * According to DP spec
3955  * 2. Configure link according to Receiver Capabilities
3956  * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3957  * 4. Check link status on receipt of hot-plug interrupt
/*
 * Re-validate an active SST link after a hotplug/short-pulse interrupt:
 * bail if the connector/crtc is inactive, clear and dispatch any sink
 * IRQ vector, and retrain when channel EQ is no longer OK.  Caller must
 * hold connection_mutex (asserted below).
 */
3960 intel_dp_check_link_status(struct intel_dp *intel_dp)
3962 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3963 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3965 	u8 link_status[DP_LINK_STATUS_SIZE];
3967 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3969 	if (!intel_encoder->connectors_active)
3972 	if (WARN_ON(!intel_encoder->base.crtc))
3975 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3978 	/* Try to read receiver status if the link appears to be up */
3979 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
3983 	/* Now read the DPCD to see if it's actually running */
3984 	if (!intel_dp_get_dpcd(intel_dp)) {
3988 	/* Try to read the source of the interrupt */
3989 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3990 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3991 		/* Clear interrupt source */
3992 		drm_dp_dpcd_writeb(&intel_dp->aux,
3993 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
3996 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3997 			intel_dp_handle_test_request(intel_dp);
3998 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3999 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4002 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4003 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4004 			      intel_encoder->base.name);
4005 		intel_dp_start_link_train(intel_dp);
4006 		intel_dp_complete_link_train(intel_dp);
4007 		intel_dp_stop_link_train(intel_dp);
4011 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide the connector status from DPCD contents alone: native sinks are
 * connected; branch devices are judged via SINK_COUNT (when HPD-aware),
 * a gentle DDC probe, or declared unknown for unreliable port types.
 */
4012 static enum drm_connector_status
4013 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4015 	uint8_t *dpcd = intel_dp->dpcd;
4018 	if (!intel_dp_get_dpcd(intel_dp))
4019 		return connector_status_disconnected;
4021 	/* if there's no downstream port, we're done */
4022 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4023 		return connector_status_connected;
4025 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4026 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4027 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4030 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4032 			return connector_status_unknown;
4034 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4035 					      : connector_status_disconnected;
4038 	/* If no HPD, poke DDC gently */
4039 	if (drm_probe_ddc(&intel_dp->aux.ddc))
4040 		return connector_status_connected;
4042 	/* Well we tried, say unknown for unreliable port types */
4043 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4044 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4045 		if (type == DP_DS_PORT_TYPE_VGA ||
4046 		    type == DP_DS_PORT_TYPE_NON_EDID)
4047 			return connector_status_unknown;
4049 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4050 			DP_DWN_STRM_PORT_TYPE_MASK;
4051 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4052 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4053 			return connector_status_unknown;
4056 	/* Anything else is out of spec, warn and ignore */
4057 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4058 	return connector_status_disconnected;
/*
 * eDP panels can't be physically unplugged; defer to the panel/lid
 * detection and treat "unknown" as connected.
 */
4061 static enum drm_connector_status
4062 edp_detect(struct intel_dp *intel_dp)
4064 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4065 	enum drm_connector_status status;
4067 	status = intel_panel_detect(dev);
4068 	if (status == connector_status_unknown)
4069 		status = connector_status_connected;
/*
 * PCH-split detect path: check the digital port's live-status bit first,
 * then fall back to DPCD-based detection.
 */
4074 static enum drm_connector_status
4075 ironlake_dp_detect(struct intel_dp *intel_dp)
4077 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4078 	struct drm_i915_private *dev_priv = dev->dev_private;
4079 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4081 	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4082 		return connector_status_disconnected;
4084 	return intel_dp_detect_dpcd(intel_dp);
/*
 * Sample PORT_HOTPLUG_STAT live-status for the given digital port,
 * selecting the VLV or G4x bit layout as appropriate.
 * NOTE(review): the default-case and return lines are missing from this
 * fragment; presumably returns 0 when the bit is clear, 1 when set, and
 * a negative value for unknown ports (callers below compare against 1).
 */
4087 static int g4x_digital_port_connected(struct drm_device *dev,
4088 				       struct intel_digital_port *intel_dig_port)
4090 	struct drm_i915_private *dev_priv = dev->dev_private;
4093 	if (IS_VALLEYVIEW(dev)) {
4094 		switch (intel_dig_port->port) {
4096 			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4099 			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4102 			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4108 		switch (intel_dig_port->port) {
4110 			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4113 			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4116 			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4123 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
/*
 * G4x/VLV detect path: eDP goes through panel/lid detection, other ports
 * through the live-status bit and then DPCD-based detection.
 */
4128 static enum drm_connector_status
4129 g4x_dp_detect(struct intel_dp *intel_dp)
4131 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4132 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4135 	/* Can't disconnect eDP, but you can close the lid... */
4136 	if (is_edp(intel_dp)) {
4137 		enum drm_connector_status status;
4139 		status = intel_panel_detect(dev);
4140 		if (status == connector_status_unknown)
4141 			status = connector_status_connected;
4145 	ret = g4x_digital_port_connected(dev, intel_dig_port);
4147 		return connector_status_unknown;
4149 		return connector_status_disconnected;
4151 	return intel_dp_detect_dpcd(intel_dp);
/*
 * Return the panel's EDID: a duplicate of the connector's cached copy
 * when present (NULL if the cache holds an error), otherwise a fresh
 * read over the AUX DDC channel.  Caller owns the returned EDID.
 */
4154 static struct edid *
4155 intel_dp_get_edid(struct intel_dp *intel_dp)
4157 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4159 	/* use cached edid if we have one */
4160 	if (intel_connector->edid) {
4162 		if (IS_ERR(intel_connector->edid))
4165 		return drm_edid_duplicate(intel_connector->edid);
4167 	return drm_get_edid(&intel_connector->base,
4168 			    &intel_dp->aux.ddc);
/*
 * Fetch and cache the EDID on the connector, then derive has_audio from
 * the force_audio property or, in AUTO mode, from the EDID itself.
 */
4172 intel_dp_set_edid(struct intel_dp *intel_dp)
4174 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4177 	edid = intel_dp_get_edid(intel_dp);
4178 	intel_connector->detect_edid = edid;
4180 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4181 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4183 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
/* Drop the cached detect-time EDID and reset the derived audio state. */
4187 intel_dp_unset_edid(struct intel_dp *intel_dp)
4189 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4191 	kfree(intel_connector->detect_edid);
4192 	intel_connector->detect_edid = NULL;
4194 	intel_dp->has_audio = false;
/*
 * Grab the display power domain for this port and return it so the
 * caller can hand it back to intel_dp_power_put() later.
 */
4197 static enum intel_display_power_domain
4198 intel_dp_power_get(struct intel_dp *dp)
4200 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4201 	enum intel_display_power_domain power_domain;
4203 	power_domain = intel_display_port_power_domain(encoder);
4204 	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4206 	return power_domain;
/* Release a power-domain reference taken by intel_dp_power_get(). */
4210 intel_dp_power_put(struct intel_dp *dp,
4211 		   enum intel_display_power_domain power_domain)
4213 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4214 	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
/*
 * drm_connector_funcs.detect: full connector probe.  MST connectors are
 * reported disconnected at this level; otherwise take the port power
 * domain, run the platform-specific detect path, probe OUI and MST, and
 * (re)cache the EDID when actually connected.
 */
4217 static enum drm_connector_status
4218 intel_dp_detect(struct drm_connector *connector, bool force)
4220 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4221 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4222 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4223 	struct drm_device *dev = connector->dev;
4224 	enum drm_connector_status status;
4225 	enum intel_display_power_domain power_domain;
4228 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4229 		      connector->base.id, connector->name);
4230 	intel_dp_unset_edid(intel_dp);
4232 	if (intel_dp->is_mst) {
4233 		/* MST devices are disconnected from a monitor POV */
4234 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4235 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4236 		return connector_status_disconnected;
4239 	power_domain = intel_dp_power_get(intel_dp);
4241 	/* Can't disconnect eDP, but you can close the lid... */
4242 	if (is_edp(intel_dp))
4243 		status = edp_detect(intel_dp);
4244 	else if (HAS_PCH_SPLIT(dev))
4245 		status = ironlake_dp_detect(intel_dp);
4247 		status = g4x_dp_detect(intel_dp);
4248 	if (status != connector_status_connected)
4251 	intel_dp_probe_oui(intel_dp);
4253 	ret = intel_dp_probe_mst(intel_dp);
4255 		/* if we are in MST mode then this connector
4256 		   won't appear connected or have anything with EDID on it */
4257 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4258 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4259 		status = connector_status_disconnected;
4263 	intel_dp_set_edid(intel_dp);
4265 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4266 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4267 	status = connector_status_connected;
4270 	intel_dp_power_put(intel_dp, power_domain);
/*
 * drm_connector_funcs.force: refresh the cached EDID for a connector the
 * user forced connected, without doing a full detect cycle.
 */
4275 intel_dp_force(struct drm_connector *connector)
4277 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4278 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4279 	enum intel_display_power_domain power_domain;
4281 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4282 		      connector->base.id, connector->name);
4283 	intel_dp_unset_edid(intel_dp);
4285 	if (connector->status != connector_status_connected)
4288 	power_domain = intel_dp_power_get(intel_dp);
4290 	intel_dp_set_edid(intel_dp);
4292 	intel_dp_power_put(intel_dp, power_domain);
4294 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4295 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector_helper_funcs.get_modes: populate modes from the cached
 * detect-time EDID, falling back to the eDP panel's fixed mode when no
 * EDID is available.  Returns the number of modes added.
 */
4298 static int intel_dp_get_modes(struct drm_connector *connector)
4300 	struct intel_connector *intel_connector = to_intel_connector(connector);
4303 	edid = intel_connector->detect_edid;
4305 		int ret = intel_connector_update_modes(connector, edid);
4310 	/* if eDP has no EDID, fall back to fixed mode */
4311 	if (is_edp(intel_attached_dp(connector)) &&
4312 	    intel_connector->panel.fixed_mode) {
4313 		struct drm_display_mode *mode;
4315 		mode = drm_mode_duplicate(connector->dev,
4316 					  intel_connector->panel.fixed_mode);
4318 			drm_mode_probed_add(connector, mode);
/* Report whether the cached detect-time EDID advertises audio support. */
4327 intel_dp_detect_audio(struct drm_connector *connector)
4329 	bool has_audio = false;
4332 	edid = to_intel_connector(connector)->detect_edid;
4334 		has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector_funcs.set_property: handle force-audio, broadcast-RGB
 * (color range) and the eDP scaling-mode properties, restoring the mode
 * on the attached crtc when a change requires a modeset.
 * NOTE(review): several early-return/"goto done" lines are missing from
 * this fragment.
 */
4340 intel_dp_set_property(struct drm_connector *connector,
4341 		      struct drm_property *property,
4344 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4345 	struct intel_connector *intel_connector = to_intel_connector(connector);
4346 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4347 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4350 	ret = drm_object_property_set_value(&connector->base, property, val);
4354 	if (property == dev_priv->force_audio_property) {
4358 		if (i == intel_dp->force_audio)
4361 		intel_dp->force_audio = i;
4363 		if (i == HDMI_AUDIO_AUTO)
4364 			has_audio = intel_dp_detect_audio(connector);
4366 			has_audio = (i == HDMI_AUDIO_ON);
4368 		if (has_audio == intel_dp->has_audio)
4371 		intel_dp->has_audio = has_audio;
4375 	if (property == dev_priv->broadcast_rgb_property) {
4376 		bool old_auto = intel_dp->color_range_auto;
4377 		uint32_t old_range = intel_dp->color_range;
4380 		case INTEL_BROADCAST_RGB_AUTO:
4381 			intel_dp->color_range_auto = true;
4383 		case INTEL_BROADCAST_RGB_FULL:
4384 			intel_dp->color_range_auto = false;
4385 			intel_dp->color_range = 0;
4387 		case INTEL_BROADCAST_RGB_LIMITED:
4388 			intel_dp->color_range_auto = false;
4389 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
4395 		if (old_auto == intel_dp->color_range_auto &&
4396 		    old_range == intel_dp->color_range)
4402 	if (is_edp(intel_dp) &&
4403 	    property == connector->dev->mode_config.scaling_mode_property) {
4404 		if (val == DRM_MODE_SCALE_NONE) {
4405 			DRM_DEBUG_KMS("no scaling not supported\n");
4409 		if (intel_connector->panel.fitting_mode == val) {
4410 			/* the eDP scaling property is not changed */
4413 		intel_connector->panel.fitting_mode = val;
4421 	if (intel_encoder->base.crtc)
4422 		intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * drm_connector_funcs.destroy: free both cached EDIDs, finalise the eDP
 * panel state (checked by connector type, not is_edp(), because the
 * encoder may already be gone), and clean up the DRM connector.
 */
4428 intel_dp_connector_destroy(struct drm_connector *connector)
4430 	struct intel_connector *intel_connector = to_intel_connector(connector);
4432 	kfree(intel_connector->detect_edid);
4434 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4435 		kfree(intel_connector->edid);
4437 	/* Can't call is_edp() since the encoder may have been destroyed
4439 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4440 		intel_panel_fini(&intel_connector->panel);
4442 	drm_connector_cleanup(connector);
/*
 * Encoder teardown: unregister AUX, clean up MST state, make sure eDP
 * VDD is really off (the delayed-off work may still hold it), drop the
 * reboot notifier, and free the digital port.
 */
4446 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4448 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4449 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4451 	drm_dp_aux_unregister(&intel_dp->aux);
4452 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4453 	if (is_edp(intel_dp)) {
4454 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4456 		 * vdd might still be enabled due to the delayed vdd off.
4457 		 * Make sure vdd is actually turned off here.
4460 		edp_panel_vdd_off_sync(intel_dp);
4461 		pps_unlock(intel_dp);
4463 		if (intel_dp->edp_notifier.notifier_call) {
4464 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4465 			intel_dp->edp_notifier.notifier_call = NULL;
4468 	drm_encoder_cleanup(encoder);
4469 	kfree(intel_dig_port);
/*
 * Suspend hook: for eDP, cancel the delayed VDD-off work and force VDD
 * off synchronously so the panel is powered down before suspend.
 */
4472 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4474 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4476 	if (!is_edp(intel_dp))
4480 	 * vdd might still be enabled due to the delayed vdd off.
4481 	 * Make sure vdd is actually turned off here.
4483 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4485 	edp_panel_vdd_off_sync(intel_dp);
4486 	pps_unlock(intel_dp);
/*
 * If the BIOS left panel VDD enabled at boot/resume, adopt it into our
 * state tracking: take the matching power-domain reference and schedule
 * the usual delayed VDD off.  Caller must hold pps_mutex.
 */
4489 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4491 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4492 	struct drm_device *dev = intel_dig_port->base.base.dev;
4493 	struct drm_i915_private *dev_priv = dev->dev_private;
4494 	enum intel_display_power_domain power_domain;
4496 	lockdep_assert_held(&dev_priv->pps_mutex);
4498 	if (!edp_have_panel_vdd(intel_dp))
4502 	 * The VDD bit needs a power domain reference, so if the bit is
4503 	 * already enabled when we boot or resume, grab this reference and
4504 	 * schedule a vdd off, so we don't hold on to the reference
4507 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4508 	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4509 	intel_display_power_get(dev_priv, power_domain);
4511 	edp_panel_vdd_schedule_off(intel_dp);
/*
 * drm_encoder_funcs.reset: for eDP encoders, re-read the power sequencer
 * assignment (VLV) and sanitize any BIOS-enabled VDD, under pps_lock.
 */
4514 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4516 	struct intel_dp *intel_dp;
4518 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4521 	intel_dp = enc_to_intel_dp(encoder);
4526 	 * Read out the current power sequencer assignment,
4527 	 * in case the BIOS did something with it.
4529 	if (IS_VALLEYVIEW(encoder->dev))
4530 		vlv_initial_power_sequencer_setup(intel_dp);
4532 	intel_edp_panel_vdd_sanitize(intel_dp);
4534 	pps_unlock(intel_dp);
/* DRM connector vtable for DP/eDP connectors. */
4537 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4538 	.dpms = intel_connector_dpms,
4539 	.detect = intel_dp_detect,
4540 	.force = intel_dp_force,
4541 	.fill_modes = drm_helper_probe_single_connector_modes,
4542 	.set_property = intel_dp_set_property,
4543 	.atomic_get_property = intel_connector_atomic_get_property,
4544 	.destroy = intel_dp_connector_destroy,
4545 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
/* Probe-helper vtable: mode enumeration/validation and encoder lookup. */
4548 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4549 	.get_modes = intel_dp_get_modes,
4550 	.mode_valid = intel_dp_mode_valid,
4551 	.best_encoder = intel_best_encoder,
/* DRM encoder vtable for DP encoders. */
4554 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4555 	.reset = intel_dp_encoder_reset,
4556 	.destroy = intel_dp_encoder_destroy,
/*
 * Hotplug callback.  NOTE(review): the body is entirely missing from
 * this fragment; in this driver version it appears to be an empty stub,
 * with the real work done in intel_dp_hpd_pulse() — confirm upstream.
 */
4560 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
/*
 * IRQ-context HPD handler for a digital port.  Ignores long pulses on
 * eDP (they would recursively require VDD), takes the port power domain,
 * and for long pulses re-verifies the physical connection and DPCD/MST
 * capability; for short pulses services MST ESI events or checks SST
 * link status under connection_mutex.  On failure, drops out of MST
 * mode.  Returns an irqreturn value (IRQ_NONE unless handled).
 */
4566 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4568 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4569 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4570 	struct drm_device *dev = intel_dig_port->base.base.dev;
4571 	struct drm_i915_private *dev_priv = dev->dev_private;
4572 	enum intel_display_power_domain power_domain;
4573 	enum irqreturn ret = IRQ_NONE;
4575 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4576 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4578 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4580 		 * vdd off can generate a long pulse on eDP which
4581 		 * would require vdd on to handle it, and thus we
4582 		 * would end up in an endless cycle of
4583 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4585 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4586 			      port_name(intel_dig_port->port));
4590 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4591 		      port_name(intel_dig_port->port),
4592 		      long_hpd ? "long" : "short");
4594 	power_domain = intel_display_port_power_domain(intel_encoder);
4595 	intel_display_power_get(dev_priv, power_domain);
4599 		if (HAS_PCH_SPLIT(dev)) {
4600 			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4603 			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4607 		if (!intel_dp_get_dpcd(intel_dp)) {
4611 		intel_dp_probe_oui(intel_dp);
4613 		if (!intel_dp_probe_mst(intel_dp))
4617 		if (intel_dp->is_mst) {
4618 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4622 		if (!intel_dp->is_mst) {
4624 			 * we'll check the link status via the normal hot plug path later -
4625 			 * but for short hpds we should check it now
4627 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4628 			intel_dp_check_link_status(intel_dp);
4629 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4637 	/* if we were in MST mode, and device is not there get out of MST mode */
4638 	if (intel_dp->is_mst) {
4639 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4640 		intel_dp->is_mst = false;
4641 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4644 	intel_display_power_put(dev_priv, power_domain);
4649 /* Return which DP Port should be selected for Transcoder DP control */
/*
 * Scan the encoders on @crtc and return the output register of the first
 * DP/eDP encoder found (used to program transcoder DP control).
 * NOTE(review): the fallthrough return for "no DP encoder" is on a line
 * missing from this fragment.
 */
4651 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4653 	struct drm_device *dev = crtc->dev;
4654 	struct intel_encoder *intel_encoder;
4655 	struct intel_dp *intel_dp;
4657 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4658 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
4660 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4661 		    intel_encoder->type == INTEL_OUTPUT_EDP)
4662 			return intel_dp->output_reg;
4668 /* check the VBT to see whether the eDP is on DP-D port */
/*
 * Walk the VBT child-device list and report whether @port is declared as
 * an eDP output there.
 * NOTE(review): the bounds check on @port against port_mapping and the
 * final returns are on lines missing from this fragment.
 */
4669 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4671 	struct drm_i915_private *dev_priv = dev->dev_private;
4672 	union child_device_config *p_child;
4674 	static const short port_mapping[] = {
4675 		[PORT_B] = PORT_IDPB,
4676 		[PORT_C] = PORT_IDPC,
4677 		[PORT_D] = PORT_IDPD,
4683 	if (!dev_priv->vbt.child_dev_num)
4686 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4687 		p_child = dev_priv->vbt.child_dev + i;
4689 		if (p_child->common.dvo_port == port_mapping[port] &&
4690 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4691 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the standard DP connector properties (force-audio, broadcast
 * RGB) and, for eDP, the scaling-mode property defaulting to ASPECT.
 */
4698 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4700 	struct intel_connector *intel_connector = to_intel_connector(connector);
4702 	intel_attach_force_audio_property(connector);
4703 	intel_attach_broadcast_rgb_property(connector);
4704 	intel_dp->color_range_auto = true;
4706 	if (is_edp(intel_dp)) {
4707 		drm_mode_create_scaling_mode_property(connector->dev);
4708 		drm_object_attach_property(
4710 			connector->dev->mode_config.scaling_mode_property,
4711 			DRM_MODE_SCALE_ASPECT);
4712 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Seed the panel power-sequencing timestamps with "now" so the first
 * real transitions respect the panel's minimum delays.
 */
4716 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4718 	intel_dp->last_power_cycle = jiffies;
4719 	intel_dp->last_power_on = jiffies;
4720 	intel_dp->last_backlight_off = jiffies;
/*
 * Compute the panel power sequencing delays: read the current values out
 * of the PPS registers, compare them with the VBT values and the eDP spec
 * upper limits, take per-field maxima (falling back to spec limits when
 * both hw and VBT are unset) and store the result in
 * intel_dp->pps_delays.  Caller must hold pps_mutex.
 */
4724 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4725 struct intel_dp *intel_dp)
4727 struct drm_i915_private *dev_priv = dev->dev_private;
4728 struct edp_power_seq cur, vbt, spec,
4729 *final = &intel_dp->pps_delays;
4730 u32 pp_on, pp_off, pp_div, pp;
4731 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4733 lockdep_assert_held(&dev_priv->pps_mutex);
4735 /* already initialized? */
4736 if (final->t11_t12 != 0)
/* Select the PPS register set: fixed on PCH platforms, per-pipe on VLV. */
4739 if (HAS_PCH_SPLIT(dev)) {
4740 pp_ctrl_reg = PCH_PP_CONTROL;
4741 pp_on_reg = PCH_PP_ON_DELAYS;
4742 pp_off_reg = PCH_PP_OFF_DELAYS;
4743 pp_div_reg = PCH_PP_DIVISOR;
4745 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4747 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4748 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4749 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4750 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4753 /* Workaround: Need to write PP_CONTROL with the unlock key as
4754 * the very first thing. */
4755 pp = ironlake_get_pp_control(intel_dp);
4756 I915_WRITE(pp_ctrl_reg, pp);
4758 pp_on = I915_READ(pp_on_reg);
4759 pp_off = I915_READ(pp_off_reg);
4760 pp_div = I915_READ(pp_div_reg);
4762 /* Pull timing values out of registers */
4763 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4764 PANEL_POWER_UP_DELAY_SHIFT;
4766 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4767 PANEL_LIGHT_ON_DELAY_SHIFT;
4769 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4770 PANEL_LIGHT_OFF_DELAY_SHIFT;
4772 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4773 PANEL_POWER_DOWN_DELAY_SHIFT;
/* t11_t12 is in 100ms units in hw; scale to the common 100usec units. */
4775 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4776 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4778 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4779 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4781 vbt = dev_priv->vbt.edp_pps;
4783 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4784 * our hw here, which are all in 100usec. */
4785 spec.t1_t3 = 210 * 10;
4786 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4787 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4788 spec.t10 = 500 * 10;
4789 /* This one is special and actually in units of 100ms, but zero
4790 * based in the hw (so we need to add 100 ms). But the sw vbt
4791 * table multiplies it with 1000 to make it in units of 100usec,
4793 spec.t11_t12 = (510 + 100) * 10;
4795 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4796 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4798 /* Use the max of the register settings and vbt. If both are
4799 * unset, fall back to the spec limits. */
4800 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4802 max(cur.field, vbt.field))
4803 assign_final(t1_t3);
4807 assign_final(t11_t12);
/* Convert the stored 100usec units into the ms delays used by sw waits. */
4810 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4811 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4812 intel_dp->backlight_on_delay = get_delay(t8);
4813 intel_dp->backlight_off_delay = get_delay(t9);
4814 intel_dp->panel_power_down_delay = get_delay(t10);
4815 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4818 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4819 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4820 intel_dp->panel_power_cycle_delay);
4822 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4823 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the PPS registers from intel_dp->pps_delays, including the
 * reference-clock divisor and the panel port select bits.  Caller must
 * hold pps_mutex.
 */
4827 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4828 struct intel_dp *intel_dp)
4830 struct drm_i915_private *dev_priv = dev->dev_private;
4831 u32 pp_on, pp_off, pp_div, port_sel = 0;
4832 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4833 int pp_on_reg, pp_off_reg, pp_div_reg;
4834 enum port port = dp_to_dig_port(intel_dp)->port;
4835 const struct edp_power_seq *seq = &intel_dp->pps_delays;
4837 lockdep_assert_held(&dev_priv->pps_mutex);
/* Pick the register set: fixed on PCH platforms, per-pipe on VLV. */
4839 if (HAS_PCH_SPLIT(dev)) {
4840 pp_on_reg = PCH_PP_ON_DELAYS;
4841 pp_off_reg = PCH_PP_OFF_DELAYS;
4842 pp_div_reg = PCH_PP_DIVISOR;
4844 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4846 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4847 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4848 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4852 * And finally store the new values in the power sequencer. The
4853 * backlight delays are set to 1 because we do manual waits on them. For
4854 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4855 * we'll end up waiting for the backlight off delay twice: once when we
4856 * do the manual sleep, and once when we disable the panel and wait for
4857 * the PP_STATUS bit to become zero.
4859 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4860 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4861 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4862 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4863 /* Compute the divisor for the pp clock, simply match the Bspec
4865 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
/* t11_t12 is stored in 100usec; hw wants 100ms units, so divide by 1000. */
4866 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4867 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4869 /* Haswell doesn't have any port selection bits for the panel
4870 * power sequencer any more. */
4871 if (IS_VALLEYVIEW(dev)) {
4872 port_sel = PANEL_PORT_SELECT_VLV(port);
4873 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4875 port_sel = PANEL_PORT_SELECT_DPA;
4877 port_sel = PANEL_PORT_SELECT_DPD;
4882 I915_WRITE(pp_on_reg, pp_on);
4883 I915_WRITE(pp_off_reg, pp_off);
4884 I915_WRITE(pp_div_reg, pp_div);
4886 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4887 I915_READ(pp_on_reg),
4888 I915_READ(pp_off_reg),
4889 I915_READ(pp_div_reg));
4893 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4895 * @refresh_rate: RR to be programmed
4897 * This function gets called when refresh rate (RR) has to be changed from
4898 * one frequency to another. Switches can be between high and low RR
4899 * supported by the panel or to any other RR based on media playback (in
4900 * this case, RR value needs to be passed from user space).
4902 * The caller of this function needs to take a lock on dev_priv->drrs.
4904 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4906 struct drm_i915_private *dev_priv = dev->dev_private;
4907 struct intel_encoder *encoder;
4908 struct intel_digital_port *dig_port = NULL;
4909 struct intel_dp *intel_dp = dev_priv->drrs.dp;
4910 struct intel_crtc_state *config = NULL;
4911 struct intel_crtc *intel_crtc = NULL;
4913 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4915 if (refresh_rate <= 0) {
4916 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4920 if (intel_dp == NULL) {
4921 DRM_DEBUG_KMS("DRRS not supported.\n");
4926 * FIXME: This needs proper synchronization with psr state for some
4927 * platforms that cannot have PSR and DRRS enabled at the same time.
4930 dig_port = dp_to_dig_port(intel_dp);
4931 encoder = &dig_port->base;
4932 intel_crtc = encoder->new_crtc;
4935 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4939 config = intel_crtc->config;
4941 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
4942 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
/* Requested rate matching the panel's downclock mode means low RR. */
4946 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4948 index = DRRS_LOW_RR;
4950 if (index == dev_priv->drrs.refresh_rate_type) {
4952 "DRRS requested for previously set RR...ignoring\n");
4956 if (!intel_crtc->active) {
4957 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
/* Gen8+ (except CHV) switches RR by reprogramming the link M/N values. */
4961 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
4964 intel_dp_set_m_n(intel_crtc, M1_N1);
4967 intel_dp_set_m_n(intel_crtc, M2_N2);
4971 DRM_ERROR("Unsupported refreshrate type\n");
/* Older gens toggle the RR mode switch bit in PIPECONF instead. */
4973 } else if (INTEL_INFO(dev)->gen > 6) {
4974 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
4975 val = I915_READ(reg);
4977 if (index > DRRS_HIGH_RR) {
4978 if (IS_VALLEYVIEW(dev))
4979 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4981 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4983 if (IS_VALLEYVIEW(dev))
4984 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4986 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4988 I915_WRITE(reg, val);
4991 dev_priv->drrs.refresh_rate_type = index;
4993 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4997 * intel_edp_drrs_enable - init drrs struct if supported
4998 * @intel_dp: DP struct
5000 * Initializes frontbuffer_bits and drrs.dp
5002 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5004 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5005 struct drm_i915_private *dev_priv = dev->dev_private;
5006 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5007 struct drm_crtc *crtc = dig_port->base.base.crtc;
5008 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5010 if (!intel_crtc->config->has_drrs) {
5011 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5015 mutex_lock(&dev_priv->drrs.mutex);
/* Enabling twice indicates a driver logic bug; warn and bail. */
5016 if (WARN_ON(dev_priv->drrs.dp)) {
5017 DRM_ERROR("DRRS already enabled\n");
5021 dev_priv->drrs.busy_frontbuffer_bits = 0;
5023 dev_priv->drrs.dp = intel_dp;
5026 mutex_unlock(&dev_priv->drrs.mutex);
5030 * intel_edp_drrs_disable - Disable DRRS
5031 * @intel_dp: DP struct
5034 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5036 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5037 struct drm_i915_private *dev_priv = dev->dev_private;
5038 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5039 struct drm_crtc *crtc = dig_port->base.base.crtc;
5040 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5042 if (!intel_crtc->config->has_drrs)
5045 mutex_lock(&dev_priv->drrs.mutex);
5046 if (!dev_priv->drrs.dp) {
5047 mutex_unlock(&dev_priv->drrs.mutex);
/* If we're currently at the low refresh rate, restore the fixed mode RR
 * before tearing DRRS down. */
5051 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5052 intel_dp_set_drrs_state(dev_priv->dev,
5053 intel_dp->attached_connector->panel.
5054 fixed_mode->vrefresh);
5056 dev_priv->drrs.dp = NULL;
5057 mutex_unlock(&dev_priv->drrs.mutex);
/* Cancel outside the mutex; the work itself takes drrs.mutex. */
5059 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed work that drops to the panel's downclock (low) refresh rate
 * once the screen has been idle, unless frontbuffer bits are still busy.
 */
5062 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5064 struct drm_i915_private *dev_priv =
5065 container_of(work, typeof(*dev_priv), drrs.work.work);
5066 struct intel_dp *intel_dp;
5068 mutex_lock(&dev_priv->drrs.mutex);
5070 intel_dp = dev_priv->drrs.dp;
5076 * The delayed work can race with an invalidate hence we need to
5080 if (dev_priv->drrs.busy_frontbuffer_bits)
5083 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5084 intel_dp_set_drrs_state(dev_priv->dev,
5085 intel_dp->attached_connector->panel.
5086 downclock_mode->vrefresh);
5090 mutex_unlock(&dev_priv->drrs.mutex);
5094 * intel_edp_drrs_invalidate - Invalidate DRRS
5096 * @frontbuffer_bits: frontbuffer plane tracking bits
5098 * When there is a disturbance on screen (due to cursor movement/time
5099 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5102 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5104 void intel_edp_drrs_invalidate(struct drm_device *dev,
5105 unsigned frontbuffer_bits)
5107 struct drm_i915_private *dev_priv = dev->dev_private;
5108 struct drm_crtc *crtc;
5111 if (!dev_priv->drrs.dp)
/* Stop any pending downclock work before switching back to high RR. */
5114 cancel_delayed_work_sync(&dev_priv->drrs.work);
5116 mutex_lock(&dev_priv->drrs.mutex);
5117 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5118 pipe = to_intel_crtc(crtc)->pipe;
5120 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5121 intel_dp_set_drrs_state(dev_priv->dev,
5122 dev_priv->drrs.dp->attached_connector->panel.
5123 fixed_mode->vrefresh);
/* Only track frontbuffer bits belonging to the DRRS pipe as busy. */
5126 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5128 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5129 mutex_unlock(&dev_priv->drrs.mutex);
5133 * intel_edp_drrs_flush - Flush DRRS
5135 * @frontbuffer_bits: frontbuffer plane tracking bits
5137 * When there is no movement on screen, DRRS work can be scheduled.
5138 * This DRRS work is responsible for setting relevant registers after a
5139 * timeout of 1 second.
5141 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5143 void intel_edp_drrs_flush(struct drm_device *dev,
5144 unsigned frontbuffer_bits)
5146 struct drm_i915_private *dev_priv = dev->dev_private;
5147 struct drm_crtc *crtc;
5150 if (!dev_priv->drrs.dp)
5153 cancel_delayed_work_sync(&dev_priv->drrs.work);
5155 mutex_lock(&dev_priv->drrs.mutex);
5156 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5157 pipe = to_intel_crtc(crtc)->pipe;
/* Clear the flushed planes; once nothing is busy, schedule downclock. */
5158 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5160 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5161 !dev_priv->drrs.busy_frontbuffer_bits)
5162 schedule_delayed_work(&dev_priv->drrs.work,
5163 msecs_to_jiffies(1000));
5164 mutex_unlock(&dev_priv->drrs.mutex);
5168 * DOC: Display Refresh Rate Switching (DRRS)
5170 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5171 * which enables switching between low and high refresh rates,
5172 * dynamically, based on the usage scenario. This feature is applicable
5173 * for internal panels.
5175 * Indication that the panel supports DRRS is given by the panel EDID, which
5176 * would list multiple refresh rates for one resolution.
5178 * DRRS is of 2 types - static and seamless.
5179 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5180 * (may appear as a blink on screen) and is used in dock-undock scenario.
5181 * Seamless DRRS involves changing RR without any visual effect to the user
5182 * and can be used during normal system usage. This is done by programming
5183 * certain registers.
5185 * Support for static/seamless DRRS may be indicated in the VBT based on
5186 * inputs from the panel spec.
5188 * DRRS saves power by switching to low RR based on usage scenarios.
5191 * The implementation is based on frontbuffer tracking implementation.
5192 * When there is a disturbance on the screen triggered by user activity or a
5193 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5194 * When there is no movement on screen, after a timeout of 1 second, a switch
5195 * to low RR is made.
5196 * For integration with frontbuffer tracking code,
5197 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5199 * DRRS can be further extended to support other internal panels and also
5200 * the scenario of video playback wherein RR is set based on the rate
5201 * requested by userspace.
5205 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5206 * @intel_connector: eDP connector
5207 * @fixed_mode: preferred mode of panel
5209 * This function is called only once at driver load to initialize basic
5213 * Downclock mode if panel supports it, else return NULL.
5214 * DRRS support is determined by the presence of downclock mode (apart
5215 * from VBT setting).
5217 static struct drm_display_mode *
5218 intel_dp_drrs_init(struct intel_connector *intel_connector,
5219 struct drm_display_mode *fixed_mode)
5221 struct drm_connector *connector = &intel_connector->base;
5222 struct drm_device *dev = connector->dev;
5223 struct drm_i915_private *dev_priv = dev->dev_private;
5224 struct drm_display_mode *downclock_mode = NULL;
/* DRRS is only supported on gen7 and newer platforms. */
5226 if (INTEL_INFO(dev)->gen <= 6) {
5227 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5231 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5232 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
/* The panel must expose a lower-refresh variant of the fixed mode. */
5236 downclock_mode = intel_find_panel_downclock
5237 (dev, fixed_mode, connector);
5239 if (!downclock_mode) {
5240 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5244 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5246 mutex_init(&dev_priv->drrs.mutex);
5248 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5250 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5251 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5252 return downclock_mode;
/*
 * eDP-specific connector setup: cache DPCD and EDID, determine the fixed
 * panel mode (EDID preferred, VBT fallback), set up DRRS, the panel and
 * the backlight.  Returns false if the panel looks like a ghost (no link
 * info could be retrieved).
 */
5255 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5256 struct intel_connector *intel_connector)
5258 struct drm_connector *connector = &intel_connector->base;
5259 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5260 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5261 struct drm_device *dev = intel_encoder->base.dev;
5262 struct drm_i915_private *dev_priv = dev->dev_private;
5263 struct drm_display_mode *fixed_mode = NULL;
5264 struct drm_display_mode *downclock_mode = NULL;
5266 struct drm_display_mode *scan;
5268 enum pipe pipe = INVALID_PIPE;
5270 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
5272 if (!is_edp(intel_dp))
5276 intel_edp_panel_vdd_sanitize(intel_dp);
5277 pps_unlock(intel_dp);
5279 /* Cache DPCD and EDID for edp. */
5280 has_dpcd = intel_dp_get_dpcd(intel_dp);
5283 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5284 dev_priv->no_aux_handshake =
5285 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5286 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5288 /* if this fails, presume the device is a ghost */
5289 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5293 /* We now know it's not a ghost, init power sequence regs. */
5295 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5296 pps_unlock(intel_dp);
5298 mutex_lock(&dev->mode_config.mutex);
5299 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5301 if (drm_add_edid_modes(connector, edid)) {
5302 drm_mode_connector_update_edid_property(connector,
5304 drm_edid_to_eld(connector, edid);
/* Keep an error pointer so later consumers can tell "bad EDID" from
 * "no EDID". */
5307 edid = ERR_PTR(-EINVAL);
5310 edid = ERR_PTR(-ENOENT);
5312 intel_connector->edid = edid;
5314 /* prefer fixed mode from EDID if available */
5315 list_for_each_entry(scan, &connector->probed_modes, head) {
5316 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5317 fixed_mode = drm_mode_duplicate(dev, scan);
5318 downclock_mode = intel_dp_drrs_init(
5319 intel_connector, fixed_mode);
5324 /* fallback to VBT if available for eDP */
5325 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5326 fixed_mode = drm_mode_duplicate(dev,
5327 dev_priv->vbt.lfp_lvds_vbt_mode);
5329 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5331 mutex_unlock(&dev->mode_config.mutex);
5333 if (IS_VALLEYVIEW(dev)) {
5334 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5335 register_reboot_notifier(&intel_dp->edp_notifier);
5338 * Figure out the current pipe for the initial backlight setup.
5339 * If the current pipe isn't valid, try the PPS pipe, and if that
5340 * fails just assume pipe A.
5342 if (IS_CHERRYVIEW(dev))
5343 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5345 pipe = PORT_TO_PIPE(intel_dp->DP);
5347 if (pipe != PIPE_A && pipe != PIPE_B)
5348 pipe = intel_dp->pps_pipe;
5350 if (pipe != PIPE_A && pipe != PIPE_B)
5353 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5357 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5358 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5359 intel_panel_setup_backlight(connector, pipe);
/*
 * Common DP/eDP connector initialization: select AUX vfuncs per platform,
 * register the connector, set up hotplug, PPS (for eDP), AUX, MST and
 * connector properties.  Returns false and cleans up if the eDP connector
 * turns out to be a ghost.
 */
5365 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5366 struct intel_connector *intel_connector)
5368 struct drm_connector *connector = &intel_connector->base;
5369 struct intel_dp *intel_dp = &intel_dig_port->dp;
5370 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5371 struct drm_device *dev = intel_encoder->base.dev;
5372 struct drm_i915_private *dev_priv = dev->dev_private;
5373 enum port port = intel_dig_port->port;
5376 intel_dp->pps_pipe = INVALID_PIPE;
5378 /* intel_dp vfuncs */
5379 if (INTEL_INFO(dev)->gen >= 9)
5380 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5381 else if (IS_VALLEYVIEW(dev))
5382 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5383 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5384 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5385 else if (HAS_PCH_SPLIT(dev))
5386 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5388 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5390 if (INTEL_INFO(dev)->gen >= 9)
5391 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5393 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5395 /* Preserve the current hw state. */
5396 intel_dp->DP = I915_READ(intel_dp->output_reg);
5397 intel_dp->attached_connector = intel_connector;
5399 if (intel_dp_is_edp(dev, port))
5400 type = DRM_MODE_CONNECTOR_eDP;
5402 type = DRM_MODE_CONNECTOR_DisplayPort;
5405 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5406 * for DP the encoder type can be set by the caller to
5407 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5409 if (type == DRM_MODE_CONNECTOR_eDP)
5410 intel_encoder->type = INTEL_OUTPUT_EDP;
5412 /* eDP only on port B and/or C on vlv/chv */
5413 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5414 port != PORT_B && port != PORT_C))
5417 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5418 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5421 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5422 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5424 connector->interlace_allowed = true;
5425 connector->doublescan_allowed = 0;
5427 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5428 edp_panel_vdd_work);
5430 intel_connector_attach_encoder(intel_connector, intel_encoder);
5431 drm_connector_register(connector);
5434 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5436 intel_connector->get_hw_state = intel_connector_get_hw_state;
5437 intel_connector->unregister = intel_dp_connector_unregister;
5439 /* Set up the hotplug pin. */
5442 intel_encoder->hpd_pin = HPD_PORT_A;
5445 intel_encoder->hpd_pin = HPD_PORT_B;
5448 intel_encoder->hpd_pin = HPD_PORT_C;
5451 intel_encoder->hpd_pin = HPD_PORT_D;
5457 if (is_edp(intel_dp)) {
5459 intel_dp_init_panel_power_timestamps(intel_dp);
5460 if (IS_VALLEYVIEW(dev))
5461 vlv_initial_power_sequencer_setup(intel_dp);
5463 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5464 pps_unlock(intel_dp);
5467 intel_dp_aux_init(intel_dp, intel_connector);
5469 /* init MST on ports that can support it */
5470 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5471 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5472 intel_dp_mst_encoder_init(intel_dig_port,
5473 intel_connector->base.base.id);
/* Ghost eDP panel: unwind everything we registered above. */
5477 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5478 drm_dp_aux_unregister(&intel_dp->aux);
5479 if (is_edp(intel_dp)) {
5480 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5482 * vdd might still be enabled due to the delayed vdd off.
5483 * Make sure vdd is actually turned off here.
5486 edp_panel_vdd_off_sync(intel_dp);
5487 pps_unlock(intel_dp);
5489 drm_connector_unregister(connector);
5490 drm_connector_cleanup(connector);
5494 intel_dp_add_properties(intel_dp, connector);
5496 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5497 * 0xd. Failure to do so will result in spurious interrupts being
5498 * generated on the port when a cable is not attached.
5500 if (IS_G4X(dev) && !IS_GM45(dev)) {
5501 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5502 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
/*
 * Allocate and initialize a DP digital port + connector for @port, wiring
 * up the platform-specific encoder enable/disable hooks.  Frees the
 * allocations again if connector init fails.
 */
5509 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5511 struct drm_i915_private *dev_priv = dev->dev_private;
5512 struct intel_digital_port *intel_dig_port;
5513 struct intel_encoder *intel_encoder;
5514 struct drm_encoder *encoder;
5515 struct intel_connector *intel_connector;
5517 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5518 if (!intel_dig_port)
5521 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5522 if (!intel_connector) {
5523 kfree(intel_dig_port);
5527 intel_encoder = &intel_dig_port->base;
5528 encoder = &intel_encoder->base;
5530 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5531 DRM_MODE_ENCODER_TMDS);
5533 intel_encoder->compute_config = intel_dp_compute_config;
5534 intel_encoder->disable = intel_disable_dp;
5535 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5536 intel_encoder->get_config = intel_dp_get_config;
5537 intel_encoder->suspend = intel_dp_encoder_suspend;
/* Platform-specific enable/disable sequences: CHV, VLV, then the rest. */
5538 if (IS_CHERRYVIEW(dev)) {
5539 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5540 intel_encoder->pre_enable = chv_pre_enable_dp;
5541 intel_encoder->enable = vlv_enable_dp;
5542 intel_encoder->post_disable = chv_post_disable_dp;
5543 } else if (IS_VALLEYVIEW(dev)) {
5544 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5545 intel_encoder->pre_enable = vlv_pre_enable_dp;
5546 intel_encoder->enable = vlv_enable_dp;
5547 intel_encoder->post_disable = vlv_post_disable_dp;
5549 intel_encoder->pre_enable = g4x_pre_enable_dp;
5550 intel_encoder->enable = g4x_enable_dp;
5551 if (INTEL_INFO(dev)->gen >= 5)
5552 intel_encoder->post_disable = ilk_post_disable_dp;
5555 intel_dig_port->port = port;
5556 intel_dig_port->dp.output_reg = output_reg;
5558 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5559 if (IS_CHERRYVIEW(dev)) {
5561 intel_encoder->crtc_mask = 1 << 2;
5563 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5565 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5567 intel_encoder->cloneable = 0;
5568 intel_encoder->hot_plug = intel_dp_hot_plug;
5570 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5571 dev_priv->hpd_irq_port[port] = intel_dig_port;
/* On connector-init failure, tear down the encoder and free memory. */
5573 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5574 drm_encoder_cleanup(encoder);
5575 kfree(intel_dig_port);
5576 kfree(intel_connector);
/*
 * Suspend the MST topology manager on every MST-capable DP port that is
 * currently operating in MST mode.
 */
5580 void intel_dp_mst_suspend(struct drm_device *dev)
5582 struct drm_i915_private *dev_priv = dev->dev_private;
5586 for (i = 0; i < I915_MAX_PORTS; i++) {
5587 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5588 if (!intel_dig_port)
5591 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5592 if (!intel_dig_port->dp.can_mst)
5594 if (intel_dig_port->dp.is_mst)
5595 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5600 void intel_dp_mst_resume(struct drm_device *dev)
5602 struct drm_i915_private *dev_priv = dev->dev_private;
5605 for (i = 0; i < I915_MAX_PORTS; i++) {
5606 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5607 if (!intel_dig_port)
5609 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5612 if (!intel_dig_port->dp.can_mst)
5615 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5617 intel_dp_check_mst_status(&intel_dig_port->dp);