/*
 * Source: drivers/gpu/drm/i915/intel_display.c (karo-tx-linux.git mirror)
 * Snapshot at commit: "drm/i915: Generalize cursor size checks a bit"
 */
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_frontbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_gem_clflush.h"
41 #include "intel_dsi.h"
42 #include "i915_trace.h"
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_helper.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_plane_helper.h>
48 #include <drm/drm_rect.h>
49 #include <linux/dma_remapping.h>
50 #include <linux/reservation.h>
51
52 static bool is_mmio_work(struct intel_flip_work *work)
53 {
54         return work->mmio_work.func;
55 }
56
57 /* Primary plane formats for gen <= 3 */
58 static const uint32_t i8xx_primary_formats[] = {
59         DRM_FORMAT_C8,
60         DRM_FORMAT_RGB565,
61         DRM_FORMAT_XRGB1555,
62         DRM_FORMAT_XRGB8888,
63 };
64
65 /* Primary plane formats for gen >= 4 */
66 static const uint32_t i965_primary_formats[] = {
67         DRM_FORMAT_C8,
68         DRM_FORMAT_RGB565,
69         DRM_FORMAT_XRGB8888,
70         DRM_FORMAT_XBGR8888,
71         DRM_FORMAT_XRGB2101010,
72         DRM_FORMAT_XBGR2101010,
73 };
74
75 static const uint32_t skl_primary_formats[] = {
76         DRM_FORMAT_C8,
77         DRM_FORMAT_RGB565,
78         DRM_FORMAT_XRGB8888,
79         DRM_FORMAT_XBGR8888,
80         DRM_FORMAT_ARGB8888,
81         DRM_FORMAT_ABGR8888,
82         DRM_FORMAT_XRGB2101010,
83         DRM_FORMAT_XBGR2101010,
84         DRM_FORMAT_YUYV,
85         DRM_FORMAT_YVYU,
86         DRM_FORMAT_UYVY,
87         DRM_FORMAT_VYUY,
88 };
89
90 /* Cursor formats */
91 static const uint32_t intel_cursor_formats[] = {
92         DRM_FORMAT_ARGB8888,
93 };
94
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125
/*
 * Valid ranges for the DPLL divisors and derived clocks, plus the p2
 * post-divider selection rule (p2_slow below dot_limit, p2_fast at or
 * above it) used by the find_best_dpll helpers.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
136
137 /* returns HPLL frequency in kHz */
138 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
139 {
140         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
141
142         /* Obtain SKU information */
143         mutex_lock(&dev_priv->sb_lock);
144         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
145                 CCK_FUSE_HPLL_FREQ_MASK;
146         mutex_unlock(&dev_priv->sb_lock);
147
148         return vco_freq[hpll_freq] * 1000;
149 }
150
151 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
152                       const char *name, u32 reg, int ref_freq)
153 {
154         u32 val;
155         int divider;
156
157         mutex_lock(&dev_priv->sb_lock);
158         val = vlv_cck_read(dev_priv, reg);
159         mutex_unlock(&dev_priv->sb_lock);
160
161         divider = val & CCK_FREQUENCY_VALUES;
162
163         WARN((val & CCK_FREQUENCY_STATUS) !=
164              (divider << CCK_FREQUENCY_STATUS_SHIFT),
165              "%s change in progress\n", name);
166
167         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
168 }
169
170 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
171                            const char *name, u32 reg)
172 {
173         if (dev_priv->hpll_freq == 0)
174                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
175
176         return vlv_get_cck_clock(dev_priv, name, reg,
177                                  dev_priv->hpll_freq);
178 }
179
180 static void intel_update_czclk(struct drm_i915_private *dev_priv)
181 {
182         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
183                 return;
184
185         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
186                                                       CCK_CZ_CLOCK_CONTROL);
187
188         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
189 }
190
191 static inline u32 /* units of 100MHz */
192 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
193                     const struct intel_crtc_state *pipe_config)
194 {
195         if (HAS_DDI(dev_priv))
196                 return pipe_config->port_clock; /* SPLL */
197         else if (IS_GEN5(dev_priv))
198                 return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
199         else
200                 return 270000;
201 }
202
203 static const struct intel_limit intel_limits_i8xx_dac = {
204         .dot = { .min = 25000, .max = 350000 },
205         .vco = { .min = 908000, .max = 1512000 },
206         .n = { .min = 2, .max = 16 },
207         .m = { .min = 96, .max = 140 },
208         .m1 = { .min = 18, .max = 26 },
209         .m2 = { .min = 6, .max = 16 },
210         .p = { .min = 4, .max = 128 },
211         .p1 = { .min = 2, .max = 33 },
212         .p2 = { .dot_limit = 165000,
213                 .p2_slow = 4, .p2_fast = 2 },
214 };
215
216 static const struct intel_limit intel_limits_i8xx_dvo = {
217         .dot = { .min = 25000, .max = 350000 },
218         .vco = { .min = 908000, .max = 1512000 },
219         .n = { .min = 2, .max = 16 },
220         .m = { .min = 96, .max = 140 },
221         .m1 = { .min = 18, .max = 26 },
222         .m2 = { .min = 6, .max = 16 },
223         .p = { .min = 4, .max = 128 },
224         .p1 = { .min = 2, .max = 33 },
225         .p2 = { .dot_limit = 165000,
226                 .p2_slow = 4, .p2_fast = 4 },
227 };
228
229 static const struct intel_limit intel_limits_i8xx_lvds = {
230         .dot = { .min = 25000, .max = 350000 },
231         .vco = { .min = 908000, .max = 1512000 },
232         .n = { .min = 2, .max = 16 },
233         .m = { .min = 96, .max = 140 },
234         .m1 = { .min = 18, .max = 26 },
235         .m2 = { .min = 6, .max = 16 },
236         .p = { .min = 4, .max = 128 },
237         .p1 = { .min = 1, .max = 6 },
238         .p2 = { .dot_limit = 165000,
239                 .p2_slow = 14, .p2_fast = 7 },
240 };
241
242 static const struct intel_limit intel_limits_i9xx_sdvo = {
243         .dot = { .min = 20000, .max = 400000 },
244         .vco = { .min = 1400000, .max = 2800000 },
245         .n = { .min = 1, .max = 6 },
246         .m = { .min = 70, .max = 120 },
247         .m1 = { .min = 8, .max = 18 },
248         .m2 = { .min = 3, .max = 7 },
249         .p = { .min = 5, .max = 80 },
250         .p1 = { .min = 1, .max = 8 },
251         .p2 = { .dot_limit = 200000,
252                 .p2_slow = 10, .p2_fast = 5 },
253 };
254
255 static const struct intel_limit intel_limits_i9xx_lvds = {
256         .dot = { .min = 20000, .max = 400000 },
257         .vco = { .min = 1400000, .max = 2800000 },
258         .n = { .min = 1, .max = 6 },
259         .m = { .min = 70, .max = 120 },
260         .m1 = { .min = 8, .max = 18 },
261         .m2 = { .min = 3, .max = 7 },
262         .p = { .min = 7, .max = 98 },
263         .p1 = { .min = 1, .max = 8 },
264         .p2 = { .dot_limit = 112000,
265                 .p2_slow = 14, .p2_fast = 7 },
266 };
267
268
269 static const struct intel_limit intel_limits_g4x_sdvo = {
270         .dot = { .min = 25000, .max = 270000 },
271         .vco = { .min = 1750000, .max = 3500000},
272         .n = { .min = 1, .max = 4 },
273         .m = { .min = 104, .max = 138 },
274         .m1 = { .min = 17, .max = 23 },
275         .m2 = { .min = 5, .max = 11 },
276         .p = { .min = 10, .max = 30 },
277         .p1 = { .min = 1, .max = 3},
278         .p2 = { .dot_limit = 270000,
279                 .p2_slow = 10,
280                 .p2_fast = 10
281         },
282 };
283
284 static const struct intel_limit intel_limits_g4x_hdmi = {
285         .dot = { .min = 22000, .max = 400000 },
286         .vco = { .min = 1750000, .max = 3500000},
287         .n = { .min = 1, .max = 4 },
288         .m = { .min = 104, .max = 138 },
289         .m1 = { .min = 16, .max = 23 },
290         .m2 = { .min = 5, .max = 11 },
291         .p = { .min = 5, .max = 80 },
292         .p1 = { .min = 1, .max = 8},
293         .p2 = { .dot_limit = 165000,
294                 .p2_slow = 10, .p2_fast = 5 },
295 };
296
297 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
298         .dot = { .min = 20000, .max = 115000 },
299         .vco = { .min = 1750000, .max = 3500000 },
300         .n = { .min = 1, .max = 3 },
301         .m = { .min = 104, .max = 138 },
302         .m1 = { .min = 17, .max = 23 },
303         .m2 = { .min = 5, .max = 11 },
304         .p = { .min = 28, .max = 112 },
305         .p1 = { .min = 2, .max = 8 },
306         .p2 = { .dot_limit = 0,
307                 .p2_slow = 14, .p2_fast = 14
308         },
309 };
310
311 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
312         .dot = { .min = 80000, .max = 224000 },
313         .vco = { .min = 1750000, .max = 3500000 },
314         .n = { .min = 1, .max = 3 },
315         .m = { .min = 104, .max = 138 },
316         .m1 = { .min = 17, .max = 23 },
317         .m2 = { .min = 5, .max = 11 },
318         .p = { .min = 14, .max = 42 },
319         .p1 = { .min = 2, .max = 6 },
320         .p2 = { .dot_limit = 0,
321                 .p2_slow = 7, .p2_fast = 7
322         },
323 };
324
325 static const struct intel_limit intel_limits_pineview_sdvo = {
326         .dot = { .min = 20000, .max = 400000},
327         .vco = { .min = 1700000, .max = 3500000 },
328         /* Pineview's Ncounter is a ring counter */
329         .n = { .min = 3, .max = 6 },
330         .m = { .min = 2, .max = 256 },
331         /* Pineview only has one combined m divider, which we treat as m2. */
332         .m1 = { .min = 0, .max = 0 },
333         .m2 = { .min = 0, .max = 254 },
334         .p = { .min = 5, .max = 80 },
335         .p1 = { .min = 1, .max = 8 },
336         .p2 = { .dot_limit = 200000,
337                 .p2_slow = 10, .p2_fast = 5 },
338 };
339
340 static const struct intel_limit intel_limits_pineview_lvds = {
341         .dot = { .min = 20000, .max = 400000 },
342         .vco = { .min = 1700000, .max = 3500000 },
343         .n = { .min = 3, .max = 6 },
344         .m = { .min = 2, .max = 256 },
345         .m1 = { .min = 0, .max = 0 },
346         .m2 = { .min = 0, .max = 254 },
347         .p = { .min = 7, .max = 112 },
348         .p1 = { .min = 1, .max = 8 },
349         .p2 = { .dot_limit = 112000,
350                 .p2_slow = 14, .p2_fast = 14 },
351 };
352
353 /* Ironlake / Sandybridge
354  *
355  * We calculate clock using (register_value + 2) for N/M1/M2, so here
356  * the range value for them is (actual_value - 2).
357  */
358 static const struct intel_limit intel_limits_ironlake_dac = {
359         .dot = { .min = 25000, .max = 350000 },
360         .vco = { .min = 1760000, .max = 3510000 },
361         .n = { .min = 1, .max = 5 },
362         .m = { .min = 79, .max = 127 },
363         .m1 = { .min = 12, .max = 22 },
364         .m2 = { .min = 5, .max = 9 },
365         .p = { .min = 5, .max = 80 },
366         .p1 = { .min = 1, .max = 8 },
367         .p2 = { .dot_limit = 225000,
368                 .p2_slow = 10, .p2_fast = 5 },
369 };
370
371 static const struct intel_limit intel_limits_ironlake_single_lvds = {
372         .dot = { .min = 25000, .max = 350000 },
373         .vco = { .min = 1760000, .max = 3510000 },
374         .n = { .min = 1, .max = 3 },
375         .m = { .min = 79, .max = 118 },
376         .m1 = { .min = 12, .max = 22 },
377         .m2 = { .min = 5, .max = 9 },
378         .p = { .min = 28, .max = 112 },
379         .p1 = { .min = 2, .max = 8 },
380         .p2 = { .dot_limit = 225000,
381                 .p2_slow = 14, .p2_fast = 14 },
382 };
383
384 static const struct intel_limit intel_limits_ironlake_dual_lvds = {
385         .dot = { .min = 25000, .max = 350000 },
386         .vco = { .min = 1760000, .max = 3510000 },
387         .n = { .min = 1, .max = 3 },
388         .m = { .min = 79, .max = 127 },
389         .m1 = { .min = 12, .max = 22 },
390         .m2 = { .min = 5, .max = 9 },
391         .p = { .min = 14, .max = 56 },
392         .p1 = { .min = 2, .max = 8 },
393         .p2 = { .dot_limit = 225000,
394                 .p2_slow = 7, .p2_fast = 7 },
395 };
396
397 /* LVDS 100mhz refclk limits. */
398 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
399         .dot = { .min = 25000, .max = 350000 },
400         .vco = { .min = 1760000, .max = 3510000 },
401         .n = { .min = 1, .max = 2 },
402         .m = { .min = 79, .max = 126 },
403         .m1 = { .min = 12, .max = 22 },
404         .m2 = { .min = 5, .max = 9 },
405         .p = { .min = 28, .max = 112 },
406         .p1 = { .min = 2, .max = 8 },
407         .p2 = { .dot_limit = 225000,
408                 .p2_slow = 14, .p2_fast = 14 },
409 };
410
411 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
412         .dot = { .min = 25000, .max = 350000 },
413         .vco = { .min = 1760000, .max = 3510000 },
414         .n = { .min = 1, .max = 3 },
415         .m = { .min = 79, .max = 126 },
416         .m1 = { .min = 12, .max = 22 },
417         .m2 = { .min = 5, .max = 9 },
418         .p = { .min = 14, .max = 42 },
419         .p1 = { .min = 2, .max = 6 },
420         .p2 = { .dot_limit = 225000,
421                 .p2_slow = 7, .p2_fast = 7 },
422 };
423
424 static const struct intel_limit intel_limits_vlv = {
425          /*
426           * These are the data rate limits (measured in fast clocks)
427           * since those are the strictest limits we have. The fast
428           * clock and actual rate limits are more relaxed, so checking
429           * them would make no difference.
430           */
431         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
432         .vco = { .min = 4000000, .max = 6000000 },
433         .n = { .min = 1, .max = 7 },
434         .m1 = { .min = 2, .max = 3 },
435         .m2 = { .min = 11, .max = 156 },
436         .p1 = { .min = 2, .max = 3 },
437         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
438 };
439
440 static const struct intel_limit intel_limits_chv = {
441         /*
442          * These are the data rate limits (measured in fast clocks)
443          * since those are the strictest limits we have.  The fast
444          * clock and actual rate limits are more relaxed, so checking
445          * them would make no difference.
446          */
447         .dot = { .min = 25000 * 5, .max = 540000 * 5},
448         .vco = { .min = 4800000, .max = 6480000 },
449         .n = { .min = 1, .max = 1 },
450         .m1 = { .min = 2, .max = 2 },
451         .m2 = { .min = 24 << 22, .max = 175 << 22 },
452         .p1 = { .min = 2, .max = 4 },
453         .p2 = { .p2_slow = 1, .p2_fast = 14 },
454 };
455
456 static const struct intel_limit intel_limits_bxt = {
457         /* FIXME: find real dot limits */
458         .dot = { .min = 0, .max = INT_MAX },
459         .vco = { .min = 4800000, .max = 6700000 },
460         .n = { .min = 1, .max = 1 },
461         .m1 = { .min = 2, .max = 2 },
462         /* FIXME: find real m2 limits */
463         .m2 = { .min = 2 << 22, .max = 255 << 22 },
464         .p1 = { .min = 2, .max = 4 },
465         .p2 = { .p2_slow = 1, .p2_fast = 20 },
466 };
467
/* Convenience wrapper around the atomic helper's modeset check. */
static bool needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
473
474 /*
475  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
476  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
477  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
478  * The helpers' return value is the rate of the clock that is fed to the
479  * display engine's pipe which can be the above fast dot clock rate or a
480  * divided-down version of it.
481  */
482 /* m1 is reserved as 0 in Pineview, n is a ring counter */
483 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
484 {
485         clock->m = clock->m2 + 2;
486         clock->p = clock->p1 * clock->p2;
487         if (WARN_ON(clock->n == 0 || clock->p == 0))
488                 return 0;
489         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
490         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
491
492         return clock->dot;
493 }
494
495 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
496 {
497         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
498 }
499
500 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
501 {
502         clock->m = i9xx_dpll_compute_m(clock);
503         clock->p = clock->p1 * clock->p2;
504         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
505                 return 0;
506         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
507         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
508
509         return clock->dot;
510 }
511
512 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
513 {
514         clock->m = clock->m1 * clock->m2;
515         clock->p = clock->p1 * clock->p2;
516         if (WARN_ON(clock->n == 0 || clock->p == 0))
517                 return 0;
518         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
519         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
520
521         return clock->dot / 5;
522 }
523
524 int chv_calc_dpll_params(int refclk, struct dpll *clock)
525 {
526         clock->m = clock->m1 * clock->m2;
527         clock->p = clock->p1 * clock->p2;
528         if (WARN_ON(clock->n == 0 || clock->p == 0))
529                 return 0;
530         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
531                         clock->n << 22);
532         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
533
534         return clock->dot / 5;
535 }
536
537 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
538 /**
539  * Returns whether the given set of divisors are valid for a given refclk with
540  * the given connectors.
541  */
542
543 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
544                                const struct intel_limit *limit,
545                                const struct dpll *clock)
546 {
547         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
548                 INTELPllInvalid("n out of range\n");
549         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
550                 INTELPllInvalid("p1 out of range\n");
551         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
552                 INTELPllInvalid("m2 out of range\n");
553         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
554                 INTELPllInvalid("m1 out of range\n");
555
556         if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
557             !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
558                 if (clock->m1 <= clock->m2)
559                         INTELPllInvalid("m1 <= m2\n");
560
561         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
562             !IS_GEN9_LP(dev_priv)) {
563                 if (clock->p < limit->p.min || limit->p.max < clock->p)
564                         INTELPllInvalid("p out of range\n");
565                 if (clock->m < limit->m.min || limit->m.max < clock->m)
566                         INTELPllInvalid("m out of range\n");
567         }
568
569         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
570                 INTELPllInvalid("vco out of range\n");
571         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
572          * connector, etc., rather than just a single range.
573          */
574         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
575                 INTELPllInvalid("dot out of range\n");
576
577         return true;
578 }
579
580 static int
581 i9xx_select_p2_div(const struct intel_limit *limit,
582                    const struct intel_crtc_state *crtc_state,
583                    int target)
584 {
585         struct drm_device *dev = crtc_state->base.crtc->dev;
586
587         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
588                 /*
589                  * For LVDS just rely on its current settings for dual-channel.
590                  * We haven't figured out how to reliably set up different
591                  * single/dual channel state, if we even can.
592                  */
593                 if (intel_is_dual_link_lvds(dev))
594                         return limit->p2.p2_fast;
595                 else
596                         return limit->p2.p2_slow;
597         } else {
598                 if (target < limit->p2.dot_limit)
599                         return limit->p2.p2_slow;
600                 else
601                         return limit->p2.p2_fast;
602         }
603 }
604
605 /*
606  * Returns a set of divisors for the desired target clock with the given
607  * refclk, or FALSE.  The returned values represent the clock equation:
608  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
609  *
610  * Target and reference clocks are specified in kHz.
611  *
612  * If match_clock is provided, then best_clock P divider must match the P
613  * divider from @match_clock used for LVDS downclocking.
614  */
615 static bool
616 i9xx_find_best_dpll(const struct intel_limit *limit,
617                     struct intel_crtc_state *crtc_state,
618                     int target, int refclk, struct dpll *match_clock,
619                     struct dpll *best_clock)
620 {
621         struct drm_device *dev = crtc_state->base.crtc->dev;
622         struct dpll clock;
623         int err = target;
624
625         memset(best_clock, 0, sizeof(*best_clock));
626
627         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
628
629         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
630              clock.m1++) {
631                 for (clock.m2 = limit->m2.min;
632                      clock.m2 <= limit->m2.max; clock.m2++) {
633                         if (clock.m2 >= clock.m1)
634                                 break;
635                         for (clock.n = limit->n.min;
636                              clock.n <= limit->n.max; clock.n++) {
637                                 for (clock.p1 = limit->p1.min;
638                                         clock.p1 <= limit->p1.max; clock.p1++) {
639                                         int this_err;
640
641                                         i9xx_calc_dpll_params(refclk, &clock);
642                                         if (!intel_PLL_is_valid(to_i915(dev),
643                                                                 limit,
644                                                                 &clock))
645                                                 continue;
646                                         if (match_clock &&
647                                             clock.p != match_clock->p)
648                                                 continue;
649
650                                         this_err = abs(clock.dot - target);
651                                         if (this_err < err) {
652                                                 *best_clock = clock;
653                                                 err = this_err;
654                                         }
655                                 }
656                         }
657                 }
658         }
659
660         return (err != target);
661 }
662
663 /*
664  * Returns a set of divisors for the desired target clock with the given
665  * refclk, or FALSE.  The returned values represent the clock equation:
666  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
667  *
668  * Target and reference clocks are specified in kHz.
669  *
670  * If match_clock is provided, then best_clock P divider must match the P
671  * divider from @match_clock used for LVDS downclocking.
672  */
673 static bool
674 pnv_find_best_dpll(const struct intel_limit *limit,
675                    struct intel_crtc_state *crtc_state,
676                    int target, int refclk, struct dpll *match_clock,
677                    struct dpll *best_clock)
678 {
679         struct drm_device *dev = crtc_state->base.crtc->dev;
680         struct dpll clock;
681         int err = target;
682
683         memset(best_clock, 0, sizeof(*best_clock));
684
685         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
686
687         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
688              clock.m1++) {
689                 for (clock.m2 = limit->m2.min;
690                      clock.m2 <= limit->m2.max; clock.m2++) {
691                         for (clock.n = limit->n.min;
692                              clock.n <= limit->n.max; clock.n++) {
693                                 for (clock.p1 = limit->p1.min;
694                                         clock.p1 <= limit->p1.max; clock.p1++) {
695                                         int this_err;
696
697                                         pnv_calc_dpll_params(refclk, &clock);
698                                         if (!intel_PLL_is_valid(to_i915(dev),
699                                                                 limit,
700                                                                 &clock))
701                                                 continue;
702                                         if (match_clock &&
703                                             clock.p != match_clock->p)
704                                                 continue;
705
706                                         this_err = abs(clock.dot - target);
707                                         if (this_err < err) {
708                                                 *best_clock = clock;
709                                                 err = this_err;
710                                         }
711                                 }
712                         }
713                 }
714         }
715
716         return (err != target);
717 }
718
719 /*
720  * Returns a set of divisors for the desired target clock with the given
721  * refclk, or FALSE.  The returned values represent the clock equation:
722  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
723  *
724  * Target and reference clocks are specified in kHz.
725  *
726  * If match_clock is provided, then best_clock P divider must match the P
727  * divider from @match_clock used for LVDS downclocking.
728  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* don't bother scanning larger
						 * n values once a match with
						 * this n has been found */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
777
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far.
 *
 * Returns true if @calculated_clock should replace @best_clock. The error
 * of the calculated configuration (in ppm) is stored in *@error_ppm as a
 * side effect.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	/* guard against a division by zero below */
	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* require a meaningful (>10 ppm) improvement over the current best */
	return *error_ppm + 10 < best_error_ppm;
}
817
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 from the dot clock equation
					 * for this n/p1/p2/m1 candidate */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
877
878 /*
879  * Returns a set of divisors for the desired target clock with the given
880  * refclk, or FALSE.  The returned values represent the clock equation:
881  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
882  */
883 static bool
884 chv_find_best_dpll(const struct intel_limit *limit,
885                    struct intel_crtc_state *crtc_state,
886                    int target, int refclk, struct dpll *match_clock,
887                    struct dpll *best_clock)
888 {
889         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
890         struct drm_device *dev = crtc->base.dev;
891         unsigned int best_error_ppm;
892         struct dpll clock;
893         uint64_t m2;
894         int found = false;
895
896         memset(best_clock, 0, sizeof(*best_clock));
897         best_error_ppm = 1000000;
898
899         /*
900          * Based on hardware doc, the n always set to 1, and m1 always
901          * set to 2.  If requires to support 200Mhz refclk, we need to
902          * revisit this because n may not 1 anymore.
903          */
904         clock.n = 1, clock.m1 = 2;
905         target *= 5;    /* fast clock */
906
907         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
908                 for (clock.p2 = limit->p2.p2_fast;
909                                 clock.p2 >= limit->p2.p2_slow;
910                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
911                         unsigned int error_ppm;
912
913                         clock.p = clock.p1 * clock.p2;
914
915                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
916                                         clock.n) << 22, refclk * clock.m1);
917
918                         if (m2 > INT_MAX/clock.m1)
919                                 continue;
920
921                         clock.m2 = m2;
922
923                         chv_calc_dpll_params(refclk, &clock);
924
925                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
926                                 continue;
927
928                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
929                                                 best_error_ppm, &error_ppm))
930                                 continue;
931
932                         *best_clock = clock;
933                         best_error_ppm = error_ppm;
934                         found = true;
935                 }
936         }
937
938         return found;
939 }
940
941 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
942                         struct dpll *best_clock)
943 {
944         int refclk = 100000;
945         const struct intel_limit *limit = &intel_limits_bxt;
946
947         return chv_find_best_dpll(limit, crtc_state,
948                                   target_clock, refclk, NULL, best_clock);
949 }
950
951 bool intel_crtc_active(struct intel_crtc *crtc)
952 {
953         /* Be paranoid as we can arrive here with only partial
954          * state retrieved from the hardware during setup.
955          *
956          * We can ditch the adjusted_mode.crtc_clock check as soon
957          * as Haswell has gained clock readout/fastboot support.
958          *
959          * We can ditch the crtc->primary->fb check as soon as we can
960          * properly reconstruct framebuffers.
961          *
962          * FIXME: The intel_crtc->active here should be switched to
963          * crtc->state->active once we have proper CRTC states wired up
964          * for atomic.
965          */
966         return crtc->active && crtc->base.primary->state->fb &&
967                 crtc->config->base.adjusted_mode.crtc_clock;
968 }
969
970 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
971                                              enum pipe pipe)
972 {
973         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
974
975         return crtc->config->cpu_transcoder;
976 }
977
978 static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
979 {
980         i915_reg_t reg = PIPEDSL(pipe);
981         u32 line1, line2;
982         u32 line_mask;
983
984         if (IS_GEN2(dev_priv))
985                 line_mask = DSL_LINEMASK_GEN2;
986         else
987                 line_mask = DSL_LINEMASK_GEN3;
988
989         line1 = I915_READ(reg) & line_mask;
990         msleep(5);
991         line2 = I915_READ(reg) & line_mask;
992
993         return line1 == line2;
994 }
995
996 /*
997  * intel_wait_for_pipe_off - wait for pipe to turn off
998  * @crtc: crtc whose pipe to wait for
999  *
1000  * After disabling a pipe, we can't wait for vblank in the usual way,
1001  * spinning on the vblank interrupt status bit, since we won't actually
1002  * see an interrupt when the pipe is disabled.
1003  *
1004  * On Gen4 and above:
1005  *   wait for the pipe register state bit to turn off
1006  *
1007  * Otherwise:
1008  *   wait for the display line value to settle (it usually
1009  *   ends up stopping at the start of the next frame).
1010  *
1011  */
1012 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1013 {
1014         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1015         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1016         enum pipe pipe = crtc->pipe;
1017
1018         if (INTEL_GEN(dev_priv) >= 4) {
1019                 i915_reg_t reg = PIPECONF(cpu_transcoder);
1020
1021                 /* Wait for the Pipe State to go off */
1022                 if (intel_wait_for_register(dev_priv,
1023                                             reg, I965_PIPECONF_ACTIVE, 0,
1024                                             100))
1025                         WARN(1, "pipe_off wait timed out\n");
1026         } else {
1027                 /* Wait for the display line to settle */
1028                 if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
1029                         WARN(1, "pipe_off wait timed out\n");
1030         }
1031 }
1032
1033 /* Only for pre-ILK configs */
1034 void assert_pll(struct drm_i915_private *dev_priv,
1035                 enum pipe pipe, bool state)
1036 {
1037         u32 val;
1038         bool cur_state;
1039
1040         val = I915_READ(DPLL(pipe));
1041         cur_state = !!(val & DPLL_VCO_ENABLE);
1042         I915_STATE_WARN(cur_state != state,
1043              "PLL state assertion failure (expected %s, current %s)\n",
1044                         onoff(state), onoff(cur_state));
1045 }
1046
1047 /* XXX: the dsi pll is shared between MIPI DSI ports */
1048 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1049 {
1050         u32 val;
1051         bool cur_state;
1052
1053         mutex_lock(&dev_priv->sb_lock);
1054         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1055         mutex_unlock(&dev_priv->sb_lock);
1056
1057         cur_state = val & DSI_PLL_VCO_EN;
1058         I915_STATE_WARN(cur_state != state,
1059              "DSI PLL state assertion failure (expected %s, current %s)\n",
1060                         onoff(state), onoff(cur_state));
1061 }
1062
1063 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1064                           enum pipe pipe, bool state)
1065 {
1066         bool cur_state;
1067         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1068                                                                       pipe);
1069
1070         if (HAS_DDI(dev_priv)) {
1071                 /* DDI does not have a specific FDI_TX register */
1072                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1073                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1074         } else {
1075                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1076                 cur_state = !!(val & FDI_TX_ENABLE);
1077         }
1078         I915_STATE_WARN(cur_state != state,
1079              "FDI TX state assertion failure (expected %s, current %s)\n",
1080                         onoff(state), onoff(cur_state));
1081 }
1082 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1083 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1084
1085 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1086                           enum pipe pipe, bool state)
1087 {
1088         u32 val;
1089         bool cur_state;
1090
1091         val = I915_READ(FDI_RX_CTL(pipe));
1092         cur_state = !!(val & FDI_RX_ENABLE);
1093         I915_STATE_WARN(cur_state != state,
1094              "FDI RX state assertion failure (expected %s, current %s)\n",
1095                         onoff(state), onoff(cur_state));
1096 }
1097 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1098 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1099
1100 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1101                                       enum pipe pipe)
1102 {
1103         u32 val;
1104
1105         /* ILK FDI PLL is always enabled */
1106         if (IS_GEN5(dev_priv))
1107                 return;
1108
1109         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1110         if (HAS_DDI(dev_priv))
1111                 return;
1112
1113         val = I915_READ(FDI_TX_CTL(pipe));
1114         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1115 }
1116
1117 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1118                        enum pipe pipe, bool state)
1119 {
1120         u32 val;
1121         bool cur_state;
1122
1123         val = I915_READ(FDI_RX_CTL(pipe));
1124         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1125         I915_STATE_WARN(cur_state != state,
1126              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1127                         onoff(state), onoff(cur_state));
1128 }
1129
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* DDI platforms are not handled by this function at all */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	/* Locate the panel power control register and figure out which
	 * pipe the panel is attached to, per platform. */
	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL(0);
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* Regs are considered unlocked if panel power is off or the
	 * unlock pattern is set in the control register. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1169
1170 static void assert_cursor(struct drm_i915_private *dev_priv,
1171                           enum pipe pipe, bool state)
1172 {
1173         bool cur_state;
1174
1175         if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
1176                 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1177         else
1178                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1179
1180         I915_STATE_WARN(cur_state != state,
1181              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1182                         pipe_name(pipe), onoff(state), onoff(cur_state));
1183 }
1184 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1185 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1186
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	/* Only read PIPECONF if the transcoder's power domain is already
	 * up; if it isn't, the pipe is reported as off. The reference is
	 * dropped immediately after the read. */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1214
1215 static void assert_plane(struct drm_i915_private *dev_priv,
1216                          enum plane plane, bool state)
1217 {
1218         u32 val;
1219         bool cur_state;
1220
1221         val = I915_READ(DSPCNTR(plane));
1222         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1223         I915_STATE_WARN(cur_state != state,
1224              "plane %c assertion failure (expected %s, current %s)\n",
1225                         plane_name(plane), onoff(state), onoff(cur_state));
1226 }
1227
1228 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1229 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1230
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int i;

	/* Primary planes are fixed to pipes on gen4+, so a single
	 * register check per pipe suffices */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe, since on older
	 * hardware any plane can be routed to any pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1255
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int sprite;

	/* The sprite/universal plane registers differ per generation;
	 * check whichever layout this hardware uses. */
	if (INTEL_GEN(dev_priv) >= 9) {
		/* skl+: universal planes, PLANE_CTL per pipe/plane */
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* vlv/chv: multiple sprites per pipe, SPCNTR registers */
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_GEN(dev_priv) >= 7) {
		/* ivb/hsw: one sprite per pipe, SPRCTL */
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		/* ilk/snb/g4x: one sprite per pipe, DVSCNTR */
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1287
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/* A successful (== 0) drm_crtc_vblank_get() here means vblanks
	 * were not disabled as expected - warn, and drop the reference
	 * we just took so the count stays balanced. */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1293
1294 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1295                                     enum pipe pipe)
1296 {
1297         u32 val;
1298         bool enabled;
1299
1300         val = I915_READ(PCH_TRANSCONF(pipe));
1301         enabled = !!(val & TRANS_ENABLE);
1302         I915_STATE_WARN(enabled,
1303              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1304              pipe_name(pipe));
1305 }
1306
1307 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1308                             enum pipe pipe, u32 port_sel, u32 val)
1309 {
1310         if ((val & DP_PORT_EN) == 0)
1311                 return false;
1312
1313         if (HAS_PCH_CPT(dev_priv)) {
1314                 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1315                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1316                         return false;
1317         } else if (IS_CHERRYVIEW(dev_priv)) {
1318                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1319                         return false;
1320         } else {
1321                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1322                         return false;
1323         }
1324         return true;
1325 }
1326
1327 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1328                               enum pipe pipe, u32 val)
1329 {
1330         if ((val & SDVO_ENABLE) == 0)
1331                 return false;
1332
1333         if (HAS_PCH_CPT(dev_priv)) {
1334                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1335                         return false;
1336         } else if (IS_CHERRYVIEW(dev_priv)) {
1337                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1338                         return false;
1339         } else {
1340                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1341                         return false;
1342         }
1343         return true;
1344 }
1345
1346 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1347                               enum pipe pipe, u32 val)
1348 {
1349         if ((val & LVDS_PORT_EN) == 0)
1350                 return false;
1351
1352         if (HAS_PCH_CPT(dev_priv)) {
1353                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1354                         return false;
1355         } else {
1356                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1357                         return false;
1358         }
1359         return true;
1360 }
1361
1362 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1363                               enum pipe pipe, u32 val)
1364 {
1365         if ((val & ADPA_DAC_ENABLE) == 0)
1366                 return false;
1367         if (HAS_PCH_CPT(dev_priv)) {
1368                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1369                         return false;
1370         } else {
1371                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1372                         return false;
1373         }
1374         return true;
1375 }
1376
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	/* On IBX, a disabled port must not be left with its pipe B
	 * select bit set either */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1390
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	/* On IBX, a disabled port must not be left with its pipe B
	 * select bit set either */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1403
1404 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1405                                       enum pipe pipe)
1406 {
1407         u32 val;
1408
1409         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1410         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1411         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1412
1413         val = I915_READ(PCH_ADPA);
1414         I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1415              "PCH VGA enabled on transcoder %c, should be disabled\n",
1416              pipe_name(pipe));
1417
1418         val = I915_READ(PCH_LVDS);
1419         I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1420              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1421              pipe_name(pipe));
1422
1423         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1424         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1425         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1426 }
1427
/* Write the DPLL configuration and wait for the PLL to report lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);	/* delay before polling for lock */

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1445
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only enable the PLL itself when the state asks for the VCO;
	 * DPLL_MD is programmed either way. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1463
1464
/* Enable the 10 bit clock via sideband, then the PLL, and wait for lock. */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1496
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only enable the PLL itself when the state asks for the VCO */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* remember what we wrote, since DPLL_MD can't be read back */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1533
1534 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1535 {
1536         struct intel_crtc *crtc;
1537         int count = 0;
1538
1539         for_each_intel_crtc(&dev_priv->drm, crtc) {
1540                 count += crtc->base.state->active &&
1541                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1542         }
1543
1544         return count;
1545 }
1546
/*
 * i9xx_enable_pll - enable the DPLL for a pre-ILK crtc
 *
 * Programs and enables the DPLL from crtc->config->dpll_hw_state,
 * including the i830 DVO 2x clock workaround, and performs the
 * repeated writes/delays needed for the clocks to stabilize.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc->config->dpll_hw_state.dpll;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /* Enable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
                /*
                 * It appears to be important that we don't enable this
                 * for the current pipe before otherwise configuring the
                 * PLL. No idea how this should be handled if multiple
                 * DVO outputs are enabled simultaneosly.
                 */
                dpll |= DPLL_DVO_2X_MODE;
                /* The other pipe's PLL gets the 2x bit too */
                I915_WRITE(DPLL(!crtc->pipe),
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, 0);

        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                /* gen4+ have a separate DPLL_MD register for the multiplier */
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc->config->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
}
1608
1609 /**
1610  * i9xx_disable_pll - disable a PLL
1611  * @dev_priv: i915 private structure
1612  * @pipe: pipe PLL to disable
1613  *
1614  * Disable the PLL for @pipe, making sure the pipe is off first.
1615  *
1616  * Note!  This is for pre-ILK only.
1617  */
1618 static void i9xx_disable_pll(struct intel_crtc *crtc)
1619 {
1620         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1621         enum pipe pipe = crtc->pipe;
1622
1623         /* Disable DVO 2x clock on both PLLs if necessary */
1624         if (IS_I830(dev_priv) &&
1625             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
1626             !intel_num_dvo_pipes(dev_priv)) {
1627                 I915_WRITE(DPLL(PIPE_B),
1628                            I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1629                 I915_WRITE(DPLL(PIPE_A),
1630                            I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1631         }
1632
1633         /* Don't disable pipe or pipe PLLs if needed */
1634         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1635             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1636                 return;
1637
1638         /* Make sure the pipe isn't still relying on us */
1639         assert_pipe_disabled(dev_priv, pipe);
1640
1641         I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1642         POSTING_READ(DPLL(pipe));
1643 }
1644
1645 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1646 {
1647         u32 val;
1648
1649         /* Make sure the pipe isn't still relying on us */
1650         assert_pipe_disabled(dev_priv, pipe);
1651
1652         val = DPLL_INTEGRATED_REF_CLK_VLV |
1653                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1654         if (pipe != PIPE_A)
1655                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1656
1657         I915_WRITE(DPLL(pipe), val);
1658         POSTING_READ(DPLL(pipe));
1659 }
1660
/*
 * chv_disable_pll - put the CHV DPLL for @pipe into its disabled state
 *
 * Writes the disabled DPLL value (reference clock still enabled, CRI
 * clock bit kept on pipes other than A), then turns off the 10bit
 * (DCLKP) clock to the display controller over sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        /* DPIO accesses go through the sideband and need the lock */
        mutex_lock(&dev_priv->sb_lock);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        mutex_unlock(&dev_priv->sb_lock);
}
1686
1687 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1688                          struct intel_digital_port *dport,
1689                          unsigned int expected_mask)
1690 {
1691         u32 port_mask;
1692         i915_reg_t dpll_reg;
1693
1694         switch (dport->port) {
1695         case PORT_B:
1696                 port_mask = DPLL_PORTB_READY_MASK;
1697                 dpll_reg = DPLL(0);
1698                 break;
1699         case PORT_C:
1700                 port_mask = DPLL_PORTC_READY_MASK;
1701                 dpll_reg = DPLL(0);
1702                 expected_mask <<= 4;
1703                 break;
1704         case PORT_D:
1705                 port_mask = DPLL_PORTD_READY_MASK;
1706                 dpll_reg = DPIO_PHY_STATUS;
1707                 break;
1708         default:
1709                 BUG();
1710         }
1711
1712         if (intel_wait_for_register(dev_priv,
1713                                     dpll_reg, port_mask, expected_mask,
1714                                     1000))
1715                 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1716                      port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1717 }
1718
1719 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1720                                            enum pipe pipe)
1721 {
1722         struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
1723                                                                 pipe);
1724         i915_reg_t reg;
1725         uint32_t val, pipeconf_val;
1726
1727         /* Make sure PCH DPLL is enabled */
1728         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1729
1730         /* FDI must be feeding us bits for PCH ports */
1731         assert_fdi_tx_enabled(dev_priv, pipe);
1732         assert_fdi_rx_enabled(dev_priv, pipe);
1733
1734         if (HAS_PCH_CPT(dev_priv)) {
1735                 /* Workaround: Set the timing override bit before enabling the
1736                  * pch transcoder. */
1737                 reg = TRANS_CHICKEN2(pipe);
1738                 val = I915_READ(reg);
1739                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1740                 I915_WRITE(reg, val);
1741         }
1742
1743         reg = PCH_TRANSCONF(pipe);
1744         val = I915_READ(reg);
1745         pipeconf_val = I915_READ(PIPECONF(pipe));
1746
1747         if (HAS_PCH_IBX(dev_priv)) {
1748                 /*
1749                  * Make the BPC in transcoder be consistent with
1750                  * that in pipeconf reg. For HDMI we must use 8bpc
1751                  * here for both 8bpc and 12bpc.
1752                  */
1753                 val &= ~PIPECONF_BPC_MASK;
1754                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1755                         val |= PIPECONF_8BPC;
1756                 else
1757                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1758         }
1759
1760         val &= ~TRANS_INTERLACE_MASK;
1761         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1762                 if (HAS_PCH_IBX(dev_priv) &&
1763                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1764                         val |= TRANS_LEGACY_INTERLACED_ILK;
1765                 else
1766                         val |= TRANS_INTERLACED;
1767         else
1768                 val |= TRANS_PROGRESSIVE;
1769
1770         I915_WRITE(reg, val | TRANS_ENABLE);
1771         if (intel_wait_for_register(dev_priv,
1772                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1773                                     100))
1774                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1775 }
1776
/*
 * lpt_enable_pch_transcoder - enable the LPT PCH transcoder
 *
 * On LPT there is a single PCH transcoder (LPT_TRANSCONF): FDI RX is
 * therefore asserted on transcoder A, while FDI TX is asserted on the
 * given CPU transcoder. Sets the timing override workaround bit,
 * copies the interlace mode from PIPECONF, and waits up to 100ms for
 * the transcoder to report enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF,
                                    TRANS_STATE_ENABLE,
                                    TRANS_STATE_ENABLE,
                                    100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
1808
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for @pipe
 *
 * FDI and the PCH ports must already be off (asserted up front).
 * Clears TRANS_ENABLE, waits up to 50ms for the transcoder to report
 * disabled, then clears the CPT timing override workaround bit that
 * was set at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        i915_reg_t reg;
        uint32_t val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    reg, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
1840
/*
 * lpt_disable_pch_transcoder - disable the LPT PCH transcoder
 *
 * Clears TRANS_ENABLE in LPT_TRANSCONF, waits up to 50ms for the
 * transcoder to report disabled, then clears the timing override
 * workaround bit set by lpt_enable_pch_transcoder().
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1859
1860 enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1861 {
1862         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1863
1864         WARN_ON(!crtc->config->has_pch_encoder);
1865
1866         if (HAS_PCH_LPT(dev_priv))
1867                 return TRANSCODER_A;
1868         else
1869                 return (enum transcoder) crtc->pipe;
1870 }
1871
1872 /**
1873  * intel_enable_pipe - enable a pipe, asserting requirements
1874  * @crtc: crtc responsible for the pipe
1875  *
1876  * Enable @crtc's pipe, making sure that various hardware specific requirements
1877  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1878  */
1879 static void intel_enable_pipe(struct intel_crtc *crtc)
1880 {
1881         struct drm_device *dev = crtc->base.dev;
1882         struct drm_i915_private *dev_priv = to_i915(dev);
1883         enum pipe pipe = crtc->pipe;
1884         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1885         i915_reg_t reg;
1886         u32 val;
1887
1888         DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1889
1890         assert_planes_disabled(dev_priv, pipe);
1891         assert_cursor_disabled(dev_priv, pipe);
1892         assert_sprites_disabled(dev_priv, pipe);
1893
1894         /*
1895          * A pipe without a PLL won't actually be able to drive bits from
1896          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1897          * need the check.
1898          */
1899         if (HAS_GMCH_DISPLAY(dev_priv)) {
1900                 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
1901                         assert_dsi_pll_enabled(dev_priv);
1902                 else
1903                         assert_pll_enabled(dev_priv, pipe);
1904         } else {
1905                 if (crtc->config->has_pch_encoder) {
1906                         /* if driving the PCH, we need FDI enabled */
1907                         assert_fdi_rx_pll_enabled(dev_priv,
1908                                                   (enum pipe) intel_crtc_pch_transcoder(crtc));
1909                         assert_fdi_tx_pll_enabled(dev_priv,
1910                                                   (enum pipe) cpu_transcoder);
1911                 }
1912                 /* FIXME: assert CPU port conditions for SNB+ */
1913         }
1914
1915         reg = PIPECONF(cpu_transcoder);
1916         val = I915_READ(reg);
1917         if (val & PIPECONF_ENABLE) {
1918                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1919                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
1920                 return;
1921         }
1922
1923         I915_WRITE(reg, val | PIPECONF_ENABLE);
1924         POSTING_READ(reg);
1925
1926         /*
1927          * Until the pipe starts DSL will read as 0, which would cause
1928          * an apparent vblank timestamp jump, which messes up also the
1929          * frame count when it's derived from the timestamps. So let's
1930          * wait for the pipe to start properly before we call
1931          * drm_crtc_vblank_on()
1932          */
1933         if (dev->max_vblank_count == 0 &&
1934             wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
1935                 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
1936 }
1937
1938 /**
1939  * intel_disable_pipe - disable a pipe, asserting requirements
1940  * @crtc: crtc whose pipes is to be disabled
1941  *
1942  * Disable the pipe of @crtc, making sure that various hardware
1943  * specific requirements are met, if applicable, e.g. plane
1944  * disabled, panel fitter off, etc.
1945  *
1946  * Will wait until the pipe has shut down before returning.
1947  */
1948 static void intel_disable_pipe(struct intel_crtc *crtc)
1949 {
1950         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1951         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1952         enum pipe pipe = crtc->pipe;
1953         i915_reg_t reg;
1954         u32 val;
1955
1956         DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1957
1958         /*
1959          * Make sure planes won't keep trying to pump pixels to us,
1960          * or we might hang the display.
1961          */
1962         assert_planes_disabled(dev_priv, pipe);
1963         assert_cursor_disabled(dev_priv, pipe);
1964         assert_sprites_disabled(dev_priv, pipe);
1965
1966         reg = PIPECONF(cpu_transcoder);
1967         val = I915_READ(reg);
1968         if ((val & PIPECONF_ENABLE) == 0)
1969                 return;
1970
1971         /*
1972          * Double wide has implications for planes
1973          * so best keep it disabled when not needed.
1974          */
1975         if (crtc->config->double_wide)
1976                 val &= ~PIPECONF_DOUBLE_WIDE;
1977
1978         /* Don't disable pipe or pipe PLLs if needed */
1979         if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
1980             !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1981                 val &= ~PIPECONF_ENABLE;
1982
1983         I915_WRITE(reg, val);
1984         if ((val & PIPECONF_ENABLE) == 0)
1985                 intel_wait_for_pipe_off(crtc);
1986 }
1987
/* Size of one tile in bytes: 2 KiB on gen2, 4 KiB on everything else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
        if (IS_GEN2(dev_priv))
                return 2048;

        return 4096;
}
1992
1993 static unsigned int
1994 intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
1995 {
1996         struct drm_i915_private *dev_priv = to_i915(fb->dev);
1997         unsigned int cpp = fb->format->cpp[plane];
1998
1999         switch (fb->modifier) {
2000         case DRM_FORMAT_MOD_LINEAR:
2001                 return cpp;
2002         case I915_FORMAT_MOD_X_TILED:
2003                 if (IS_GEN2(dev_priv))
2004                         return 128;
2005                 else
2006                         return 512;
2007         case I915_FORMAT_MOD_Y_TILED:
2008                 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2009                         return 128;
2010                 else
2011                         return 512;
2012         case I915_FORMAT_MOD_Yf_TILED:
2013                 switch (cpp) {
2014                 case 1:
2015                         return 64;
2016                 case 2:
2017                 case 4:
2018                         return 128;
2019                 case 8:
2020                 case 16:
2021                         return 256;
2022                 default:
2023                         MISSING_CASE(cpp);
2024                         return cpp;
2025                 }
2026                 break;
2027         default:
2028                 MISSING_CASE(fb->modifier);
2029                 return cpp;
2030         }
2031 }
2032
2033 static unsigned int
2034 intel_tile_height(const struct drm_framebuffer *fb, int plane)
2035 {
2036         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
2037                 return 1;
2038         else
2039                 return intel_tile_size(to_i915(fb->dev)) /
2040                         intel_tile_width_bytes(fb, plane);
2041 }
2042
2043 /* Return the tile dimensions in pixel units */
2044 static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
2045                             unsigned int *tile_width,
2046                             unsigned int *tile_height)
2047 {
2048         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
2049         unsigned int cpp = fb->format->cpp[plane];
2050
2051         *tile_width = tile_width_bytes / cpp;
2052         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
2053 }
2054
/* Round @height up to a whole number of tile rows for @plane of @fb. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
                      int plane, unsigned int height)
{
        return ALIGN(height, intel_tile_height(fb, plane));
}
2063
2064 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2065 {
2066         unsigned int size = 0;
2067         int i;
2068
2069         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2070                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2071
2072         return size;
2073 }
2074
2075 static void
2076 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2077                         const struct drm_framebuffer *fb,
2078                         unsigned int rotation)
2079 {
2080         view->type = I915_GGTT_VIEW_NORMAL;
2081         if (drm_rotation_90_or_270(rotation)) {
2082                 view->type = I915_GGTT_VIEW_ROTATED;
2083                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2084         }
2085 }
2086
2087 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2088 {
2089         if (INTEL_INFO(dev_priv)->gen >= 9)
2090                 return 256 * 1024;
2091         else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2092                  IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2093                 return 128 * 1024;
2094         else if (INTEL_INFO(dev_priv)->gen >= 4)
2095                 return 4 * 1024;
2096         else
2097                 return 0;
2098 }
2099
2100 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2101                                          int plane)
2102 {
2103         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2104
2105         /* AUX_DIST needs only 4K alignment */
2106         if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
2107                 return 4096;
2108
2109         switch (fb->modifier) {
2110         case DRM_FORMAT_MOD_LINEAR:
2111                 return intel_linear_alignment(dev_priv);
2112         case I915_FORMAT_MOD_X_TILED:
2113                 if (INTEL_GEN(dev_priv) >= 9)
2114                         return 256 * 1024;
2115                 return 0;
2116         case I915_FORMAT_MOD_Y_TILED:
2117         case I915_FORMAT_MOD_Yf_TILED:
2118                 return 1 * 1024 * 1024;
2119         default:
2120                 MISSING_CASE(fb->modifier);
2121                 return 0;
2122         }
2123 }
2124
/**
 * intel_pin_and_fence_fb_obj - pin a framebuffer's object for scanout
 * @fb: framebuffer to pin
 * @rotation: plane rotation, selects the normal vs. rotated GGTT view
 *
 * Pins @fb's backing object to the display plane with the alignment
 * required by its tiling/modifier and, when the vma is mappable and
 * fenceable, installs a fence for tiled scan-out.
 *
 * Returns the pinned vma (with an extra reference held for the caller,
 * released by intel_unpin_fb_vma()) or an ERR_PTR on failure.
 * Caller must hold struct_mutex.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
        struct i915_vma *vma;
        u32 alignment;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        alignment = intel_surf_alignment(fb, 0);

        intel_fill_fb_ggtt_view(&view, fb, rotation);

        /* Note that the w/a also requires 64 PTE of padding following the
         * bo. We currently fill all unused PTE with the shadow page and so
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
        if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;

        /*
         * Global gtt pte registers are special registers which actually forward
         * writes to a chunk of system memory. Which means that there is no risk
         * that the register values disappear as soon as we call
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
        intel_runtime_pm_get(dev_priv);

        vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
        if (IS_ERR(vma))
                goto err;

        if (i915_vma_is_map_and_fenceable(vma)) {
                /* Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
                 * possible, install a fence as the cost is not that onerous.
                 *
                 * If we fail to fence the tiled scanout, then either the
                 * modeset will reject the change (which is highly unlikely as
                 * the affected systems, all but one, do not have unmappable
                 * space) or we will not be able to enable full powersaving
                 * techniques (also likely not to apply due to various limits
                 * FBC and the like impose on the size of the buffer, which
                 * presumably we violated anyway with this unmappable buffer).
                 * Anyway, it is presumably better to stumble onwards with
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
                if (i915_vma_get_fence(vma) == 0)
                        i915_vma_pin_fence(vma);
        }

        /* Hold a caller-owned reference; dropped in intel_unpin_fb_vma() */
        i915_vma_get(vma);
err:
        /* Shared by success and error paths: rpm only wraps pin/fence */
        intel_runtime_pm_put(dev_priv);
        return vma;
}
2188
/*
 * intel_unpin_fb_vma - release a vma pinned by intel_pin_and_fence_fb_obj()
 *
 * Drops the fence pin, the display-plane pin and the vma reference
 * taken when pinning. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
        i915_vma_put(vma);
}
2197
2198 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
2199                           unsigned int rotation)
2200 {
2201         if (drm_rotation_90_or_270(rotation))
2202                 return to_intel_framebuffer(fb)->rotated[plane].pitch;
2203         else
2204                 return fb->pitches[plane];
2205 }
2206
2207 /*
2208  * Convert the x/y offsets into a linear offset.
2209  * Only valid with 0/180 degree rotation, which is fine since linear
2210  * offset is only used with linear buffers on pre-hsw and tiled buffers
2211  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2212  */
2213 u32 intel_fb_xy_to_linear(int x, int y,
2214                           const struct intel_plane_state *state,
2215                           int plane)
2216 {
2217         const struct drm_framebuffer *fb = state->base.fb;
2218         unsigned int cpp = fb->format->cpp[plane];
2219         unsigned int pitch = fb->pitches[plane];
2220
2221         return y * pitch + x * cpp;
2222 }
2223
2224 /*
2225  * Add the x/y offsets derived from fb->offsets[] to the user
2226  * specified plane src x/y offsets. The resulting x/y offsets
2227  * specify the start of scanout from the beginning of the gtt mapping.
2228  */
2229 void intel_add_fb_offsets(int *x, int *y,
2230                           const struct intel_plane_state *state,
2231                           int plane)
2232
2233 {
2234         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2235         unsigned int rotation = state->base.rotation;
2236
2237         if (drm_rotation_90_or_270(rotation)) {
2238                 *x += intel_fb->rotated[plane].x;
2239                 *y += intel_fb->rotated[plane].y;
2240         } else {
2241                 *x += intel_fb->normal[plane].x;
2242                 *y += intel_fb->normal[plane].y;
2243         }
2244 }
2245
2246 /*
2247  * Input tile dimensions and pitch must already be
2248  * rotated to match x and y, and in pixel units.
2249  */
2250 static u32 _intel_adjust_tile_offset(int *x, int *y,
2251                                      unsigned int tile_width,
2252                                      unsigned int tile_height,
2253                                      unsigned int tile_size,
2254                                      unsigned int pitch_tiles,
2255                                      u32 old_offset,
2256                                      u32 new_offset)
2257 {
2258         unsigned int pitch_pixels = pitch_tiles * tile_width;
2259         unsigned int tiles;
2260
2261         WARN_ON(old_offset & (tile_size - 1));
2262         WARN_ON(new_offset & (tile_size - 1));
2263         WARN_ON(new_offset > old_offset);
2264
2265         tiles = (old_offset - new_offset) / tile_size;
2266
2267         *y += tiles / pitch_tiles * tile_height;
2268         *x += tiles % pitch_tiles * tile_width;
2269
2270         /* minimize x in case it got needlessly big */
2271         *y += *x / pitch_pixels * tile_height;
2272         *x %= pitch_pixels;
2273
2274         return new_offset;
2275 }
2276
2277 /*
2278  * Adjust the tile offset by moving the difference into
2279  * the x/y offsets.
2280  */
2281 static u32 intel_adjust_tile_offset(int *x, int *y,
2282                                     const struct intel_plane_state *state, int plane,
2283                                     u32 old_offset, u32 new_offset)
2284 {
2285         const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
2286         const struct drm_framebuffer *fb = state->base.fb;
2287         unsigned int cpp = fb->format->cpp[plane];
2288         unsigned int rotation = state->base.rotation;
2289         unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
2290
2291         WARN_ON(new_offset > old_offset);
2292
2293         if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
2294                 unsigned int tile_size, tile_width, tile_height;
2295                 unsigned int pitch_tiles;
2296
2297                 tile_size = intel_tile_size(dev_priv);
2298                 intel_tile_dims(fb, plane, &tile_width, &tile_height);
2299
2300                 if (drm_rotation_90_or_270(rotation)) {
2301                         pitch_tiles = pitch / tile_height;
2302                         swap(tile_width, tile_height);
2303                 } else {
2304                         pitch_tiles = pitch / (tile_width * cpp);
2305                 }
2306
2307                 _intel_adjust_tile_offset(x, y, tile_width, tile_height,
2308                                           tile_size, pitch_tiles,
2309                                           old_offset, new_offset);
2310         } else {
2311                 old_offset += *y * pitch + *x * cpp;
2312
2313                 *y = (old_offset - new_offset) / pitch;
2314                 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2315         }
2316
2317         return new_offset;
2318 }
2319
2320 /*
2321  * Computes the linear offset to the base tile and adjusts
2322  * x, y. bytes per pixel is assumed to be a power-of-two.
2323  *
2324  * In the 90/270 rotated case, x and y are assumed
2325  * to be already rotated to match the rotated GTT view, and
2326  * pitch is the tile_height aligned framebuffer height.
2327  *
2328  * This function is used when computing the derived information
2329  * under intel_framebuffer, so using any of that information
2330  * here is not allowed. Anything under drm_framebuffer can be
2331  * used. This is why the user has to pass in the pitch since it
2332  * is specified in the rotated orientation.
2333  */
2334 static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
2335                                       int *x, int *y,
2336                                       const struct drm_framebuffer *fb, int plane,
2337                                       unsigned int pitch,
2338                                       unsigned int rotation,
2339                                       u32 alignment)
2340 {
2341         uint64_t fb_modifier = fb->modifier;
2342         unsigned int cpp = fb->format->cpp[plane];
2343         u32 offset, offset_aligned;
2344
2345         if (alignment)
2346                 alignment--;
2347
2348         if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
2349                 unsigned int tile_size, tile_width, tile_height;
2350                 unsigned int tile_rows, tiles, pitch_tiles;
2351
2352                 tile_size = intel_tile_size(dev_priv);
2353                 intel_tile_dims(fb, plane, &tile_width, &tile_height);
2354
2355                 if (drm_rotation_90_or_270(rotation)) {
2356                         pitch_tiles = pitch / tile_height;
2357                         swap(tile_width, tile_height);
2358                 } else {
2359                         pitch_tiles = pitch / (tile_width * cpp);
2360                 }
2361
2362                 tile_rows = *y / tile_height;
2363                 *y %= tile_height;
2364
2365                 tiles = *x / tile_width;
2366                 *x %= tile_width;
2367
2368                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2369                 offset_aligned = offset & ~alignment;
2370
2371                 _intel_adjust_tile_offset(x, y, tile_width, tile_height,
2372                                           tile_size, pitch_tiles,
2373                                           offset, offset_aligned);
2374         } else {
2375                 offset = *y * pitch + *x * cpp;
2376                 offset_aligned = offset & ~alignment;
2377
2378                 *y = (offset & alignment) / pitch;
2379                 *x = ((offset & alignment) - *y * pitch) / cpp;
2380         }
2381
2382         return offset_aligned;
2383 }
2384
2385 u32 intel_compute_tile_offset(int *x, int *y,
2386                               const struct intel_plane_state *state,
2387                               int plane)
2388 {
2389         const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
2390         const struct drm_framebuffer *fb = state->base.fb;
2391         unsigned int rotation = state->base.rotation;
2392         int pitch = intel_fb_pitch(fb, plane, rotation);
2393         u32 alignment = intel_surf_alignment(fb, plane);
2394
2395         return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
2396                                           rotation, alignment);
2397 }
2398
2399 /* Convert the fb->offset[] linear offset into x/y offsets */
2400 static void intel_fb_offset_to_xy(int *x, int *y,
2401                                   const struct drm_framebuffer *fb, int plane)
2402 {
2403         unsigned int cpp = fb->format->cpp[plane];
2404         unsigned int pitch = fb->pitches[plane];
2405         u32 linear_offset = fb->offsets[plane];
2406
2407         *y = linear_offset / pitch;
2408         *x = linear_offset % pitch / cpp;
2409 }
2410
2411 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2412 {
2413         switch (fb_modifier) {
2414         case I915_FORMAT_MOD_X_TILED:
2415                 return I915_TILING_X;
2416         case I915_FORMAT_MOD_Y_TILED:
2417                 return I915_TILING_Y;
2418         default:
2419                 return I915_TILING_NONE;
2420         }
2421 }
2422
/*
 * Fill out the derived layout information for @fb: the per-plane x/y
 * offsets for both the normal and the rotated GTT view, and the
 * rotation info used to construct the rotated view. Also validates
 * that the fb layout fits inside the backing object.
 *
 * Returns 0 on success, -EINVAL if the layout is invalid.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        u32 gtt_offset_rotated = 0; /* running tile offset in the rotated view */
        unsigned int max_size = 0;  /* bo size needed so far, in tiles */
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                intel_fb_offset_to_xy(&x, &y, fb, i);

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i915_gem_object_is_tiled(intel_fb->obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* tile_size alignment makes the returned offset whole tiles */
                offset = _intel_compute_tile_offset(dev_priv, &x, &y,
                                                    fb, i, fb->pitches[i],
                                                    DRM_ROTATE_0, tile_size);
                offset /= tile_size;

                if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        /* Describe this plane for the rotated GTT view. */
                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        _intel_adjust_tile_offset(&x, &y,
                                                  tile_width, tile_height,
                                                  tile_size, pitch_tiles,
                                                  gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear: size in tiles is just the covered byte range. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (max_size * tile_size > intel_fb->obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
                              max_size * tile_size, intel_fb->obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2548
2549 static int i9xx_format_to_fourcc(int format)
2550 {
2551         switch (format) {
2552         case DISPPLANE_8BPP:
2553                 return DRM_FORMAT_C8;
2554         case DISPPLANE_BGRX555:
2555                 return DRM_FORMAT_XRGB1555;
2556         case DISPPLANE_BGRX565:
2557                 return DRM_FORMAT_RGB565;
2558         default:
2559         case DISPPLANE_BGRX888:
2560                 return DRM_FORMAT_XRGB8888;
2561         case DISPPLANE_RGBX888:
2562                 return DRM_FORMAT_XBGR8888;
2563         case DISPPLANE_BGRX101010:
2564                 return DRM_FORMAT_XRGB2101010;
2565         case DISPPLANE_RGBX101010:
2566                 return DRM_FORMAT_XBGR2101010;
2567         }
2568 }
2569
2570 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2571 {
2572         switch (format) {
2573         case PLANE_CTL_FORMAT_RGB_565:
2574                 return DRM_FORMAT_RGB565;
2575         default:
2576         case PLANE_CTL_FORMAT_XRGB_8888:
2577                 if (rgb_order) {
2578                         if (alpha)
2579                                 return DRM_FORMAT_ABGR8888;
2580                         else
2581                                 return DRM_FORMAT_XBGR8888;
2582                 } else {
2583                         if (alpha)
2584                                 return DRM_FORMAT_ARGB8888;
2585                         else
2586                                 return DRM_FORMAT_XRGB8888;
2587                 }
2588         case PLANE_CTL_FORMAT_XRGB_2101010:
2589                 if (rgb_order)
2590                         return DRM_FORMAT_XBGR2101010;
2591                 else
2592                         return DRM_FORMAT_XRGB2101010;
2593         }
2594 }
2595
/*
 * Try to wrap the firmware/BIOS-programmed framebuffer described by
 * @plane_config in a GEM object backed by the preallocated stolen
 * memory range, and initialize the intel_framebuffer around it.
 *
 * Returns true on success, false if the fb can't (or shouldn't) be
 * taken over, in which case the caller falls back to other options.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        /* Page-align the stolen range occupied by the BIOS fb. */
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > ggtt->stolen_usable_size)
                return false;

        mutex_lock(&dev->struct_mutex);
        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        mutex_unlock(&dev->struct_mutex);
        if (!obj)
                return false;

        /* Carry the BIOS-programmed tiling/stride over to the object. */
        if (plane_config->tiling == I915_TILING_X)
                obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out_unref_obj;
        }


        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        return true;

out_unref_obj:
        i915_gem_object_put(obj);
        return false;
}
2653
2654 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2655 static void
2656 update_state_fb(struct drm_plane *plane)
2657 {
2658         if (plane->fb == plane->state->fb)
2659                 return;
2660
2661         if (plane->state->fb)
2662                 drm_framebuffer_unreference(plane->state->fb);
2663         plane->state->fb = plane->fb;
2664         if (plane->state->fb)
2665                 drm_framebuffer_reference(plane->state->fb);
2666 }
2667
2668 static void
2669 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2670                         struct intel_plane_state *plane_state,
2671                         bool visible)
2672 {
2673         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2674
2675         plane_state->base.visible = visible;
2676
2677         /* FIXME pre-g4x don't work like this */
2678         if (visible) {
2679                 crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
2680                 crtc_state->active_planes |= BIT(plane->id);
2681         } else {
2682                 crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
2683                 crtc_state->active_planes &= ~BIT(plane->id);
2684         }
2685
2686         DRM_DEBUG_KMS("%s active planes 0x%x\n",
2687                       crtc_state->base.crtc->name,
2688                       crtc_state->active_planes);
2689 }
2690
/*
 * Take over the firmware framebuffer for the primary plane of
 * @intel_crtc: first try to wrap the BIOS-programmed memory in a GEM
 * object, then fall back to sharing another CRTC's fb at the same GTT
 * base, and finally disable the plane if neither works.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct drm_crtc_state *crtc_state = intel_crtc->base.state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        /* The reconstructed fb wasn't usable; free the template. */
        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GTT base => both CRTCs scan out the same memory. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = c->primary->fb;
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_set_plane_visible(to_intel_crtc_state(crtc_state),
                                to_intel_plane_state(plane_state),
                                false);
        intel_pre_disable_primary_noatomic(&intel_crtc->base);
        trace_intel_disable_plane(primary, intel_crtc);
        intel_plane->disable_plane(intel_plane, intel_crtc);

        return;

valid_fb:
        /* Pin the fb so it stays resident while being scanned out. */
        mutex_lock(&dev->struct_mutex);
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_unreference(fb);
                return;
        }

        /* Full-fb source rectangle (16.16 fixed point). */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->base.src = drm_plane_state_src(plane_state);
        intel_state->base.dst = drm_plane_state_dest(plane_state);

        obj = intel_fb_obj(fb);
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;

        drm_framebuffer_reference(fb);
        primary->fb = primary->state->fb = fb;
        primary->crtc = primary->state->crtc = &intel_crtc->base;

        intel_set_plane_visible(to_intel_crtc_state(crtc_state),
                                to_intel_plane_state(plane_state),
                                true);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
}
2799
2800 static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
2801                                unsigned int rotation)
2802 {
2803         int cpp = fb->format->cpp[plane];
2804
2805         switch (fb->modifier) {
2806         case DRM_FORMAT_MOD_LINEAR:
2807         case I915_FORMAT_MOD_X_TILED:
2808                 switch (cpp) {
2809                 case 8:
2810                         return 4096;
2811                 case 4:
2812                 case 2:
2813                 case 1:
2814                         return 8192;
2815                 default:
2816                         MISSING_CASE(cpp);
2817                         break;
2818                 }
2819                 break;
2820         case I915_FORMAT_MOD_Y_TILED:
2821         case I915_FORMAT_MOD_Yf_TILED:
2822                 switch (cpp) {
2823                 case 8:
2824                         return 2048;
2825                 case 4:
2826                         return 4096;
2827                 case 2:
2828                 case 1:
2829                         return 8192;
2830                 default:
2831                         MISSING_CASE(cpp);
2832                         break;
2833                 }
2834                 break;
2835         default:
2836                 MISSING_CASE(fb->modifier);
2837         }
2838
2839         return 2048;
2840 }
2841
/*
 * Validate and compute the main (Y/RGB) surface offset and x/y for a
 * SKL+ plane, keeping the offset below the AUX surface offset and
 * working around the X-tiled x+width vs. stride hardware limit.
 *
 * Fills plane_state->main on success; returns 0 or -EINVAL.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        /* src coords are 16.16 fixed point */
        int x = plane_state->base.src.x1 >> 16;
        int y = plane_state->base.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
        u32 alignment, offset, aux_offset = plane_state->aux.offset;

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                  offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Step the offset back by one alignment unit at a time,
                 * trading offset for a larger x, until x+w fits the stride. */
                while ((x + w) * cpp > fb->pitches[0]) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
                                return -EINVAL;
                        }

                        offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                          offset, offset - alignment);
                }
        }

        plane_state->main.offset = offset;
        plane_state->main.x = x;
        plane_state->main.y = y;

        return 0;
}
2899
/*
 * Compute the AUX (CbCr) surface offset and x/y for an NV12 fb.
 * Fills plane_state->aux; returns 0 or -EINVAL.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int max_width = skl_max_plane_width(fb, 1, rotation);
        int max_height = 4096;
        /*
         * src coords are 16.16 fixed point; >> 17 converts to integer
         * and halves, since the NV12 chroma plane is 2x2 subsampled.
         */
        int x = plane_state->base.src.x1 >> 17;
        int y = plane_state->base.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->base.src) >> 17;
        int h = drm_rect_height(&plane_state->base.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_compute_tile_offset(&x, &y, plane_state, 1);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        plane_state->aux.offset = offset;
        plane_state->aux.x = x;
        plane_state->aux.y = y;

        return 0;
}
2928
/*
 * Compute the main and AUX surface parameters for a SKL+ plane.
 * The AUX surface is handled first because skl_check_main_surface()
 * constrains the main offset against aux.offset.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int ret;

        if (!plane_state->base.visible)
                return 0;

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                fb->width << 16, fb->height << 16,
                                DRM_ROTATE_270);

        /*
         * Handle the AUX surface first since
         * the main surface setup depends on it.
         */
        if (fb->format->format == DRM_FORMAT_NV12) {
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else {
                /*
                 * No AUX surface: use a huge dummy offset so the
                 * main-surface "offset <= aux.offset" check never triggers.
                 */
                plane_state->aux.offset = ~0xfff;
                plane_state->aux.x = 0;
                plane_state->aux.y = 0;
        }

        ret = skl_check_main_surface(plane_state);
        if (ret)
                return ret;

        return 0;
}
2964
2965 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
2966                           const struct intel_plane_state *plane_state)
2967 {
2968         struct drm_i915_private *dev_priv =
2969                 to_i915(plane_state->base.plane->dev);
2970         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2971         const struct drm_framebuffer *fb = plane_state->base.fb;
2972         unsigned int rotation = plane_state->base.rotation;
2973         u32 dspcntr;
2974
2975         dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
2976
2977         if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
2978             IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
2979                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2980
2981         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2982                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2983
2984         if (INTEL_GEN(dev_priv) < 4)
2985                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
2986
2987         switch (fb->format->format) {
2988         case DRM_FORMAT_C8:
2989                 dspcntr |= DISPPLANE_8BPP;
2990                 break;
2991         case DRM_FORMAT_XRGB1555:
2992                 dspcntr |= DISPPLANE_BGRX555;
2993                 break;
2994         case DRM_FORMAT_RGB565:
2995                 dspcntr |= DISPPLANE_BGRX565;
2996                 break;
2997         case DRM_FORMAT_XRGB8888:
2998                 dspcntr |= DISPPLANE_BGRX888;
2999                 break;
3000         case DRM_FORMAT_XBGR8888:
3001                 dspcntr |= DISPPLANE_RGBX888;
3002                 break;
3003         case DRM_FORMAT_XRGB2101010:
3004                 dspcntr |= DISPPLANE_BGRX101010;
3005                 break;
3006         case DRM_FORMAT_XBGR2101010:
3007                 dspcntr |= DISPPLANE_RGBX101010;
3008                 break;
3009         default:
3010                 MISSING_CASE(fb->format->format);
3011                 return 0;
3012         }
3013
3014         if (INTEL_GEN(dev_priv) >= 4 &&
3015             fb->modifier == I915_FORMAT_MOD_X_TILED)
3016                 dspcntr |= DISPPLANE_TILED;
3017
3018         if (rotation & DRM_ROTATE_180)
3019                 dspcntr |= DISPPLANE_ROTATE_180;
3020
3021         if (rotation & DRM_REFLECT_X)
3022                 dspcntr |= DISPPLANE_MIRROR;
3023
3024         return dspcntr;
3025 }
3026
3027 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3028 {
3029         struct drm_i915_private *dev_priv =
3030                 to_i915(plane_state->base.plane->dev);
3031         int src_x = plane_state->base.src.x1 >> 16;
3032         int src_y = plane_state->base.src.y1 >> 16;
3033         u32 offset;
3034
3035         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3036
3037         if (INTEL_GEN(dev_priv) >= 4)
3038                 offset = intel_compute_tile_offset(&src_x, &src_y,
3039                                                    plane_state, 0);
3040         else
3041                 offset = 0;
3042
3043         /* HSW/BDW do this automagically in hardware */
3044         if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3045                 unsigned int rotation = plane_state->base.rotation;
3046                 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3047                 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3048
3049                 if (rotation & DRM_ROTATE_180) {
3050                         src_x += src_w - 1;
3051                         src_y += src_h - 1;
3052                 } else if (rotation & DRM_REFLECT_X) {
3053                         src_x += src_w - 1;
3054                 }
3055         }
3056
3057         plane_state->main.offset = offset;
3058         plane_state->main.x = src_x;
3059         plane_state->main.y = src_y;
3060
3061         return 0;
3062 }
3063
/*
 * Program a pre-skylake primary plane from the precomputed plane state
 * (see i9xx_check_plane_surface()). All register writes happen under
 * the uncore lock via the _FW accessors.
 */
static void i9xx_update_primary_plane(struct intel_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum plane plane = primary->plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(plane);
	int x = plane_state->main.x;
	int y = plane_state->main.y;
	unsigned long irqflags;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/*
	 * gen4+ scans out from the precomputed tile offset; older gens
	 * only have a single linear address to program.
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		crtc->dspaddr_offset = plane_state->main.offset;
	else
		crtc->dspaddr_offset = linear_offset;

	crtc->adjusted_x = x;
	crtc->adjusted_y = y;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
		/* CHV pipe B primary plane uses the PRIM* registers instead */
		I915_WRITE_FW(PRIMSIZE(plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* HSW/BDW: surface base + x/y offset register */
		I915_WRITE_FW(DSPSURF(plane),
			      intel_plane_ggtt_offset(plane_state) +
			      crtc->dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+: surface base + tile offset + linear offset */
		I915_WRITE_FW(DSPSURF(plane),
			      intel_plane_ggtt_offset(plane_state) +
			      crtc->dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
	} else {
		/* gen2/3: just a single linear address */
		I915_WRITE_FW(DSPADDR(plane),
			      intel_plane_ggtt_offset(plane_state) +
			      crtc->dspaddr_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3130
3131 static void i9xx_disable_primary_plane(struct intel_plane *primary,
3132                                        struct intel_crtc *crtc)
3133 {
3134         struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
3135         enum plane plane = primary->plane;
3136         unsigned long irqflags;
3137
3138         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3139
3140         I915_WRITE_FW(DSPCNTR(plane), 0);
3141         if (INTEL_INFO(dev_priv)->gen >= 4)
3142                 I915_WRITE_FW(DSPSURF(plane), 0);
3143         else
3144                 I915_WRITE_FW(DSPADDR(plane), 0);
3145         POSTING_READ_FW(DSPCNTR(plane));
3146
3147         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3148 }
3149
3150 static u32
3151 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
3152 {
3153         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3154                 return 64;
3155         else
3156                 return intel_tile_width_bytes(fb, plane);
3157 }
3158
3159 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3160 {
3161         struct drm_device *dev = intel_crtc->base.dev;
3162         struct drm_i915_private *dev_priv = to_i915(dev);
3163
3164         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3165         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3166         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3167 }
3168
3169 /*
3170  * This function detaches (aka. unbinds) unused scalers in hardware
3171  */
3172 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3173 {
3174         struct intel_crtc_scaler_state *scaler_state;
3175         int i;
3176
3177         scaler_state = &intel_crtc->config->scaler_state;
3178
3179         /* loop through and disable scalers that aren't in use */
3180         for (i = 0; i < intel_crtc->num_scalers; i++) {
3181                 if (!scaler_state->scalers[i].in_use)
3182                         skl_detach_scaler(intel_crtc, i);
3183         }
3184 }
3185
3186 u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
3187                      unsigned int rotation)
3188 {
3189         u32 stride;
3190
3191         if (plane >= fb->format->num_planes)
3192                 return 0;
3193
3194         stride = intel_fb_pitch(fb, plane, rotation);
3195
3196         /*
3197          * The stride is either expressed as a multiple of 64 bytes chunks for
3198          * linear buffers or in number of tiles for tiled buffers.
3199          */
3200         if (drm_rotation_90_or_270(rotation))
3201                 stride /= intel_tile_height(fb, plane);
3202         else
3203                 stride /= intel_fb_stride_alignment(fb, plane);
3204
3205         return stride;
3206 }
3207
/*
 * Translate a DRM fourcc pixel format into the PLANE_CTL format/order
 * bits. Unknown formats hit MISSING_CASE() and return 0.
 */
static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3248
/*
 * Translate a framebuffer modifier into the PLANE_CTL tiling bits.
 * Linear (and unknown, via MISSING_CASE()) modifiers return 0.
 */
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
3266
/*
 * Translate a DRM rotation into the PLANE_CTL rotation bits.
 * DRM_ROTATE_0 (and unknown values, via MISSING_CASE()) return 0.
 */
static u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case DRM_ROTATE_0:
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}
3288
/*
 * Build the full PLANE_CTL value for a skylake-style universal plane
 * from the crtc/plane state: enable bit, gamma/CSC setup, format,
 * tiling, rotation and colorkey mode.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/*
	 * On GLK the gamma/CSC enables live in PLANE_COLOR_CTL instead,
	 * programmed in skylake_update_primary_plane().
	 */
	if (!IS_GEMINILAKE(dev_priv)) {
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3319
/*
 * Program a skylake-style primary plane from the precomputed plane
 * state, including the pipe scaler when one has been assigned
 * (scaler_id >= 0). All register writes happen under the uncore lock
 * via the _FW accessors.
 */
static void skylake_update_primary_plane(struct intel_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	u32 plane_ctl = plane_state->ctl;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride = skl_plane_stride(fb, 0, rotation);
	u32 surf_addr = plane_state->main.offset;
	int scaler_id = plane_state->scaler_id;
	int src_x = plane_state->main.x;
	int src_y = plane_state->main.y;
	/* src coordinates are 16.16 fixed point; keep the integer part */
	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
	int src_h = drm_rect_height(&plane_state->base.src) >> 16;
	int dst_x = plane_state->base.dst.x1;
	int dst_y = plane_state->base.dst.y1;
	int dst_w = drm_rect_width(&plane_state->base.dst);
	int dst_h = drm_rect_height(&plane_state->base.dst);
	unsigned long irqflags;

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	dst_w--;
	dst_h--;

	crtc->dspaddr_offset = surf_addr;

	crtc->adjusted_x = src_x;
	crtc->adjusted_y = src_y;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* GLK uses PLANE_COLOR_CTL for the gamma/CSC enables */
	if (IS_GEMINILAKE(dev_priv)) {
		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
			      PLANE_COLOR_PIPE_GAMMA_ENABLE |
			      PLANE_COLOR_PIPE_CSC_ENABLE |
			      PLANE_COLOR_PLANE_GAMMA_DISABLE);
	}

	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);

	if (scaler_id >= 0) {
		/* route the plane through the assigned pipe scaler; the
		 * scaler window then provides the destination position */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
	} else {
		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
	}

	/* surface address is written last, followed by a posting read */
	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
		      intel_plane_ggtt_offset(plane_state) + surf_addr);

	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3391
3392 static void skylake_disable_primary_plane(struct intel_plane *primary,
3393                                           struct intel_crtc *crtc)
3394 {
3395         struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
3396         enum plane_id plane_id = primary->id;
3397         enum pipe pipe = primary->pipe;
3398         unsigned long irqflags;
3399
3400         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3401
3402         I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
3403         I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
3404         POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
3405
3406         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3407 }
3408
3409 static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3410 {
3411         struct intel_crtc *crtc;
3412
3413         for_each_intel_crtc(&dev_priv->drm, crtc)
3414                 intel_finish_page_flip_cs(dev_priv, crtc->pipe);
3415 }
3416
/*
 * Reprogram every visible primary plane from its current plane state.
 * Used after a GPU reset to restore the scanout registers (see
 * intel_finish_reset()).
 */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible) {
			trace_intel_update_plane(&plane->base,
						 to_intel_crtc(crtc));

			plane->update_plane(plane,
					    to_intel_crtc_state(crtc->state),
					    plane_state);
		}
	}
}
3436
/*
 * Re-read the hardware state and, if @state is non-NULL, commit the
 * previously duplicated atomic state, forcing a full modeset
 * recalculation for every crtc.
 *
 * Returns 0 on success or a negative error code from the commit
 * (-EDEADLK is unexpected here and triggers a WARN).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
3475
3476 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3477 {
3478         return intel_has_gpu_reset(dev_priv) &&
3479                 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
3480 }
3481
/*
 * Prepare the display for a GPU reset: take all modeset locks, and if
 * the reset will clobber the display (or the test knob is set),
 * duplicate the current atomic state and disable all crtcs.
 *
 * NOTE: the modeset locks (and mode_config.mutex) are deliberately
 * left held on ALL return paths, including the early return; they are
 * dropped by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		/* deadlock: back off and retry the whole lock sequence */
		drm_modeset_backoff(ctx);
	}

	/* reset doesn't touch the display, but flips might get nuked anyway, */
	if (!i915.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* stash the duplicated state for intel_finish_reset() to restore */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3529
/*
 * Counterpart to intel_prepare_reset(): complete pending flips,
 * restore the display state saved before the reset (re-initializing
 * the display hardware fully if the reset clobbered it), and drop the
 * modeset locks taken by intel_prepare_reset().
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	int ret;

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	dev_priv->modeset_restore_state = NULL;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		if (!state) {
			/*
			 * Flips in the rings have been nuked by the reset,
			 * so update the base address of all primary
			 * planes to the last fb to make sure we're
			 * showing the correct fb after a reset.
			 *
			 * FIXME: Atomic will make this obsolete since we won't schedule
			 * CS-based flips (which might get lost in gpu resets) any more.
			 */
			intel_update_primary_planes(dev);
		} else {
			ret = __intel_display_resume(dev, state, ctx);
			if (ret)
				DRM_ERROR("Restoring old state failed with %i\n", ret);
		}
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	if (state)
		drm_atomic_state_put(state);
	/* drop the locks taken in intel_prepare_reset() */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);
}
3593
3594 static bool abort_flip_on_reset(struct intel_crtc *crtc)
3595 {
3596         struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
3597
3598         if (i915_reset_backoff(error))
3599                 return true;
3600
3601         if (crtc->reset_count != i915_reset_count(error))
3602                 return true;
3603
3604         return false;
3605 }
3606
3607 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3608 {
3609         struct drm_device *dev = crtc->dev;
3610         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3611         bool pending;
3612
3613         if (abort_flip_on_reset(intel_crtc))
3614                 return false;
3615
3616         spin_lock_irq(&dev->event_lock);
3617         pending = to_intel_crtc(crtc)->flip_work != NULL;
3618         spin_unlock_irq(&dev->event_lock);
3619
3620         return pending;
3621 }
3622
/*
 * Update the pipe source size and panel fitter state for a fastset
 * (flip without a full modeset), based on the crtc's current state.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3659
/*
 * Switch the FDI TX and RX for this crtc's pipe from the training
 * patterns back to normal (non-training) operation, with enhanced
 * framing enabled.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a different train-none field layout */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses its own train pattern field on the RX side */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3700
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll FDI_RX_IIR for bit lock, i.e. train 1 success */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write the bit back to clear (ack) it */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll FDI_RX_IIR for symbol lock, i.e. train 2 success */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3794
/*
 * FDI TX voltage swing / pre-emphasis settings tried in order during
 * SNB link training (see gen6_fdi_link_train()).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3801
3802 /* The FDI link training functions for SNB/Cougarpoint. */
3803 static void gen6_fdi_link_train(struct intel_crtc *crtc,
3804                                 const struct intel_crtc_state *crtc_state)
3805 {
3806         struct drm_device *dev = crtc->base.dev;
3807         struct drm_i915_private *dev_priv = to_i915(dev);
3808         int pipe = crtc->pipe;
3809         i915_reg_t reg;
3810         u32 temp, i, retry;
3811
3812         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3813            for train result */
3814         reg = FDI_RX_IMR(pipe);
3815         temp = I915_READ(reg);
3816         temp &= ~FDI_RX_SYMBOL_LOCK;
3817         temp &= ~FDI_RX_BIT_LOCK;
3818         I915_WRITE(reg, temp);
3819
3820         POSTING_READ(reg);
3821         udelay(150);
3822
3823         /* enable CPU FDI TX and PCH FDI RX */
3824         reg = FDI_TX_CTL(pipe);
3825         temp = I915_READ(reg);
3826         temp &= ~FDI_DP_PORT_WIDTH_MASK;
3827         temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
3828         temp &= ~FDI_LINK_TRAIN_NONE;
3829         temp |= FDI_LINK_TRAIN_PATTERN_1;
3830         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3831         /* SNB-B */
3832         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3833         I915_WRITE(reg, temp | FDI_TX_ENABLE);
3834
3835         I915_WRITE(FDI_RX_MISC(pipe),
3836                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3837
3838         reg = FDI_RX_CTL(pipe);
3839         temp = I915_READ(reg);
3840         if (HAS_PCH_CPT(dev_priv)) {
3841                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3842                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3843         } else {
3844                 temp &= ~FDI_LINK_TRAIN_NONE;
3845                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3846         }
3847         I915_WRITE(reg, temp | FDI_RX_ENABLE);
3848
3849         POSTING_READ(reg);
3850         udelay(150);
3851
3852         for (i = 0; i < 4; i++) {
3853                 reg = FDI_TX_CTL(pipe);
3854                 temp = I915_READ(reg);
3855                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3856                 temp |= snb_b_fdi_train_param[i];
3857                 I915_WRITE(reg, temp);
3858
3859                 POSTING_READ(reg);
3860                 udelay(500);
3861
3862                 for (retry = 0; retry < 5; retry++) {
3863                         reg = FDI_RX_IIR(pipe);
3864                         temp = I915_READ(reg);
3865                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3866                         if (temp & FDI_RX_BIT_LOCK) {
3867                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3868                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
3869                                 break;
3870                         }
3871                         udelay(50);
3872                 }
3873                 if (retry < 5)
3874                         break;
3875         }
3876         if (i == 4)
3877                 DRM_ERROR("FDI train 1 fail!\n");
3878
3879         /* Train 2 */
3880         reg = FDI_TX_CTL(pipe);
3881         temp = I915_READ(reg);
3882         temp &= ~FDI_LINK_TRAIN_NONE;
3883         temp |= FDI_LINK_TRAIN_PATTERN_2;
3884         if (IS_GEN6(dev_priv)) {
3885                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3886                 /* SNB-B */
3887                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3888         }
3889         I915_WRITE(reg, temp);
3890
3891         reg = FDI_RX_CTL(pipe);
3892         temp = I915_READ(reg);
3893         if (HAS_PCH_CPT(dev_priv)) {
3894                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3895                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3896         } else {
3897                 temp &= ~FDI_LINK_TRAIN_NONE;
3898                 temp |= FDI_LINK_TRAIN_PATTERN_2;
3899         }
3900         I915_WRITE(reg, temp);
3901
3902         POSTING_READ(reg);
3903         udelay(150);
3904
3905         for (i = 0; i < 4; i++) {
3906                 reg = FDI_TX_CTL(pipe);
3907                 temp = I915_READ(reg);
3908                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3909                 temp |= snb_b_fdi_train_param[i];
3910                 I915_WRITE(reg, temp);
3911
3912                 POSTING_READ(reg);
3913                 udelay(500);
3914
3915                 for (retry = 0; retry < 5; retry++) {
3916                         reg = FDI_RX_IIR(pipe);
3917                         temp = I915_READ(reg);
3918                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3919                         if (temp & FDI_RX_SYMBOL_LOCK) {
3920                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3921                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
3922                                 break;
3923                         }
3924                         udelay(50);
3925                 }
3926                 if (retry < 5)
3927                         break;
3928         }
3929         if (i == 4)
3930                 DRM_ERROR("FDI train 2 fail!\n");
3931
3932         DRM_DEBUG_KMS("FDI train done.\n");
3933 }
3934
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Walks every vswing/pre-emphasis entry of snb_b_fdi_train_param by hand,
 * trying each level twice, until both training patterns report lock in
 * FDI_RX_IIR (or all levels are exhausted).
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];	/* j/2: each level is tried twice */
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read once in case the lock bit landed after the first read */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				/* write the lock bit back to ack/clear it */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* pattern 1 never locked at this level; try the next one */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4054
/*
 * Enable the FDI PLLs for @intel_crtc's pipe: first the PCH FDI RX PLL,
 * then switch the RX clock source to PCDclk, then (if not already on)
 * the CPU FDI TX PLL.  Each step is posted and given time to settle.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* copy the pipe's BPC field from PIPECONF into FDI RX (shifted into place) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4091
/*
 * Disable the FDI PLLs, reversing ironlake_fdi_pll_enable(): drop the RX
 * back to Rawclk, turn off the TX PLL, then the RX PLL, with posting
 * reads and delays between the steps.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4121
/*
 * Disable the CPU FDI TX and PCH FDI RX for @crtc's pipe, leaving both
 * sides parked in training pattern 1 with their BPC fields kept in sync
 * with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* refresh the BPC field (bits 18:16) from PIPECONF while disabling */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4174
4175 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4176 {
4177         struct intel_crtc *crtc;
4178
4179         /* Note that we don't need to be called with mode_config.lock here
4180          * as our list of CRTC objects is static for the lifetime of the
4181          * device and so cannot disappear as we iterate. Similarly, we can
4182          * happily treat the predicates as racy, atomic checks as userspace
4183          * cannot claim and pin a new fb without at least acquring the
4184          * struct_mutex and so serialising with us.
4185          */
4186         for_each_intel_crtc(&dev_priv->drm, crtc) {
4187                 if (atomic_read(&crtc->unpin_work_count) == 0)
4188                         continue;
4189
4190                 if (crtc->flip_work)
4191                         intel_wait_for_vblank(dev_priv, crtc->pipe);
4192
4193                 return true;
4194         }
4195
4196         return false;
4197 }
4198
/*
 * Complete a finished page flip: detach the work from the crtc, deliver
 * the userspace vblank event (if one was requested), drop the vblank
 * reference held for the flip, wake waiters on pending_flip_queue, and
 * queue the buffer unpin to the driver workqueue.
 *
 * NOTE(review): the caller visible in this file
 * (intel_crtc_wait_for_pending_flips) holds dev->event_lock around this
 * call — presumably that is a requirement for all callers; confirm.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	/* Detach first so the crtc no longer reports this flip as pending. */
	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);

	/* Unpinning is deferred to the driver workqueue. */
	queue_work(dev_priv->wq, &work->unpin_work);
}
4217
/*
 * Wait (interruptibly, up to 60 seconds) for any pending page flip on
 * @crtc to complete.  If the wait times out, a stuck non-MMIO flip is
 * forcibly completed (with a one-time warning) so that the modeset can
 * make progress.
 *
 * Returns 0 on success or timeout-with-recovery, or a negative error if
 * the wait was interrupted by a signal.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	/* ret == 0 means the 60s timeout elapsed with the flip still pending */
	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		/* event_lock protects flip_work and is required by page_flip_completed() */
		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
4249
/*
 * Gate the iCLKIP pixel clock and disable the SSC modulator via the
 * sideband interface.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* sideband (SBI) accesses are serialised by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4264
/*
 * Program iCLKIP clock to the desired frequency (the crtc's adjusted
 * pixel clock).  The clock is first gated and the modulator disabled
 * via lpt_disable_iclkip(), the divisors are computed, programmed over
 * the sideband interface, and the clock is finally ungated.
 */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		/* split into a 7-bit integer divisor and a phase increment */
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* sideband (SBI) accesses are serialised by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4342
/*
 * Read back the iCLKIP configuration and reconstruct the resulting
 * clock frequency in kHz (the inverse of lpt_program_iclkip()).
 * Returns 0 when the pixel clock is gated or the SSC modulator is
 * disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;	/* virtual root clock in kHz */
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* undo the divisor/phase split done in lpt_program_iclkip() */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4379
/*
 * Mirror the CPU transcoder's horizontal and vertical timing registers
 * into the corresponding registers of PCH transcoder @pch_transcoder.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* horizontal timings */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* vertical timings */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4403
4404 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4405 {
4406         struct drm_i915_private *dev_priv = to_i915(dev);
4407         uint32_t temp;
4408
4409         temp = I915_READ(SOUTH_CHICKEN1);
4410         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4411                 return;
4412
4413         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4414         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4415
4416         temp &= ~FDI_BC_BIFURCATION_SELECT;
4417         if (enable)
4418                 temp |= FDI_BC_BIFURCATION_SELECT;
4419
4420         DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4421         I915_WRITE(SOUTH_CHICKEN1, temp);
4422         POSTING_READ(SOUTH_CHICKEN1);
4423 }
4424
4425 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4426 {
4427         struct drm_device *dev = intel_crtc->base.dev;
4428
4429         switch (intel_crtc->pipe) {
4430         case PIPE_A:
4431                 break;
4432         case PIPE_B:
4433                 if (intel_crtc->config->fdi_lanes > 2)
4434                         cpt_set_fdi_bc_bifurcation(dev, false);
4435                 else
4436                         cpt_set_fdi_bc_bifurcation(dev, true);
4437
4438                 break;
4439         case PIPE_C:
4440                 cpt_set_fdi_bc_bifurcation(dev, true);
4441
4442                 break;
4443         default:
4444                 BUG();
4445         }
4446 }
4447
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* pick the first DP or eDP encoder attached to this crtc */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/*
	 * NOTE(review): -1 is not a declared enum port value; it only acts
	 * as a "no DP encoder found" sentinel, and the caller in this file
	 * BUG()s on anything other than PORT_B/C/D.  Consider returning a
	 * proper PORT_NONE if/when one is available.
	 */
	return -1;
}
4463
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 *
 * The PCH transcoder for @crtc_state's pipe must be disabled on entry
 * (asserted below).
 */
static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		/* route DPLL B to this transcoder iff it is our shared dpll */
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		/* extract the BPC field from PIPECONF (shifted down to bits 2:0) */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4562
/*
 * LPT variant of the PCH enable sequence: program the iCLKIP clock,
 * mirror the CPU transcoder timings, and enable the PCH transcoder.
 * NOTE(review): only TRANSCODER_A/PIPE_A are referenced here, which
 * suggests LPT exposes a single PCH transcoder — confirm against the
 * platform docs.
 */
static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4578
4579 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4580 {
4581         struct drm_i915_private *dev_priv = to_i915(dev);
4582         i915_reg_t dslreg = PIPEDSL(pipe);
4583         u32 temp;
4584
4585         temp = I915_READ(dslreg);
4586         udelay(500);
4587         if (wait_for(I915_READ(dslreg) != temp, 5)) {
4588                 if (wait_for(I915_READ(dslreg) != temp, 5))
4589                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4590         }
4591 }
4592
4593 static int
4594 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4595                   unsigned scaler_user, int *scaler_id, unsigned int rotation,
4596                   int src_w, int src_h, int dst_w, int dst_h)
4597 {
4598         struct intel_crtc_scaler_state *scaler_state =
4599                 &crtc_state->scaler_state;
4600         struct intel_crtc *intel_crtc =
4601                 to_intel_crtc(crtc_state->base.crtc);
4602         int need_scaling;
4603
4604         need_scaling = drm_rotation_90_or_270(rotation) ?
4605                 (src_h != dst_w || src_w != dst_h):
4606                 (src_w != dst_w || src_h != dst_h);
4607
4608         /*
4609          * if plane is being disabled or scaler is no more required or force detach
4610          *  - free scaler binded to this plane/crtc
4611          *  - in order to do this, update crtc->scaler_usage
4612          *
4613          * Here scaler state in crtc_state is set free so that
4614          * scaler can be assigned to other user. Actual register
4615          * update to free the scaler is done in plane/panel-fit programming.
4616          * For this purpose crtc/plane_state->scaler_id isn't reset here.
4617          */
4618         if (force_detach || !need_scaling) {
4619                 if (*scaler_id >= 0) {
4620                         scaler_state->scaler_users &= ~(1 << scaler_user);
4621                         scaler_state->scalers[*scaler_id].in_use = 0;
4622
4623                         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4624                                 "Staged freeing scaler id %d scaler_users = 0x%x\n",
4625                                 intel_crtc->pipe, scaler_user, *scaler_id,
4626                                 scaler_state->scaler_users);
4627                         *scaler_id = -1;
4628                 }
4629                 return 0;
4630         }
4631
4632         /* range checks */
4633         if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4634                 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4635
4636                 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4637                 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4638                 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4639                         "size is out of scaler range\n",
4640                         intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4641                 return -EINVAL;
4642         }
4643
4644         /* mark this plane as a scaler user in crtc_state */
4645         scaler_state->scaler_users |= (1 << scaler_user);
4646         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4647                 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4648                 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4649                 scaler_state->scaler_users);
4650
4651         return 0;
4652 }
4653
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state to stage the pipe scaler update for
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
        const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

        /* Stage a pipe scaler request: scale pipe_src_{w,h} up/down to the
         * adjusted mode's active display size. The scaler is force-detached
         * when the crtc is not active. */
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
                &state->scaler_state.scaler_id, DRM_ROTATE_0,
                state->pipe_src_w, state->pipe_src_h,
                adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
4672
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc's state the plane belongs to
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                   struct intel_plane_state *plane_state)
{

        struct intel_plane *intel_plane =
                to_intel_plane(plane_state->base.plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        int ret;

        /* A plane without an fb, or one that isn't visible, needs no
         * scaling; force-detach any scaler it may still own. */
        bool force_detach = !fb || !plane_state->base.visible;

        /* Plane src coordinates are in 16.16 fixed point; shift down to
         * integer pixels before handing them to the scaler code. */
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
                                &plane_state->scaler_id,
                                plane_state->base.rotation,
                                drm_rect_width(&plane_state->base.src) >> 16,
                                drm_rect_height(&plane_state->base.src) >> 16,
                                drm_rect_width(&plane_state->base.dst),
                                drm_rect_height(&plane_state->base.dst));

        /* Done on error, or when no scaler ended up assigned (no scaling
         * needed, or the scaler was just staged for release). */
        if (ret || plane_state->scaler_id < 0)
                return ret;

        /* check colorkey */
        if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
                DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
                              intel_plane->base.base.id,
                              intel_plane->base.name);
                return -EINVAL;
        }

        /* Check src format */
        switch (fb->format->format) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
                break;
        default:
                /* Any other format cannot be scaled by the HW scaler. */
                DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
                              intel_plane->base.base.id, intel_plane->base.name,
                              fb->base.id, fb->format->format);
                return -EINVAL;
        }

        return 0;
}
4737
4738 static void skylake_scaler_disable(struct intel_crtc *crtc)
4739 {
4740         int i;
4741
4742         for (i = 0; i < crtc->num_scalers; i++)
4743                 skl_detach_scaler(crtc, i);
4744 }
4745
/*
 * Program the SKL+ pipe scaler that implements panel fitting: enable the
 * scaler assigned in the crtc's scaler state and set its window position
 * and size from the staged pch_pfit config. No-op when panel fitting is
 * not enabled for this crtc.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;

        if (crtc->config->pch_pfit.enabled) {
                int id;

                /* pfit enabled but no scaler assigned would be an
                 * inconsistent atomic state — bail out loudly. */
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
        }
}
4767
/*
 * Program the ILK-HSW panel fitter (PF_CTL/PF_WIN_*) from the staged
 * pch_pfit config. No-op when panel fitting is not enabled for this crtc.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;

        if (crtc->config->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
                        /* IVB/HSW additionally select the pipe in PF_CTL. */
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
                I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
        }
}
4788
/*
 * Enable IPS on @crtc's pipe if the crtc config has ips_enabled set.
 * On Broadwell IPS is toggled through the pcode mailbox; on Haswell it is
 * toggled through the IPS_CTL register directly.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc->config->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */

        assert_plane_enabled(dev_priv, crtc->plane);
        if (IS_BROADWELL(dev_priv)) {
                /* Mailbox accesses are serialized by the rps hw_lock. */
                mutex_lock(&dev_priv->rps.hw_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
                mutex_unlock(&dev_priv->rps.hw_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, IPS_ENABLE,
                                            50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
4826
/*
 * Disable IPS on @crtc's pipe (counterpart of hsw_enable_ips). No-op if the
 * crtc config doesn't have ips_enabled set. Ends with a vblank wait, which
 * must happen before the plane itself may be disabled.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc->config->ips_enabled)
                return;

        assert_plane_enabled(dev_priv, crtc->plane);
        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->rps.hw_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->rps.hw_lock);
                /* wait for pcode to finish disabling IPS, which may take up to 42ms */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
                                            42))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                /* Flush the write before waiting for the vblank below. */
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
4853
4854 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4855 {
4856         if (intel_crtc->overlay) {
4857                 struct drm_device *dev = intel_crtc->base.dev;
4858
4859                 mutex_lock(&dev->struct_mutex);
4860                 (void) intel_overlay_switch_off(intel_crtc->overlay);
4861                 mutex_unlock(&dev->struct_mutex);
4862         }
4863
4864         /* Let userspace switch the overlay on again. In most cases userspace
4865          * has to recompute where to put it anyway.
4866          */
4867 }
4868
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * FIXME IPS should be fine as long as one plane is
         * enabled, but in practice it seems to have problems
         * when going from primary only to sprite only and vice
         * versa.
         */
        hsw_enable_ips(intel_crtc);

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So don't enable underrun reporting before at least some planes
         * are enabled.
         * FIXME: Need to fix the logic to work when we turn off all planes
         * but leave the pipe running.
         */
        if (IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        /* Underruns don't always raise interrupts, so check manually. */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);
}
4909
/* FIXME move all this to pre_plane_update() with proper state tracking */
/*
 * Counterpart of intel_post_enable_primary: sleeping operations that must be
 * done before the primary plane is disabled (suppress gen2 underrun
 * reporting, turn off IPS).
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         * FIXME: Need to fix the logic to work when we turn off all planes
         * but leave the pipe running.
         */
        if (IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        /*
         * FIXME IPS should be fine as long as one plane is
         * enabled, but in practice it seems to have problems
         * when going from primary only to sprite only and vice
         * versa.
         */
        hsw_disable_ips(intel_crtc);
}
4936
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic variant of the pre-disable-primary path: runs the common
 * intel_pre_disable_primary() work and additionally turns off memory
 * self-refresh on GMCH platforms so the plane disable actually latches.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        intel_pre_disable_primary(crtc);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
4961
/*
 * Post-plane-update housekeeping for a crtc: flip frontbuffer tracking,
 * update watermarks if needed, run FBC post-update, and re-run the
 * post-enable-primary work when the primary plane just became visible.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_pri_state =
                drm_atomic_get_existing_plane_state(old_state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);

        /* old_pri_state is only present when the primary plane was part of
         * this atomic commit. */
        if (old_pri_state) {
                struct intel_plane_state *primary_state =
                        to_intel_plane_state(primary->state);
                struct intel_plane_state *old_primary_state =
                        to_intel_plane_state(old_pri_state);

                intel_fbc_post_update(crtc);

                /* Primary just became visible (fresh modeset, or was hidden
                 * before): run the post-enable work (IPS, underruns). */
                if (primary_state->base.visible &&
                    (needs_modeset(&pipe_config->base) ||
                     !old_primary_state->base.visible))
                        intel_post_enable_primary(&crtc->base);
        }
}
4991
4992 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
4993                                    struct intel_crtc_state *pipe_config)
4994 {
4995         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4996         struct drm_device *dev = crtc->base.dev;
4997         struct drm_i915_private *dev_priv = to_i915(dev);
4998         struct drm_atomic_state *old_state = old_crtc_state->base.state;
4999         struct drm_plane *primary = crtc->base.primary;
5000         struct drm_plane_state *old_pri_state =
5001                 drm_atomic_get_existing_plane_state(old_state, primary);
5002         bool modeset = needs_modeset(&pipe_config->base);
5003         struct intel_atomic_state *old_intel_state =
5004                 to_intel_atomic_state(old_state);
5005
5006         if (old_pri_state) {
5007                 struct intel_plane_state *primary_state =
5008                         to_intel_plane_state(primary->state);
5009                 struct intel_plane_state *old_primary_state =
5010                         to_intel_plane_state(old_pri_state);
5011
5012                 intel_fbc_pre_update(crtc, pipe_config, primary_state);
5013
5014                 if (old_primary_state->base.visible &&
5015                     (modeset || !primary_state->base.visible))
5016                         intel_pre_disable_primary(&crtc->base);
5017         }
5018
5019         /*
5020          * Vblank time updates from the shadow to live plane control register
5021          * are blocked if the memory self-refresh mode is active at that
5022          * moment. So to make sure the plane gets truly disabled, disable
5023          * first the self-refresh mode. The self-refresh enable bit in turn
5024          * will be checked/applied by the HW only at the next frame start
5025          * event which is after the vblank start event, so we need to have a
5026          * wait-for-vblank between disabling the plane and the pipe.
5027          */
5028         if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
5029             pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5030                 intel_wait_for_vblank(dev_priv, crtc->pipe);
5031
5032         /*
5033          * IVB workaround: must disable low power watermarks for at least
5034          * one frame before enabling scaling.  LP watermarks can be re-enabled
5035          * when scaling is disabled.
5036          *
5037          * WaCxSRDisabledForSpriteScaling:ivb
5038          */
5039         if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
5040                 intel_wait_for_vblank(dev_priv, crtc->pipe);
5041
5042         /*
5043          * If we're doing a modeset, we're done.  No need to do any pre-vblank
5044          * watermark programming here.
5045          */
5046         if (needs_modeset(&pipe_config->base))
5047                 return;
5048
5049         /*
5050          * For platforms that support atomic watermarks, program the
5051          * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
5052          * will be the intermediate values that are safe for both pre- and
5053          * post- vblank; when vblank happens, the 'active' values will be set
5054          * to the final 'target' values and we'll do this again to get the
5055          * optimal watermarks.  For gen9+ platforms, the values we program here
5056          * will be the final target values which will get automatically latched
5057          * at vblank time; no further programming will be necessary.
5058          *
5059          * If a platform hasn't been transitioned to atomic watermarks yet,
5060          * we'll continue to update watermarks the old way, if flags tell
5061          * us to.
5062          */
5063         if (dev_priv->display.initial_watermarks != NULL)
5064                 dev_priv->display.initial_watermarks(old_intel_state,
5065                                                      pipe_config);
5066         else if (pipe_config->update_wm_pre)
5067                 intel_update_watermarks(crtc);
5068 }
5069
/*
 * Disable the overlay plus every plane in @plane_mask on @crtc, then flip
 * the frontbuffer tracking for the whole pipe.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *p;
        int pipe = intel_crtc->pipe;

        intel_crtc_dpms_overlay_disable(intel_crtc);

        drm_for_each_plane_mask(p, dev, plane_mask)
                to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);

        /*
         * FIXME: Once we grow proper nuclear flip support out of this we need
         * to compute the mask of flip planes precisely. For the time being
         * consider this a flip to a NULL plane.
         */
        intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
5089
5090 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5091                                           struct intel_crtc_state *crtc_state,
5092                                           struct drm_atomic_state *old_state)
5093 {
5094         struct drm_connector_state *conn_state;
5095         struct drm_connector *conn;
5096         int i;
5097
5098         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5099                 struct intel_encoder *encoder =
5100                         to_intel_encoder(conn_state->best_encoder);
5101
5102                 if (conn_state->crtc != crtc)
5103                         continue;
5104
5105                 if (encoder->pre_pll_enable)
5106                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5107         }
5108 }
5109
5110 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5111                                       struct intel_crtc_state *crtc_state,
5112                                       struct drm_atomic_state *old_state)
5113 {
5114         struct drm_connector_state *conn_state;
5115         struct drm_connector *conn;
5116         int i;
5117
5118         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5119                 struct intel_encoder *encoder =
5120                         to_intel_encoder(conn_state->best_encoder);
5121
5122                 if (conn_state->crtc != crtc)
5123                         continue;
5124
5125                 if (encoder->pre_enable)
5126                         encoder->pre_enable(encoder, crtc_state, conn_state);
5127         }
5128 }
5129
5130 static void intel_encoders_enable(struct drm_crtc *crtc,
5131                                   struct intel_crtc_state *crtc_state,
5132                                   struct drm_atomic_state *old_state)
5133 {
5134         struct drm_connector_state *conn_state;
5135         struct drm_connector *conn;
5136         int i;
5137
5138         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5139                 struct intel_encoder *encoder =
5140                         to_intel_encoder(conn_state->best_encoder);
5141
5142                 if (conn_state->crtc != crtc)
5143                         continue;
5144
5145                 encoder->enable(encoder, crtc_state, conn_state);
5146                 intel_opregion_notify_encoder(encoder, true);
5147         }
5148 }
5149
5150 static void intel_encoders_disable(struct drm_crtc *crtc,
5151                                    struct intel_crtc_state *old_crtc_state,
5152                                    struct drm_atomic_state *old_state)
5153 {
5154         struct drm_connector_state *old_conn_state;
5155         struct drm_connector *conn;
5156         int i;
5157
5158         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5159                 struct intel_encoder *encoder =
5160                         to_intel_encoder(old_conn_state->best_encoder);
5161
5162                 if (old_conn_state->crtc != crtc)
5163                         continue;
5164
5165                 intel_opregion_notify_encoder(encoder, false);
5166                 encoder->disable(encoder, old_crtc_state, old_conn_state);
5167         }
5168 }
5169
5170 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5171                                         struct intel_crtc_state *old_crtc_state,
5172                                         struct drm_atomic_state *old_state)
5173 {
5174         struct drm_connector_state *old_conn_state;
5175         struct drm_connector *conn;
5176         int i;
5177
5178         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5179                 struct intel_encoder *encoder =
5180                         to_intel_encoder(old_conn_state->best_encoder);
5181
5182                 if (old_conn_state->crtc != crtc)
5183                         continue;
5184
5185                 if (encoder->post_disable)
5186                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5187         }
5188 }
5189
5190 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5191                                             struct intel_crtc_state *old_crtc_state,
5192                                             struct drm_atomic_state *old_state)
5193 {
5194         struct drm_connector_state *old_conn_state;
5195         struct drm_connector *conn;
5196         int i;
5197
5198         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5199                 struct intel_encoder *encoder =
5200                         to_intel_encoder(old_conn_state->best_encoder);
5201
5202                 if (old_conn_state->crtc != crtc)
5203                         continue;
5204
5205                 if (encoder->post_pll_disable)
5206                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5207         }
5208 }
5209
/*
 * Full crtc enable sequence for ILK/SNB/IVB (PCH) pipes: suppress spurious
 * underruns, program PLL/M-N/timings/pipeconf, run encoder pre-enable hooks,
 * bring up FDI and the panel fitter, load the LUT, enable the pipe and (if
 * present) the PCH side, then enable encoders and vblanks. The statement
 * order below is deliberate — see the inline comments.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);

        /* Enabling an already-active crtc would be a state-tracking bug. */
        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (intel_crtc->config->has_pch_encoder)
                intel_prepare_shared_dpll(intel_crtc);

        if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);

        intel_set_pipe_timings(intel_crtc);
        intel_set_pipe_src_size(intel_crtc);

        if (intel_crtc->config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(intel_crtc,
                                     &intel_crtc->config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(crtc);

        intel_crtc->active = true;

        intel_encoders_pre_enable(crtc, pipe_config, old_state);

        if (intel_crtc->config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(intel_crtc);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(intel_crtc);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(&pipe_config->base);

        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
        intel_enable_pipe(intel_crtc);

        if (intel_crtc->config->has_pch_encoder)
                ironlake_pch_enable(pipe_config);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        intel_encoders_enable(crtc, pipe_config, old_state);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /* Must wait for vblank to avoid spurious PCH FIFO underruns */
        if (intel_crtc->config->has_pch_encoder)
                intel_wait_for_vblank(dev_priv, pipe);
        /* Re-arm the underrun reporting that was suppressed above. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5298
5299 /* IPS only exists on ULT machines and is tied to pipe A. */
5300 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5301 {
5302         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5303 }
5304
5305 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5306                                 struct drm_atomic_state *old_state)
5307 {
5308         struct drm_crtc *crtc = pipe_config->base.crtc;
5309         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5310         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5311         int pipe = intel_crtc->pipe, hsw_workaround_pipe;
5312         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5313         struct intel_atomic_state *old_intel_state =
5314                 to_intel_atomic_state(old_state);
5315
5316         if (WARN_ON(intel_crtc->active))
5317                 return;
5318
5319         if (intel_crtc->config->has_pch_encoder)
5320                 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5321                                                       false);
5322
5323         intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
5324
5325         if (intel_crtc->config->shared_dpll)
5326                 intel_enable_shared_dpll(intel_crtc);
5327
5328         if (intel_crtc_has_dp_encoder(intel_crtc->config))
5329                 intel_dp_set_m_n(intel_crtc, M1_N1);
5330
5331         if (!transcoder_is_dsi(cpu_transcoder))
5332                 intel_set_pipe_timings(intel_crtc);
5333
5334         intel_set_pipe_src_size(intel_crtc);
5335
5336         if (cpu_transcoder != TRANSCODER_EDP &&
5337             !transcoder_is_dsi(cpu_transcoder)) {
5338                 I915_WRITE(PIPE_MULT(cpu_transcoder),
5339                            intel_crtc->config->pixel_multiplier - 1);
5340         }
5341
5342         if (intel_crtc->config->has_pch_encoder) {
5343                 intel_cpu_transcoder_set_m_n(intel_crtc,
5344                                      &intel_crtc->config->fdi_m_n, NULL);
5345         }
5346
5347         if (!transcoder_is_dsi(cpu_transcoder))
5348                 haswell_set_pipeconf(crtc);
5349
5350         haswell_set_pipemisc(crtc);
5351
5352         intel_color_set_csc(&pipe_config->base);
5353
5354         intel_crtc->active = true;
5355
5356         if (intel_crtc->config->has_pch_encoder)
5357                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5358         else
5359                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5360
5361         intel_encoders_pre_enable(crtc, pipe_config, old_state);
5362
5363         if (intel_crtc->config->has_pch_encoder)
5364                 dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
5365
5366         if (!transcoder_is_dsi(cpu_transcoder))
5367                 intel_ddi_enable_pipe_clock(pipe_config);
5368
5369         if (INTEL_GEN(dev_priv) >= 9)
5370                 skylake_pfit_enable(intel_crtc);
5371         else
5372                 ironlake_pfit_enable(intel_crtc);
5373
5374         /*
5375          * On ILK+ LUT must be loaded before the pipe is running but with
5376          * clocks enabled
5377          */
5378         intel_color_load_luts(&pipe_config->base);
5379
5380         intel_ddi_set_pipe_settings(pipe_config);
5381         if (!transcoder_is_dsi(cpu_transcoder))
5382                 intel_ddi_enable_transcoder_func(pipe_config);
5383
5384         if (dev_priv->display.initial_watermarks != NULL)
5385                 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5386
5387         /* XXX: Do the pipe assertions at the right place for BXT DSI. */
5388         if (!transcoder_is_dsi(cpu_transcoder))
5389                 intel_enable_pipe(intel_crtc);
5390
5391         if (intel_crtc->config->has_pch_encoder)
5392                 lpt_pch_enable(pipe_config);
5393
5394         if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
5395                 intel_ddi_set_vc_payload_alloc(pipe_config, true);
5396
5397         assert_vblank_disabled(crtc);
5398         drm_crtc_vblank_on(crtc);
5399
5400         intel_encoders_enable(crtc, pipe_config, old_state);
5401
5402         if (intel_crtc->config->has_pch_encoder) {
5403                 intel_wait_for_vblank(dev_priv, pipe);
5404                 intel_wait_for_vblank(dev_priv, pipe);
5405                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5406                 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5407                                                       true);
5408         }
5409
5410         /* If we change the relative order between pipe/planes enabling, we need
5411          * to change the workaround. */
5412         hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5413         if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
5414                 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
5415                 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
5416         }
5417 }
5418
5419 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5420 {
5421         struct drm_device *dev = crtc->base.dev;
5422         struct drm_i915_private *dev_priv = to_i915(dev);
5423         int pipe = crtc->pipe;
5424
5425         /* To avoid upsetting the power well on haswell only disable the pfit if
5426          * it's in use. The hw state code will make sure we get this right. */
5427         if (force || crtc->config->pch_pfit.enabled) {
5428                 I915_WRITE(PF_CTL(pipe), 0);
5429                 I915_WRITE(PF_WIN_POS(pipe), 0);
5430                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5431         }
5432 }
5433
/*
 * Disable sequence for ILK/SNB/IVB crtcs: encoders, vblanks, CPU pipe and
 * pfit first, then FDI and the PCH transcoder.  The ordering of the steps
 * below matters; it mirrors the enable path in reverse.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        if (intel_crtc->config->has_pch_encoder) {
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
        }

        intel_encoders_disable(crtc, old_crtc_state, old_state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(intel_crtc);

        ironlake_pfit_disable(intel_crtc, false);

        if (intel_crtc->config->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(crtc, old_crtc_state, old_state);

        if (intel_crtc->config->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Re-arm the underrun reporting that was suppressed above. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5494
/*
 * Disable sequence for HSW+ crtcs; the pipe-level steps are skipped for
 * DSI transcoders (the encoder hooks handle those).
 *
 * NOTE(review): this function reads the old state both through
 * intel_crtc->config and old_crtc_state — presumably equivalent at this
 * point in the commit, but worth unifying; confirm against the atomic
 * commit flow before touching.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

        /* Suppress spurious PCH fifo underruns during teardown. */
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);

        intel_encoders_disable(crtc, old_crtc_state, old_state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(intel_crtc);

        if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

        /* gen9+ uses the scaler unit; older DDI platforms use the ILK pfit */
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(intel_crtc, false);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_pipe_clock(intel_crtc->config);

        intel_encoders_post_disable(crtc, old_crtc_state, old_state);

        /* Re-arm the PCH underrun reporting suppressed above. */
        if (old_crtc_state->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      true);
}
5536
5537 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5538 {
5539         struct drm_device *dev = crtc->base.dev;
5540         struct drm_i915_private *dev_priv = to_i915(dev);
5541         struct intel_crtc_state *pipe_config = crtc->config;
5542
5543         if (!pipe_config->gmch_pfit.control)
5544                 return;
5545
5546         /*
5547          * The panel fitter should only be adjusted whilst the pipe is disabled,
5548          * according to register description and PRM.
5549          */
5550         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5551         assert_pipe_disabled(dev_priv, crtc->pipe);
5552
5553         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5554         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5555
5556         /* Border color in case we don't scale up to the full screen. Black by
5557          * default, change to something else for debugging. */
5558         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5559 }
5560
/*
 * Map a DDI port to the power domain covering that port's lanes.
 * Unknown ports are flagged via MISSING_CASE and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
5579
5580 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
5581                                   struct intel_crtc_state *crtc_state)
5582 {
5583         struct drm_device *dev = crtc->dev;
5584         struct drm_i915_private *dev_priv = to_i915(dev);
5585         struct drm_encoder *encoder;
5586         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5587         enum pipe pipe = intel_crtc->pipe;
5588         u64 mask;
5589         enum transcoder transcoder = crtc_state->cpu_transcoder;
5590
5591         if (!crtc_state->base.active)
5592                 return 0;
5593
5594         mask = BIT(POWER_DOMAIN_PIPE(pipe));
5595         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5596         if (crtc_state->pch_pfit.enabled ||
5597             crtc_state->pch_pfit.force_thru)
5598                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5599
5600         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5601                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5602
5603                 mask |= BIT_ULL(intel_encoder->power_domain);
5604         }
5605
5606         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
5607                 mask |= BIT(POWER_DOMAIN_AUDIO);
5608
5609         if (crtc_state->shared_dpll)
5610                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
5611
5612         return mask;
5613 }
5614
5615 static u64
5616 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5617                                struct intel_crtc_state *crtc_state)
5618 {
5619         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5620         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5621         enum intel_display_power_domain domain;
5622         u64 domains, new_domains, old_domains;
5623
5624         old_domains = intel_crtc->enabled_power_domains;
5625         intel_crtc->enabled_power_domains = new_domains =
5626                 get_crtc_power_domains(crtc, crtc_state);
5627
5628         domains = new_domains & ~old_domains;
5629
5630         for_each_power_domain(domain, domains)
5631                 intel_display_power_get(dev_priv, domain);
5632
5633         return old_domains & ~new_domains;
5634 }
5635
/* Drop one power reference for each domain bit set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
                                      u64 domains)
{
        enum intel_display_power_domain domain;

        for_each_power_domain(domain, domains)
                intel_display_power_put(dev_priv, domain);
}
5644
5645 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
5646                                    struct drm_atomic_state *old_state)
5647 {
5648         struct intel_atomic_state *old_intel_state =
5649                 to_intel_atomic_state(old_state);
5650         struct drm_crtc *crtc = pipe_config->base.crtc;
5651         struct drm_device *dev = crtc->dev;
5652         struct drm_i915_private *dev_priv = to_i915(dev);
5653         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5654         int pipe = intel_crtc->pipe;
5655
5656         if (WARN_ON(intel_crtc->active))
5657                 return;
5658
5659         if (intel_crtc_has_dp_encoder(intel_crtc->config))
5660                 intel_dp_set_m_n(intel_crtc, M1_N1);
5661
5662         intel_set_pipe_timings(intel_crtc);
5663         intel_set_pipe_src_size(intel_crtc);
5664
5665         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
5666                 struct drm_i915_private *dev_priv = to_i915(dev);
5667
5668                 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
5669                 I915_WRITE(CHV_CANVAS(pipe), 0);
5670         }
5671
5672         i9xx_set_pipeconf(intel_crtc);
5673
5674         intel_crtc->active = true;
5675
5676         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5677
5678         intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
5679
5680         if (IS_CHERRYVIEW(dev_priv)) {
5681                 chv_prepare_pll(intel_crtc, intel_crtc->config);
5682                 chv_enable_pll(intel_crtc, intel_crtc->config);
5683         } else {
5684                 vlv_prepare_pll(intel_crtc, intel_crtc->config);
5685                 vlv_enable_pll(intel_crtc, intel_crtc->config);
5686         }
5687
5688         intel_encoders_pre_enable(crtc, pipe_config, old_state);
5689
5690         i9xx_pfit_enable(intel_crtc);
5691
5692         intel_color_load_luts(&pipe_config->base);
5693
5694         dev_priv->display.initial_watermarks(old_intel_state,
5695                                              pipe_config);
5696         intel_enable_pipe(intel_crtc);
5697
5698         assert_vblank_disabled(crtc);
5699         drm_crtc_vblank_on(crtc);
5700
5701         intel_encoders_enable(crtc, pipe_config, old_state);
5702 }
5703
5704 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
5705 {
5706         struct drm_device *dev = crtc->base.dev;
5707         struct drm_i915_private *dev_priv = to_i915(dev);
5708
5709         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
5710         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
5711 }
5712
/*
 * Enable sequence for pre-VLV GMCH crtcs: program PLL dividers, pipe
 * timings/size and PIPECONF, then bring up the PLL, pfit, LUTs and
 * finally the pipe.  Step order matters.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
                             struct drm_atomic_state *old_state)
{
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        if (WARN_ON(intel_crtc->active))
                return;

        i9xx_set_pll_dividers(intel_crtc);

        if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);

        intel_set_pipe_timings(intel_crtc);
        intel_set_pipe_src_size(intel_crtc);

        i9xx_set_pipeconf(intel_crtc);

        intel_crtc->active = true;

        /* Underrun reporting is skipped on gen2. */
        if (!IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(crtc, pipe_config, old_state);

        i9xx_enable_pll(intel_crtc);

        i9xx_pfit_enable(intel_crtc);

        intel_color_load_luts(&pipe_config->base);

        /* Platforms without a two-part watermark hook update directly. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
                                                     intel_crtc->config);
        else
                intel_update_watermarks(intel_crtc);
        intel_enable_pipe(intel_crtc);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        intel_encoders_enable(crtc, pipe_config, old_state);
}
5762
5763 static void i9xx_pfit_disable(struct intel_crtc *crtc)
5764 {
5765         struct drm_device *dev = crtc->base.dev;
5766         struct drm_i915_private *dev_priv = to_i915(dev);
5767
5768         if (!crtc->config->gmch_pfit.control)
5769                 return;
5770
5771         assert_pipe_disabled(dev_priv, crtc->pipe);
5772
5773         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
5774                          I915_READ(PFIT_CONTROL));
5775         I915_WRITE(PFIT_CONTROL, 0);
5776 }
5777
/*
 * Disable sequence for GMCH (pre-ILK / VLV / CHV) crtcs: encoders, pipe,
 * pfit, then the DPLL.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                              struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (IS_GEN2(dev_priv))
                intel_wait_for_vblank(dev_priv, pipe);

        intel_encoders_disable(crtc, old_crtc_state, old_state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(intel_crtc);

        i9xx_pfit_disable(intel_crtc);

        intel_encoders_post_disable(crtc, old_crtc_state, old_state);

        /* The PLL is left running for DSI outputs. */
        if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(intel_crtc);
        }

        intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

        if (!IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        /* No two-part watermark hook -> update watermarks directly. */
        if (!dev_priv->display.initial_watermarks)
                intel_update_watermarks(intel_crtc);
}
5822
/*
 * Force a crtc off outside of a full atomic commit and fix up the
 * software state (crtc/connector/encoder links, power domains,
 * watermarks) to match.  The relevant modeset locks must already be
 * held (see the -EDEADLK note below).
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
        u64 domains;
        struct drm_atomic_state *state;
        struct intel_crtc_state *crtc_state;
        int ret;

        if (!intel_crtc->active)
                return;

        /* Tear down the primary plane first if it's still showing. */
        if (crtc->primary->state->visible) {
                WARN_ON(intel_crtc->flip_work);

                intel_pre_disable_primary_noatomic(crtc);

                intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
                crtc->primary->state->visible = false;
        }

        /*
         * Build a throwaway atomic state only to feed the crtc_disable
         * hook; it is never committed.
         */
        state = drm_atomic_state_alloc(crtc->dev);
        if (!state) {
                DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
                              crtc->base.id, crtc->name);
                return;
        }

        state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;

        /* Everything's already locked, -EDEADLK can't happen. */
        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        ret = drm_atomic_add_affected_connectors(state, crtc);

        WARN_ON(IS_ERR(crtc_state) || ret);

        dev_priv->display.crtc_disable(crtc_state, state);

        drm_atomic_state_put(state);

        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                      crtc->base.id, crtc->name);

        /* Scrub the drm-level state so it agrees with the hw being off. */
        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
        crtc->state->active = false;
        intel_crtc->active = false;
        crtc->enabled = false;
        crtc->state->connector_mask = 0;
        crtc->state->encoder_mask = 0;

        for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
                encoder->base.crtc = NULL;

        intel_fbc_disable(intel_crtc);
        intel_update_watermarks(intel_crtc);
        intel_disable_shared_dpll(intel_crtc);

        /* Release every power domain reference the crtc was holding. */
        domains = intel_crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
                intel_display_power_put(dev_priv, domain);
        intel_crtc->enabled_power_domains = 0;

        dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
        dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
5890
5891 /*
5892  * turn all crtc's off, but do not adjust state
5893  * This has to be paired with a call to intel_modeset_setup_hw_state.
5894  */
5895 int intel_display_suspend(struct drm_device *dev)
5896 {
5897         struct drm_i915_private *dev_priv = to_i915(dev);
5898         struct drm_atomic_state *state;
5899         int ret;
5900
5901         state = drm_atomic_helper_suspend(dev);
5902         ret = PTR_ERR_OR_ZERO(state);
5903         if (ret)
5904                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
5905         else
5906                 dev_priv->modeset_restore_state = state;
5907         return ret;
5908 }
5909
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Tear down the DRM core state, then free the embedding struct. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
5917
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency): a connector reporting enabled in hardware must
 * have an active attached crtc and a matching encoder, and vice versa.
 */
static void intel_connector_verify_state(struct intel_connector *connector)
{
        struct drm_crtc *crtc = connector->base.state->crtc;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.base.id,
                      connector->base.name);

        if (connector->get_hw_state(connector)) {
                struct intel_encoder *encoder = connector->encoder;
                struct drm_connector_state *conn_state = connector->base.state;

                I915_STATE_WARN(!crtc,
                         "connector enabled without attached crtc\n");

                if (!crtc)
                        return;

                I915_STATE_WARN(!crtc->state->active,
                      "connector is active, but attached crtc isn't\n");

                /* Skip the encoder checks for MST connectors. */
                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                        return;

                I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
                        "atomic encoder doesn't match attached encoder\n");

                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
                I915_STATE_WARN(crtc && crtc->state->active,
                        "attached crtc is active, but connector isn't\n");
                I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
                        "best encoder set without crtc!\n");
        }
}
5956
5957 int intel_connector_init(struct intel_connector *connector)
5958 {
5959         drm_atomic_helper_connector_reset(&connector->base);
5960
5961         if (!connector->base.state)
5962                 return -ENOMEM;
5963
5964         return 0;
5965 }
5966
5967 struct intel_connector *intel_connector_alloc(void)
5968 {
5969         struct intel_connector *connector;
5970
5971         connector = kzalloc(sizeof *connector, GFP_KERNEL);
5972         if (!connector)
5973                 return NULL;
5974
5975         if (intel_connector_init(connector) < 0) {
5976                 kfree(connector);
5977                 return NULL;
5978         }
5979
5980         return connector;
5981 }
5982
5983 /* Simple connector->get_hw_state implementation for encoders that support only
5984  * one connector and no cloning and hence the encoder state determines the state
5985  * of the connector. */
5986 bool intel_connector_get_hw_state(struct intel_connector *connector)
5987 {
5988         enum pipe pipe = 0;
5989         struct intel_encoder *encoder = connector->encoder;
5990
5991         return encoder->get_hw_state(encoder, &pipe);
5992 }
5993
5994 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
5995 {
5996         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
5997                 return crtc_state->fdi_lanes;
5998
5999         return 0;
6000 }
6001
6002 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6003                                      struct intel_crtc_state *pipe_config)
6004 {
6005         struct drm_i915_private *dev_priv = to_i915(dev);
6006         struct drm_atomic_state *state = pipe_config->base.state;
6007         struct intel_crtc *other_crtc;
6008         struct intel_crtc_state *other_crtc_state;
6009
6010         DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6011                       pipe_name(pipe), pipe_config->fdi_lanes);
6012         if (pipe_config->fdi_lanes > 4) {
6013                 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6014                               pipe_name(pipe), pipe_config->fdi_lanes);
6015                 return -EINVAL;
6016         }
6017
6018         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
6019                 if (pipe_config->fdi_lanes > 2) {
6020                         DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6021                                       pipe_config->fdi_lanes);
6022                         return -EINVAL;
6023                 } else {
6024                         return 0;
6025                 }
6026         }
6027
6028         if (INTEL_INFO(dev_priv)->num_pipes == 2)
6029                 return 0;
6030
6031         /* Ivybridge 3 pipe is really complicated */
6032         switch (pipe) {
6033         case PIPE_A:
6034                 return 0;
6035         case PIPE_B:
6036                 if (pipe_config->fdi_lanes <= 2)
6037                         return 0;
6038
6039                 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
6040                 other_crtc_state =
6041                         intel_atomic_get_crtc_state(state, other_crtc);
6042                 if (IS_ERR(other_crtc_state))
6043                         return PTR_ERR(other_crtc_state);
6044
6045                 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6046                         DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6047                                       pipe_name(pipe), pipe_config->fdi_lanes);
6048                         return -EINVAL;
6049                 }
6050                 return 0;
6051         case PIPE_C:
6052                 if (pipe_config->fdi_lanes > 2) {
6053                         DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6054                                       pipe_name(pipe), pipe_config->fdi_lanes);
6055                         return -EINVAL;
6056                 }
6057
6058                 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
6059                 other_crtc_state =
6060                         intel_atomic_get_crtc_state(state, other_crtc);
6061                 if (IS_ERR(other_crtc_state))
6062                         return PTR_ERR(other_crtc_state);
6063
6064                 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6065                         DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6066                         return -EINVAL;
6067                 }
6068                 return 0;
6069         default:
6070                 BUG();
6071         }
6072 }
6073
#define RETRY 1
/*
 * Compute the FDI lane count and link M/N values for the mode, lowering
 * pipe_bpp until the FDI link constraints are met.  Returns 0 on success,
 * RETRY when pipe_bpp was reduced (so callers can recompute the config
 * with the new bpp), or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int lane, link_bw, fdi_dotclock, ret;
        bool needs_recompute = false;

retry:
        /* FDI is a binary signal running at ~2.7GHz, encoding
         * each output octet as 10 bits. The actual frequency
         * is stored as a divider into a 100MHz clock, and the
         * mode pixel clock is stored in units of 1KHz.
         * Hence the bw of each lane in terms of the mode signal
         * is:
         */
        link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

        fdi_dotclock = adjusted_mode->crtc_clock;

        lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
                                           pipe_config->pipe_bpp);

        pipe_config->fdi_lanes = lane;

        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
                               link_bw, &pipe_config->fdi_m_n);

        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
                /* Drop one bpc (3 bpp) and retry; 6 bpc is the floor. */
                pipe_config->pipe_bpp -= 2*3;
                DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
                              pipe_config->pipe_bpp);
                needs_recompute = true;
                pipe_config->bw_constrained = true;

                goto retry;
        }

        if (needs_recompute)
                return RETRY;

        return ret;
}
6119
6120 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6121                                      struct intel_crtc_state *pipe_config)
6122 {
6123         if (pipe_config->pipe_bpp > 24)
6124                 return false;
6125
6126         /* HSW can handle pixel rate up to cdclk? */
6127         if (IS_HASWELL(dev_priv))
6128                 return true;
6129
6130         /*
6131          * We compare against max which means we must take
6132          * the increased cdclk requirement into account when
6133          * calculating the new cdclk.
6134          *
6135          * Should measure whether using a lower cdclk w/o IPS
6136          */
6137         return pipe_config->pixel_rate <=
6138                 dev_priv->max_cdclk_freq * 95 / 100;
6139 }
6140
6141 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6142                                    struct intel_crtc_state *pipe_config)
6143 {
6144         struct drm_device *dev = crtc->base.dev;
6145         struct drm_i915_private *dev_priv = to_i915(dev);
6146
6147         pipe_config->ips_enabled = i915.enable_ips &&
6148                 hsw_crtc_supports_ips(crtc) &&
6149                 pipe_config_supports_ips(dev_priv, pipe_config);
6150 }
6151
6152 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6153 {
6154         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6155
6156         /* GDG double wide on either pipe, otherwise pipe A only */
6157         return INTEL_INFO(dev_priv)->gen < 4 &&
6158                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6159 }
6160
6161 static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6162 {
6163         uint32_t pixel_rate;
6164
6165         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6166
6167         /*
6168          * We only use IF-ID interlacing. If we ever use
6169          * PF-ID we'll need to adjust the pixel_rate here.
6170          */
6171
6172         if (pipe_config->pch_pfit.enabled) {
6173                 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
6174                 uint32_t pfit_size = pipe_config->pch_pfit.size;
6175
6176                 pipe_w = pipe_config->pipe_src_w;
6177                 pipe_h = pipe_config->pipe_src_h;
6178
6179                 pfit_w = (pfit_size >> 16) & 0xFFFF;
6180                 pfit_h = pfit_size & 0xFFFF;
6181                 if (pipe_w < pfit_w)
6182                         pipe_w = pfit_w;
6183                 if (pipe_h < pfit_h)
6184                         pipe_h = pfit_h;
6185
6186                 if (WARN_ON(!pfit_w || !pfit_h))
6187                         return pixel_rate;
6188
6189                 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
6190                                      pfit_w * pfit_h);
6191         }
6192
6193         return pixel_rate;
6194 }
6195
6196 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6197 {
6198         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6199
6200         if (HAS_GMCH_DISPLAY(dev_priv))
6201                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6202                 crtc_state->pixel_rate =
6203                         crtc_state->base.adjusted_mode.crtc_clock;
6204         else
6205                 crtc_state->pixel_rate =
6206                         ilk_pipe_pixel_rate(crtc_state);
6207 }
6208
/*
 * Validate and adjust the crtc state against platform limits: clamp the
 * dotclock (enabling double wide mode on pre-gen4 when needed), enforce
 * even pipe width where required, apply the zero-hsync-front-porch
 * workaround, then derive pixel rate, IPS and FDI configuration.
 *
 * Returns 0 on success, -EINVAL if the mode cannot be supported, or the
 * result of ironlake_fdi_compute_config() for PCH encoders.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: single wide pipe is limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* IPS is only present on HSW/BDW; compute whether it can be used. */
	if (HAS_IPS(dev_priv))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6265
6266 static void
6267 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6268 {
6269         while (*num > DATA_LINK_M_N_MASK ||
6270                *den > DATA_LINK_M_N_MASK) {
6271                 *num >>= 1;
6272                 *den >>= 1;
6273         }
6274 }
6275
6276 static void compute_m_n(unsigned int m, unsigned int n,
6277                         uint32_t *ret_m, uint32_t *ret_n)
6278 {
6279         /*
6280          * Reduce M/N as much as possible without loss in precision. Several DP
6281          * dongles in particular seem to be fussy about too large *link* M/N
6282          * values. The passed in values are more likely to have the least
6283          * significant bits zero than M after rounding below, so do this first.
6284          */
6285         while ((m & 1) == 0 && (n & 1) == 0) {
6286                 m >>= 1;
6287                 n >>= 1;
6288         }
6289
6290         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6291         *ret_m = div_u64((uint64_t) m * *ret_n, n);
6292         intel_reduce_m_n_ratio(ret_m, ret_n);
6293 }
6294
6295 void
6296 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
6297                        int pixel_clock, int link_clock,
6298                        struct intel_link_m_n *m_n)
6299 {
6300         m_n->tu = 64;
6301
6302         compute_m_n(bits_per_pixel * pixel_clock,
6303                     link_clock * nlanes * 8,
6304                     &m_n->gmch_m, &m_n->gmch_n);
6305
6306         compute_m_n(pixel_clock, link_clock,
6307                     &m_n->link_m, &m_n->link_n);
6308 }
6309
6310 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6311 {
6312         if (i915.panel_use_ssc >= 0)
6313                 return i915.panel_use_ssc != 0;
6314         return dev_priv->vbt.lvds_use_ssc
6315                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6316 }
6317
6318 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6319 {
6320         return (1 << dpll->n) << 16 | dpll->m2;
6321 }
6322
6323 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6324 {
6325         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6326 }
6327
6328 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6329                                      struct intel_crtc_state *crtc_state,
6330                                      struct dpll *reduced_clock)
6331 {
6332         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6333         u32 fp, fp2 = 0;
6334
6335         if (IS_PINEVIEW(dev_priv)) {
6336                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6337                 if (reduced_clock)
6338                         fp2 = pnv_dpll_compute_fp(reduced_clock);
6339         } else {
6340                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6341                 if (reduced_clock)
6342                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
6343         }
6344
6345         crtc_state->dpll_hw_state.fp0 = fp;
6346
6347         crtc->lowfreq_avail = false;
6348         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6349             reduced_clock) {
6350                 crtc_state->dpll_hw_state.fp1 = fp2;
6351                 crtc->lowfreq_avail = true;
6352         } else {
6353                 crtc_state->dpll_hw_state.fp1 = fp;
6354         }
6355 }
6356
/*
 * Work around the VLV PLL B opamp mis-calibration by force-enabling the
 * opamp and programming a known-good value via the DPIO sideband.
 *
 * NOTE(review): the exact register values and the write ordering come
 * from the "eDP HDMI DPIO driver vbios notes" doc referenced by the
 * caller — do not reorder or merge these accesses.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the opamp override again once the calibration value is set. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6385
/*
 * Program the PCH transcoder data/link M/N registers for @crtc's pipe
 * from the supplied M/N values (TU size is packed into DATA_M1).
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6398
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * per-transcoder registers are used (plus the optional M2/N2 set for
 * DRRS); older G4X-style hardware uses per-pipe registers instead.
 *
 * @m2_n2 may be NULL when no second divider set should be programmed.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-gen5: M/N registers live in per-pipe (G4X) locations. */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6431
6432 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
6433 {
6434         struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
6435
6436         if (m_n == M1_N1) {
6437                 dp_m_n = &crtc->config->dp_m_n;
6438                 dp_m2_n2 = &crtc->config->dp_m2_n2;
6439         } else if (m_n == M2_N2) {
6440
6441                 /*
6442                  * M2_N2 registers are not supported. Hence m2_n2 divider value
6443                  * needs to be programmed into M1_N1.
6444                  */
6445                 dp_m_n = &crtc->config->dp_m2_n2;
6446         } else {
6447                 DRM_ERROR("Unsupported divider value\n");
6448                 return;
6449         }
6450
6451         if (crtc->config->has_pch_encoder)
6452                 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
6453         else
6454                 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
6455 }
6456
6457 static void vlv_compute_dpll(struct intel_crtc *crtc,
6458                              struct intel_crtc_state *pipe_config)
6459 {
6460         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
6461                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
6462         if (crtc->pipe != PIPE_A)
6463                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6464
6465         /* DPLL not used with DSI, but still need the rest set up */
6466         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
6467                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
6468                         DPLL_EXT_BUFFER_ENABLE_VLV;
6469
6470         pipe_config->dpll_hw_state.dpll_md =
6471                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6472 }
6473
6474 static void chv_compute_dpll(struct intel_crtc *crtc,
6475                              struct intel_crtc_state *pipe_config)
6476 {
6477         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
6478                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
6479         if (crtc->pipe != PIPE_A)
6480                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6481
6482         /* DPLL not used with DSI, but still need the rest set up */
6483         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
6484                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6485
6486         pipe_config->dpll_hw_state.dpll_md =
6487                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6488 }
6489
6490 static void vlv_prepare_pll(struct intel_crtc *crtc,
6491                             const struct intel_crtc_state *pipe_config)
6492 {
6493         struct drm_device *dev = crtc->base.dev;
6494         struct drm_i915_private *dev_priv = to_i915(dev);
6495         enum pipe pipe = crtc->pipe;
6496         u32 mdiv;
6497         u32 bestn, bestm1, bestm2, bestp1, bestp2;
6498         u32 coreclk, reg_val;
6499
6500         /* Enable Refclk */
6501         I915_WRITE(DPLL(pipe),
6502                    pipe_config->dpll_hw_state.dpll &
6503                    ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
6504
6505         /* No need to actually set up the DPLL with DSI */
6506         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6507                 return;
6508
6509         mutex_lock(&dev_priv->sb_lock);
6510
6511         bestn = pipe_config->dpll.n;
6512         bestm1 = pipe_config->dpll.m1;
6513         bestm2 = pipe_config->dpll.m2;
6514         bestp1 = pipe_config->dpll.p1;
6515         bestp2 = pipe_config->dpll.p2;
6516
6517         /* See eDP HDMI DPIO driver vbios notes doc */
6518
6519         /* PLL B needs special handling */
6520         if (pipe == PIPE_B)
6521                 vlv_pllb_recal_opamp(dev_priv, pipe);
6522
6523         /* Set up Tx target for periodic Rcomp update */
6524         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
6525
6526         /* Disable target IRef on PLL */
6527         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
6528         reg_val &= 0x00ffffff;
6529         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
6530
6531         /* Disable fast lock */
6532         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
6533
6534         /* Set idtafcrecal before PLL is enabled */
6535         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
6536         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
6537         mdiv |= ((bestn << DPIO_N_SHIFT));
6538         mdiv |= (1 << DPIO_K_SHIFT);
6539
6540         /*
6541          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
6542          * but we don't support that).
6543          * Note: don't use the DAC post divider as it seems unstable.
6544          */
6545         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
6546         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6547
6548         mdiv |= DPIO_ENABLE_CALIBRATION;
6549         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6550
6551         /* Set HBR and RBR LPF coefficients */
6552         if (pipe_config->port_clock == 162000 ||
6553             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
6554             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
6555                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6556                                  0x009f0003);
6557         else
6558                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6559                                  0x00d0000f);
6560
6561         if (intel_crtc_has_dp_encoder(pipe_config)) {
6562                 /* Use SSC source */
6563                 if (pipe == PIPE_A)
6564                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6565                                          0x0df40000);
6566                 else
6567                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6568                                          0x0df70000);
6569         } else { /* HDMI or VGA */
6570                 /* Use bend source */
6571                 if (pipe == PIPE_A)
6572                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6573                                          0x0df70000);
6574                 else
6575                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6576                                          0x0df40000);
6577         }
6578
6579         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
6580         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
6581         if (intel_crtc_has_dp_encoder(crtc->config))
6582                 coreclk |= 0x01000000;
6583         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
6584
6585         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
6586         mutex_unlock(&dev_priv->sb_lock);
6587 }
6588
/*
 * Program the CHV PLL (dividers, fractional M2, lock detect threshold,
 * loop filter and AFC recalibration) via the DPIO sideband.
 *
 * NOTE(review): the register values and write ordering below follow the
 * CHV PHY programming sequence — do not reorder the accesses.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* M2 is split: low 22 bits are the fraction, the rest the integer. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	/* Coefficients and tri-buffer cal count depend on the VCO frequency. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
6693
6694 /**
6695  * vlv_force_pll_on - forcibly enable just the PLL
6696  * @dev_priv: i915 private structure
6697  * @pipe: pipe PLL to enable
6698  * @dpll: PLL configuration
6699  *
6700  * Enable the PLL for @pipe using the supplied @dpll config. To be used
6701  * in cases where we need the PLL enabled even when @pipe is not going to
6702  * be enabled.
6703  */
6704 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
6705                      const struct dpll *dpll)
6706 {
6707         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6708         struct intel_crtc_state *pipe_config;
6709
6710         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
6711         if (!pipe_config)
6712                 return -ENOMEM;
6713
6714         pipe_config->base.crtc = &crtc->base;
6715         pipe_config->pixel_multiplier = 1;
6716         pipe_config->dpll = *dpll;
6717
6718         if (IS_CHERRYVIEW(dev_priv)) {
6719                 chv_compute_dpll(crtc, pipe_config);
6720                 chv_prepare_pll(crtc, pipe_config);
6721                 chv_enable_pll(crtc, pipe_config);
6722         } else {
6723                 vlv_compute_dpll(crtc, pipe_config);
6724                 vlv_prepare_pll(crtc, pipe_config);
6725                 vlv_enable_pll(crtc, pipe_config);
6726         }
6727
6728         kfree(pipe_config);
6729
6730         return 0;
6731 }
6732
6733 /**
6734  * vlv_force_pll_off - forcibly disable just the PLL
6735  * @dev_priv: i915 private structure
6736  * @pipe: pipe PLL to disable
6737  *
6738  * Disable the PLL for @pipe. To be used in cases where we need
6739  * the PLL enabled even when @pipe is not going to be enabled.
6740  */
6741 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
6742 {
6743         if (IS_CHERRYVIEW(dev_priv))
6744                 chv_disable_pll(dev_priv, pipe);
6745         else
6746                 vlv_disable_pll(dev_priv, pipe);
6747 }
6748
/*
 * Compute the i9xx-family DPLL (and gen4+ DPLL_MD) register values from
 * the crtc state's dividers, output types and reference clock selection,
 * and store them in crtc_state->dpll_hw_state.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Fills in dpll_hw_state.fp0/fp1 and crtc->lowfreq_avail. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in the DPLL itself. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock: TV clock, spread spectrum for SSC LVDS, or DREF. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
6821
6822 static void i8xx_compute_dpll(struct intel_crtc *crtc,
6823                               struct intel_crtc_state *crtc_state,
6824                               struct dpll *reduced_clock)
6825 {
6826         struct drm_device *dev = crtc->base.dev;
6827         struct drm_i915_private *dev_priv = to_i915(dev);
6828         u32 dpll;
6829         struct dpll *clock = &crtc_state->dpll;
6830
6831         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
6832
6833         dpll = DPLL_VGA_MODE_DIS;
6834
6835         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
6836                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6837         } else {
6838                 if (clock->p1 == 2)
6839                         dpll |= PLL_P1_DIVIDE_BY_TWO;
6840                 else
6841                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6842                 if (clock->p2 == 4)
6843                         dpll |= PLL_P2_DIVIDE_BY_4;
6844         }
6845
6846         if (!IS_I830(dev_priv) &&
6847             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
6848                 dpll |= DPLL_DVO_2X_MODE;
6849
6850         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6851             intel_panel_use_ssc(dev_priv))
6852                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6853         else
6854                 dpll |= PLL_REF_INPUT_DREFCLK;
6855
6856         dpll |= DPLL_VCO_ENABLE;
6857         crtc_state->dpll_hw_state.dpll = dpll;
6858 }
6859
/*
 * Program the pipe/transcoder timing registers (H/V total, blank, sync,
 * and vsyncshift) from the crtc's adjusted mode, applying the interlace
 * half-line adjustments where needed.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a fixed half-htotal vsyncshift; others derive it
		 * from the hsync position. */
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* All timing registers pack (start - 1) low and (end - 1) high. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
6920
6921 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
6922 {
6923         struct drm_device *dev = intel_crtc->base.dev;
6924         struct drm_i915_private *dev_priv = to_i915(dev);
6925         enum pipe pipe = intel_crtc->pipe;
6926
6927         /* pipesrc controls the size that is scaled from, which should
6928          * always be the user's requested size.
6929          */
6930         I915_WRITE(PIPESRC(pipe),
6931                    ((intel_crtc->config->pipe_src_w - 1) << 16) |
6932                    (intel_crtc->config->pipe_src_h - 1));
6933 }
6934
6935 static void intel_get_pipe_timings(struct intel_crtc *crtc,
6936                                    struct intel_crtc_state *pipe_config)
6937 {
6938         struct drm_device *dev = crtc->base.dev;
6939         struct drm_i915_private *dev_priv = to_i915(dev);
6940         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6941         uint32_t tmp;
6942
6943         tmp = I915_READ(HTOTAL(cpu_transcoder));
6944         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6945         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6946         tmp = I915_READ(HBLANK(cpu_transcoder));
6947         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
6948         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
6949         tmp = I915_READ(HSYNC(cpu_transcoder));
6950         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6951         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6952
6953         tmp = I915_READ(VTOTAL(cpu_transcoder));
6954         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6955         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6956         tmp = I915_READ(VBLANK(cpu_transcoder));
6957         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
6958         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
6959         tmp = I915_READ(VSYNC(cpu_transcoder));
6960         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6961         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6962
6963         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
6964                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6965                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
6966                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
6967         }
6968 }
6969
6970 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
6971                                     struct intel_crtc_state *pipe_config)
6972 {
6973         struct drm_device *dev = crtc->base.dev;
6974         struct drm_i915_private *dev_priv = to_i915(dev);
6975         u32 tmp;
6976
6977         tmp = I915_READ(PIPESRC(crtc->pipe));
6978         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6979         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6980
6981         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
6982         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
6983 }
6984
6985 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6986                                  struct intel_crtc_state *pipe_config)
6987 {
6988         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
6989         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
6990         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
6991         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
6992
6993         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
6994         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
6995         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
6996         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
6997
6998         mode->flags = pipe_config->base.adjusted_mode.flags;
6999         mode->type = DRM_MODE_TYPE_DRIVER;
7000
7001         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7002
7003         mode->hsync = drm_mode_hsync(mode);
7004         mode->vrefresh = drm_mode_vrefresh(mode);
7005         drm_mode_set_name(mode);
7006 }
7007
/*
 * Build and program PIPECONF for pre-PCH (gmch) platforms from the staged
 * crtc state.  The register value is constructed from scratch; only the
 * ENABLE bit may be carried over from the current register contents, and
 * only when a force-pipe quirk is active (see below).
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/*
	 * With the PIPEA/PIPEB force quirks the pipe must never be turned
	 * off, so preserve the ENABLE bit currently set in the register.
	 */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		/* pipe_bpp is total bits per pixel across all channels. */
		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev_priv)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/*
		 * Gen3 and earlier, and SDVO outputs, use the field
		 * indication flavour of interlacing; everything else can
		 * use the sync-shift flavour.
		 */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7071
7072 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7073                                    struct intel_crtc_state *crtc_state)
7074 {
7075         struct drm_device *dev = crtc->base.dev;
7076         struct drm_i915_private *dev_priv = to_i915(dev);
7077         const struct intel_limit *limit;
7078         int refclk = 48000;
7079
7080         memset(&crtc_state->dpll_hw_state, 0,
7081                sizeof(crtc_state->dpll_hw_state));
7082
7083         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7084                 if (intel_panel_use_ssc(dev_priv)) {
7085                         refclk = dev_priv->vbt.lvds_ssc_freq;
7086                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7087                 }
7088
7089                 limit = &intel_limits_i8xx_lvds;
7090         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7091                 limit = &intel_limits_i8xx_dvo;
7092         } else {
7093                 limit = &intel_limits_i8xx_dac;
7094         }
7095
7096         if (!crtc_state->clock_set &&
7097             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7098                                  refclk, NULL, &crtc_state->dpll)) {
7099                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7100                 return -EINVAL;
7101         }
7102
7103         i8xx_compute_dpll(crtc, crtc_state, NULL);
7104
7105         return 0;
7106 }
7107
7108 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7109                                   struct intel_crtc_state *crtc_state)
7110 {
7111         struct drm_device *dev = crtc->base.dev;
7112         struct drm_i915_private *dev_priv = to_i915(dev);
7113         const struct intel_limit *limit;
7114         int refclk = 96000;
7115
7116         memset(&crtc_state->dpll_hw_state, 0,
7117                sizeof(crtc_state->dpll_hw_state));
7118
7119         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7120                 if (intel_panel_use_ssc(dev_priv)) {
7121                         refclk = dev_priv->vbt.lvds_ssc_freq;
7122                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7123                 }
7124
7125                 if (intel_is_dual_link_lvds(dev))
7126                         limit = &intel_limits_g4x_dual_channel_lvds;
7127                 else
7128                         limit = &intel_limits_g4x_single_channel_lvds;
7129         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7130                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7131                 limit = &intel_limits_g4x_hdmi;
7132         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7133                 limit = &intel_limits_g4x_sdvo;
7134         } else {
7135                 /* The option is for other outputs */
7136                 limit = &intel_limits_i9xx_sdvo;
7137         }
7138
7139         if (!crtc_state->clock_set &&
7140             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7141                                 refclk, NULL, &crtc_state->dpll)) {
7142                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7143                 return -EINVAL;
7144         }
7145
7146         i9xx_compute_dpll(crtc, crtc_state, NULL);
7147
7148         return 0;
7149 }
7150
7151 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7152                                   struct intel_crtc_state *crtc_state)
7153 {
7154         struct drm_device *dev = crtc->base.dev;
7155         struct drm_i915_private *dev_priv = to_i915(dev);
7156         const struct intel_limit *limit;
7157         int refclk = 96000;
7158
7159         memset(&crtc_state->dpll_hw_state, 0,
7160                sizeof(crtc_state->dpll_hw_state));
7161
7162         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7163                 if (intel_panel_use_ssc(dev_priv)) {
7164                         refclk = dev_priv->vbt.lvds_ssc_freq;
7165                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7166                 }
7167
7168                 limit = &intel_limits_pineview_lvds;
7169         } else {
7170                 limit = &intel_limits_pineview_sdvo;
7171         }
7172
7173         if (!crtc_state->clock_set &&
7174             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7175                                 refclk, NULL, &crtc_state->dpll)) {
7176                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7177                 return -EINVAL;
7178         }
7179
7180         i9xx_compute_dpll(crtc, crtc_state, NULL);
7181
7182         return 0;
7183 }
7184
7185 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7186                                    struct intel_crtc_state *crtc_state)
7187 {
7188         struct drm_device *dev = crtc->base.dev;
7189         struct drm_i915_private *dev_priv = to_i915(dev);
7190         const struct intel_limit *limit;
7191         int refclk = 96000;
7192
7193         memset(&crtc_state->dpll_hw_state, 0,
7194                sizeof(crtc_state->dpll_hw_state));
7195
7196         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7197                 if (intel_panel_use_ssc(dev_priv)) {
7198                         refclk = dev_priv->vbt.lvds_ssc_freq;
7199                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7200                 }
7201
7202                 limit = &intel_limits_i9xx_lvds;
7203         } else {
7204                 limit = &intel_limits_i9xx_sdvo;
7205         }
7206
7207         if (!crtc_state->clock_set &&
7208             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7209                                  refclk, NULL, &crtc_state->dpll)) {
7210                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7211                 return -EINVAL;
7212         }
7213
7214         i9xx_compute_dpll(crtc, crtc_state, NULL);
7215
7216         return 0;
7217 }
7218
7219 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7220                                   struct intel_crtc_state *crtc_state)
7221 {
7222         int refclk = 100000;
7223         const struct intel_limit *limit = &intel_limits_chv;
7224
7225         memset(&crtc_state->dpll_hw_state, 0,
7226                sizeof(crtc_state->dpll_hw_state));
7227
7228         if (!crtc_state->clock_set &&
7229             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7230                                 refclk, NULL, &crtc_state->dpll)) {
7231                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7232                 return -EINVAL;
7233         }
7234
7235         chv_compute_dpll(crtc, crtc_state);
7236
7237         return 0;
7238 }
7239
7240 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7241                                   struct intel_crtc_state *crtc_state)
7242 {
7243         int refclk = 100000;
7244         const struct intel_limit *limit = &intel_limits_vlv;
7245
7246         memset(&crtc_state->dpll_hw_state, 0,
7247                sizeof(crtc_state->dpll_hw_state));
7248
7249         if (!crtc_state->clock_set &&
7250             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7251                                 refclk, NULL, &crtc_state->dpll)) {
7252                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7253                 return -EINVAL;
7254         }
7255
7256         vlv_compute_dpll(crtc, crtc_state);
7257
7258         return 0;
7259 }
7260
7261 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7262                                  struct intel_crtc_state *pipe_config)
7263 {
7264         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7265         uint32_t tmp;
7266
7267         if (INTEL_GEN(dev_priv) <= 3 &&
7268             (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
7269                 return;
7270
7271         tmp = I915_READ(PFIT_CONTROL);
7272         if (!(tmp & PFIT_ENABLE))
7273                 return;
7274
7275         /* Check whether the pfit is attached to our pipe. */
7276         if (INTEL_GEN(dev_priv) < 4) {
7277                 if (crtc->pipe != PIPE_B)
7278                         return;
7279         } else {
7280                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7281                         return;
7282         }
7283
7284         pipe_config->gmch_pfit.control = tmp;
7285         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
7286 }
7287
7288 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
7289                                struct intel_crtc_state *pipe_config)
7290 {
7291         struct drm_device *dev = crtc->base.dev;
7292         struct drm_i915_private *dev_priv = to_i915(dev);
7293         int pipe = pipe_config->cpu_transcoder;
7294         struct dpll clock;
7295         u32 mdiv;
7296         int refclk = 100000;
7297
7298         /* In case of DSI, DPLL will not be used */
7299         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7300                 return;
7301
7302         mutex_lock(&dev_priv->sb_lock);
7303         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
7304         mutex_unlock(&dev_priv->sb_lock);
7305
7306         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
7307         clock.m2 = mdiv & DPIO_M2DIV_MASK;
7308         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
7309         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
7310         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
7311
7312         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
7313 }
7314
/*
 * Reconstruct the framebuffer configuration that the BIOS/firmware left
 * programmed on this crtc's primary plane, so the boot image can be
 * taken over.  Returns silently (plane_config->fb left NULL) when the
 * plane is disabled or the fb struct cannot be allocated.
 * Ownership of the allocated intel_fb passes to plane_config->fb;
 * presumably the caller is responsible for freeing it — TODO confirm.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	/* Tiling is only read out on gen4+; earlier gens leave it 0. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/*
	 * Gen4+ uses the surface base + tile/linear offset registers;
	 * older gens have a single plane address register.
	 * NOTE(review): offset is read but never used below.
	 */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* PIPESRC stores (size - 1): width high word, height low word. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
7382
/*
 * Derive the port clock of a Cherryview pipe from the PLL divider
 * fields spread across several DPIO registers of the pipe's channel.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* DPIO (sideband) access must be serialized via sb_lock. */
        mutex_lock(&dev_priv->sb_lock);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        mutex_unlock(&dev_priv->sb_lock);

        /* m1 is either a /2 divider or absent entirely. */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        /* m2 is a fixed-point value: integer part from DW0 (<<22),
         * fractional part from DW2 when fractional divide is enabled. */
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7416
/*
 * Read out the full hardware state of a gmch (pre-PCH) pipe into
 * @pipe_config.  Returns false when the pipe's power domain is off or
 * the pipe is disabled; true when the state was read out.  The power
 * domain reference taken at the top is dropped on every exit path.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        /* On gmch platforms the transcoder maps 1:1 to the pipe. */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only g4x+ encodes the pipe bpc in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Read out the pixel multiplier; the encoding varies per gen. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                /*
                 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
                 * on 830. Filter it out here so that we don't
                 * report errors due to that.
                 */
                if (IS_I830(dev_priv))
                        pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
7530
7531 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
7532 {
7533         struct intel_encoder *encoder;
7534         int i;
7535         u32 val, final;
7536         bool has_lvds = false;
7537         bool has_cpu_edp = false;
7538         bool has_panel = false;
7539         bool has_ck505 = false;
7540         bool can_ssc = false;
7541         bool using_ssc_source = false;
7542
7543         /* We need to take the global config into account */
7544         for_each_intel_encoder(&dev_priv->drm, encoder) {
7545                 switch (encoder->type) {
7546                 case INTEL_OUTPUT_LVDS:
7547                         has_panel = true;
7548                         has_lvds = true;
7549                         break;
7550                 case INTEL_OUTPUT_EDP:
7551                         has_panel = true;
7552                         if (enc_to_dig_port(&encoder->base)->port == PORT_A)
7553                                 has_cpu_edp = true;
7554                         break;
7555                 default:
7556                         break;
7557                 }
7558         }
7559
7560         if (HAS_PCH_IBX(dev_priv)) {
7561                 has_ck505 = dev_priv->vbt.display_clock_mode;
7562                 can_ssc = has_ck505;
7563         } else {
7564                 has_ck505 = false;
7565                 can_ssc = true;
7566         }
7567
7568         /* Check if any DPLLs are using the SSC source */
7569         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
7570                 u32 temp = I915_READ(PCH_DPLL(i));
7571
7572                 if (!(temp & DPLL_VCO_ENABLE))
7573                         continue;
7574
7575                 if ((temp & PLL_REF_INPUT_MASK) ==
7576                     PLLB_REF_INPUT_SPREADSPECTRUMIN) {
7577                         using_ssc_source = true;
7578                         break;
7579                 }
7580         }
7581
7582         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
7583                       has_panel, has_lvds, has_ck505, using_ssc_source);
7584
7585         /* Ironlake: try to setup display ref clock before DPLL
7586          * enabling. This is only under driver's control after
7587          * PCH B stepping, previous chipset stepping should be
7588          * ignoring this setting.
7589          */
7590         val = I915_READ(PCH_DREF_CONTROL);
7591
7592         /* As we must carefully and slowly disable/enable each source in turn,
7593          * compute the final state we want first and check if we need to
7594          * make any changes at all.
7595          */
7596         final = val;
7597         final &= ~DREF_NONSPREAD_SOURCE_MASK;
7598         if (has_ck505)
7599                 final |= DREF_NONSPREAD_CK505_ENABLE;
7600         else
7601                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
7602
7603         final &= ~DREF_SSC_SOURCE_MASK;
7604         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
7605         final &= ~DREF_SSC1_ENABLE;
7606
7607         if (has_panel) {
7608                 final |= DREF_SSC_SOURCE_ENABLE;
7609
7610                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
7611                         final |= DREF_SSC1_ENABLE;
7612
7613                 if (has_cpu_edp) {
7614                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
7615                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
7616                         else
7617                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
7618                 } else
7619                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
7620         } else if (using_ssc_source) {
7621                 final |= DREF_SSC_SOURCE_ENABLE;
7622                 final |= DREF_SSC1_ENABLE;
7623         }
7624
7625         if (final == val)
7626                 return;
7627
7628         /* Always enable nonspread source */
7629         val &= ~DREF_NONSPREAD_SOURCE_MASK;
7630
7631         if (has_ck505)
7632                 val |= DREF_NONSPREAD_CK505_ENABLE;
7633         else
7634                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
7635
7636         if (has_panel) {
7637                 val &= ~DREF_SSC_SOURCE_MASK;
7638                 val |= DREF_SSC_SOURCE_ENABLE;
7639
7640                 /* SSC must be turned on before enabling the CPU output  */
7641                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
7642                         DRM_DEBUG_KMS("Using SSC on panel\n");
7643                         val |= DREF_SSC1_ENABLE;
7644                 } else
7645                         val &= ~DREF_SSC1_ENABLE;
7646
7647                 /* Get SSC going before enabling the outputs */
7648                 I915_WRITE(PCH_DREF_CONTROL, val);
7649                 POSTING_READ(PCH_DREF_CONTROL);
7650                 udelay(200);
7651
7652                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
7653
7654                 /* Enable CPU source on CPU attached eDP */
7655                 if (has_cpu_edp) {
7656                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
7657                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
7658                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
7659                         } else
7660                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
7661                 } else
7662                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
7663
7664                 I915_WRITE(PCH_DREF_CONTROL, val);
7665                 POSTING_READ(PCH_DREF_CONTROL);
7666                 udelay(200);
7667         } else {
7668                 DRM_DEBUG_KMS("Disabling CPU source output\n");
7669
7670                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
7671
7672                 /* Turn off CPU output */
7673                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
7674
7675                 I915_WRITE(PCH_DREF_CONTROL, val);
7676                 POSTING_READ(PCH_DREF_CONTROL);
7677                 udelay(200);
7678
7679                 if (!using_ssc_source) {
7680                         DRM_DEBUG_KMS("Disabling SSC source\n");
7681
7682                         /* Turn off the SSC source */
7683                         val &= ~DREF_SSC_SOURCE_MASK;
7684                         val |= DREF_SSC_SOURCE_DISABLE;
7685
7686                         /* Turn off SSC1 */
7687                         val &= ~DREF_SSC1_ENABLE;
7688
7689                         I915_WRITE(PCH_DREF_CONTROL, val);
7690                         POSTING_READ(PCH_DREF_CONTROL);
7691                         udelay(200);
7692                 }
7693         }
7694
7695         BUG_ON(val != final);
7696 }
7697
/*
 * Pulse the IOSFSB reset line of the FDI mPHY: assert reset, wait for the
 * hardware to acknowledge, then de-assert and wait again. Called from
 * lpt_enable_clkout_dp() just before lpt_program_fdi_mphy() reprograms
 * the PHY. Timeouts are only logged; the sequence continues regardless.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/* Assert the mPHY reset. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* Wait (up to 100 us) for the status bit to confirm the reset. */
	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* Release the reset again ... */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* ... and wait for the status bit to clear. */
	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
7718
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY through the sideband (SBI_MPHY destination) with
 * the read-modify-write sequence mandated by the WaMPhyProgramming:hsw
 * workaround. The offsets come in per-lane pairs (0x20xx / 0x21xx get
 * the same value); the individual field meanings are not named here —
 * consult the workaround's BSpec entry before changing any value.
 * Caller (lpt_enable_clkout_dp) holds dev_priv->sb_lock.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
7793
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	uint32_t reg, tmp;

	/* Demote invalid parameter combinations instead of failing:
	 * FDI requires downspread, and LP PCHs have no FDI at all. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All sideband (SBI) access must be serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-gate the SSC block while keeping the clock parked on the
	 * alternate path (PATHALT) until it has stabilized. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Switch off the alternate path onto the spread clock ... */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			/* ... and (re)program the FDI mPHY after a reset. */
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Finally set the buffer-enable bit (cleared again by
	 * lpt_disable_clkout_dp()); the register differs on LP PCHs. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
7838
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the buffer-enable bit that lpt_enable_clkout_dp() set. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Park the clock on the alternate path first ... */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		/* ... then gate the SSC block itself. */
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
7864
/*
 * Map a bend amount in "steps" (-50..50, multiples of 5) to an index
 * into sscdivintphase[]: -50 -> 0, 0 -> 10, 50 -> 20.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values for each supported bend amount, indexed by
 * BEND_IDX(). Adjacent step pairs share a value; the half-step difference
 * is handled via SBI_SSCDITHPHASE in lpt_bend_clkout_dp(). Values are
 * presumably taken from BSpec — verify there before editing.
 */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
7890
7891 /*
7892  * Bend CLKOUT_DP
7893  * steps -50 to 50 inclusive, in steps of 5
7894  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
7895  * change in clock period = -(steps / 10) * 5.787 ps
7896  */
7897 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
7898 {
7899         uint32_t tmp;
7900         int idx = BEND_IDX(steps);
7901
7902         if (WARN_ON(steps % 5 != 0))
7903                 return;
7904
7905         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
7906                 return;
7907
7908         mutex_lock(&dev_priv->sb_lock);
7909
7910         if (steps % 10 != 0)
7911                 tmp = 0xAAAAAAAB;
7912         else
7913                 tmp = 0x00000000;
7914         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
7915
7916         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
7917         tmp &= 0xffff0000;
7918         tmp |= sscdivintphase[idx];
7919         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
7920
7921         mutex_unlock(&dev_priv->sb_lock);
7922 }
7923
7924 #undef BEND_IDX
7925
7926 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
7927 {
7928         struct intel_encoder *encoder;
7929         bool has_vga = false;
7930
7931         for_each_intel_encoder(&dev_priv->drm, encoder) {
7932                 switch (encoder->type) {
7933                 case INTEL_OUTPUT_ANALOG:
7934                         has_vga = true;
7935                         break;
7936                 default:
7937                         break;
7938                 }
7939         }
7940
7941         if (has_vga) {
7942                 lpt_bend_clkout_dp(dev_priv, 0);
7943                 lpt_enable_clkout_dp(dev_priv, true, true);
7944         } else {
7945                 lpt_disable_clkout_dp(dev_priv);
7946         }
7947 }
7948
7949 /*
7950  * Initialize reference clocks when the driver loads
7951  */
7952 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
7953 {
7954         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
7955                 ironlake_init_pch_refclk(dev_priv);
7956         else if (HAS_PCH_LPT(dev_priv))
7957                 lpt_init_pch_refclk(dev_priv);
7958 }
7959
7960 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
7961 {
7962         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
7963         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7964         int pipe = intel_crtc->pipe;
7965         uint32_t val;
7966
7967         val = 0;
7968
7969         switch (intel_crtc->config->pipe_bpp) {
7970         case 18:
7971                 val |= PIPECONF_6BPC;
7972                 break;
7973         case 24:
7974                 val |= PIPECONF_8BPC;
7975                 break;
7976         case 30:
7977                 val |= PIPECONF_10BPC;
7978                 break;
7979         case 36:
7980                 val |= PIPECONF_12BPC;
7981                 break;
7982         default:
7983                 /* Case prevented by intel_choose_pipe_bpp_dither. */
7984                 BUG();
7985         }
7986
7987         if (intel_crtc->config->dither)
7988                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7989
7990         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7991                 val |= PIPECONF_INTERLACED_ILK;
7992         else
7993                 val |= PIPECONF_PROGRESSIVE;
7994
7995         if (intel_crtc->config->limited_color_range)
7996                 val |= PIPECONF_COLOR_RANGE_SELECT;
7997
7998         I915_WRITE(PIPECONF(pipe), val);
7999         POSTING_READ(PIPECONF(pipe));
8000 }
8001
8002 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8003 {
8004         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8005         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8006         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8007         u32 val = 0;
8008
8009         if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8010                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8011
8012         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8013                 val |= PIPECONF_INTERLACED_ILK;
8014         else
8015                 val |= PIPECONF_PROGRESSIVE;
8016
8017         I915_WRITE(PIPECONF(cpu_transcoder), val);
8018         POSTING_READ(PIPECONF(cpu_transcoder));
8019 }
8020
8021 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8022 {
8023         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8024         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8025
8026         if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8027                 u32 val = 0;
8028
8029                 switch (intel_crtc->config->pipe_bpp) {
8030                 case 18:
8031                         val |= PIPEMISC_DITHER_6_BPC;
8032                         break;
8033                 case 24:
8034                         val |= PIPEMISC_DITHER_8_BPC;
8035                         break;
8036                 case 30:
8037                         val |= PIPEMISC_DITHER_10_BPC;
8038                         break;
8039                 case 36:
8040                         val |= PIPEMISC_DITHER_12_BPC;
8041                         break;
8042                 default:
8043                         /* Case prevented by pipe_config_set_bpp. */
8044                         BUG();
8045                 }
8046
8047                 if (intel_crtc->config->dither)
8048                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8049
8050                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8051         }
8052 }
8053
/*
 * Compute how many FDI lanes are needed to carry @target_clock pixels/s
 * at @bpp bits per pixel over a link running at @link_bw.
 *
 * Account for spread spectrum to avoid oversubscribing the link:
 * max center spread is 2.5%; use 5% for safety's sake.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	uint32_t lane_capacity = link_bw * 8;
	uint32_t bps = target_clock * bpp * 21 / 20;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + lane_capacity - 1) / lane_capacity;
}
8064
8065 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8066 {
8067         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8068 }
8069
/*
 * Assemble the DPLL, FP0 and FP1 register values for an ILK-style PCH
 * PLL from the dividers already computed in @crtc_state->dpll, and stash
 * them in @crtc_state->dpll_hw_state for later hardware programming.
 *
 * @reduced_clock: optional lower-frequency divider set used for FP1
 *                 (downclocking); when NULL, FP1 simply mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same m < factor * n criterion as for FP0 above. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference input: spread-spectrum clock for SSC LVDS panels,
	 * the normal display reference clock otherwise. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8171
/*
 * Compute PLL dividers for an ILK-style pipe and reserve a shared DPLL
 * for it. Returns 0 on success or -EINVAL when no suitable dividers or
 * no free DPLL could be found.
 *
 * NOTE(review): has_reduced_clock is never set to true anywhere in this
 * function, so the reduced-clock branches below are effectively dead and
 * crtc->lowfreq_avail always ends up false here.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Pick divider limits: LVDS differs by SSC refclk (100 MHz vs
	 * the 120 MHz default) and single vs dual link. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Respect pre-set dividers (clock_set); otherwise search. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}
8237
8238 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8239                                          struct intel_link_m_n *m_n)
8240 {
8241         struct drm_device *dev = crtc->base.dev;
8242         struct drm_i915_private *dev_priv = to_i915(dev);
8243         enum pipe pipe = crtc->pipe;
8244
8245         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8246         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8247         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8248                 & ~TU_SIZE_MASK;
8249         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8250         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8251                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8252 }
8253
8254 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
8255                                          enum transcoder transcoder,
8256                                          struct intel_link_m_n *m_n,
8257                                          struct intel_link_m_n *m2_n2)
8258 {
8259         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8260         enum pipe pipe = crtc->pipe;
8261
8262         if (INTEL_GEN(dev_priv) >= 5) {
8263                 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
8264                 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
8265                 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
8266                         & ~TU_SIZE_MASK;
8267                 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
8268                 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
8269                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8270                 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
8271                  * gen < 8) and if DRRS is supported (to make sure the
8272                  * registers are not unnecessarily read).
8273                  */
8274                 if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
8275                         crtc->config->has_drrs) {
8276                         m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
8277                         m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
8278                         m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
8279                                         & ~TU_SIZE_MASK;
8280                         m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
8281                         m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
8282                                         & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8283                 }
8284         } else {
8285                 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
8286                 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
8287                 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
8288                         & ~TU_SIZE_MASK;
8289                 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
8290                 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
8291                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8292         }
8293 }
8294
8295 void intel_dp_get_m_n(struct intel_crtc *crtc,
8296                       struct intel_crtc_state *pipe_config)
8297 {
8298         if (pipe_config->has_pch_encoder)
8299                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8300         else
8301                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8302                                              &pipe_config->dp_m_n,
8303                                              &pipe_config->dp_m2_n2);
8304 }
8305
/* Read back the FDI link M/N values for @crtc from its CPU transcoder;
 * FDI has no second (M2/N2) set, hence the NULL. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
8312
8313 static void skylake_get_pfit_config(struct intel_crtc *crtc,
8314                                     struct intel_crtc_state *pipe_config)
8315 {
8316         struct drm_device *dev = crtc->base.dev;
8317         struct drm_i915_private *dev_priv = to_i915(dev);
8318         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8319         uint32_t ps_ctrl = 0;
8320         int id = -1;
8321         int i;
8322
8323         /* find scaler attached to this pipe */
8324         for (i = 0; i < crtc->num_scalers; i++) {
8325                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8326                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8327                         id = i;
8328                         pipe_config->pch_pfit.enabled = true;
8329                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8330                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8331                         break;
8332                 }
8333         }
8334
8335         scaler_state->scaler_id = id;
8336         if (id >= 0) {
8337                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8338         } else {
8339                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
8340         }
8341 }
8342
/*
 * Read back the state of primary plane 0 on @crtc as programmed by the
 * firmware/BIOS and describe it in @plane_config, so the boot
 * framebuffer can be inherited. On a disabled plane or any unexpected
 * register value the partially built fb is freed and @plane_config->fb
 * is left unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	/* Nothing to inherit if the plane isn't even enabled. */
	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	/* Translate the hardware pixel format into a drm fourcc. */
	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface base address is 4K aligned; mask off the low bits. */
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE encodes height in bits 16-27, width in 0-12 (0-based). */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Hardware stride is in units that depend on format/modifier. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
8424
8425 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
8426                                      struct intel_crtc_state *pipe_config)
8427 {
8428         struct drm_device *dev = crtc->base.dev;
8429         struct drm_i915_private *dev_priv = to_i915(dev);
8430         uint32_t tmp;
8431
8432         tmp = I915_READ(PF_CTL(crtc->pipe));
8433
8434         if (tmp & PF_ENABLE) {
8435                 pipe_config->pch_pfit.enabled = true;
8436                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
8437                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
8438
8439                 /* We currently do not free assignements of panel fitters on
8440                  * ivb/hsw (since we don't use the higher upscaling modes which
8441                  * differentiates them) so just WARN about this case for now. */
8442                 if (IS_GEN7(dev_priv)) {
8443                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
8444                                 PF_PIPE_SEL_IVB(crtc->pipe));
8445                 }
8446         }
8447 }
8448
/*
 * Reconstruct the framebuffer configuration that the BIOS/firmware left
 * programmed on @crtc's primary plane (ILK..BDW display registers), so the
 * fbdev/initial-config code can take over the plane without a modeset.
 * On failure (plane disabled or allocation failure) plane_config->fb is
 * simply left unset.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	/* Gen4+ encodes X-tiling in the plane control register. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	/* Translate the hw pixel format field into a drm fourcc. */
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* Surface base address is 4k aligned; mask off the low bits. */
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		/* Pre-HSW uses separate tiled/linear offset registers. */
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* Plane size is taken from the pipe source size (fields are size-1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	/* Stride register holds the byte stride, 64-byte aligned. */
	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8516
8517 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
8518                                      struct intel_crtc_state *pipe_config)
8519 {
8520         struct drm_device *dev = crtc->base.dev;
8521         struct drm_i915_private *dev_priv = to_i915(dev);
8522         enum intel_display_power_domain power_domain;
8523         uint32_t tmp;
8524         bool ret;
8525
8526         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8527         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8528                 return false;
8529
8530         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8531         pipe_config->shared_dpll = NULL;
8532
8533         ret = false;
8534         tmp = I915_READ(PIPECONF(crtc->pipe));
8535         if (!(tmp & PIPECONF_ENABLE))
8536                 goto out;
8537
8538         switch (tmp & PIPECONF_BPC_MASK) {
8539         case PIPECONF_6BPC:
8540                 pipe_config->pipe_bpp = 18;
8541                 break;
8542         case PIPECONF_8BPC:
8543                 pipe_config->pipe_bpp = 24;
8544                 break;
8545         case PIPECONF_10BPC:
8546                 pipe_config->pipe_bpp = 30;
8547                 break;
8548         case PIPECONF_12BPC:
8549                 pipe_config->pipe_bpp = 36;
8550                 break;
8551         default:
8552                 break;
8553         }
8554
8555         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
8556                 pipe_config->limited_color_range = true;
8557
8558         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
8559                 struct intel_shared_dpll *pll;
8560                 enum intel_dpll_id pll_id;
8561
8562                 pipe_config->has_pch_encoder = true;
8563
8564                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
8565                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8566                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
8567
8568                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
8569
8570                 if (HAS_PCH_IBX(dev_priv)) {
8571                         /*
8572                          * The pipe->pch transcoder and pch transcoder->pll
8573                          * mapping is fixed.
8574                          */
8575                         pll_id = (enum intel_dpll_id) crtc->pipe;
8576                 } else {
8577                         tmp = I915_READ(PCH_DPLL_SEL);
8578                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8579                                 pll_id = DPLL_ID_PCH_PLL_B;
8580                         else
8581                                 pll_id= DPLL_ID_PCH_PLL_A;
8582                 }
8583
8584                 pipe_config->shared_dpll =
8585                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
8586                 pll = pipe_config->shared_dpll;
8587
8588                 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
8589                                                  &pipe_config->dpll_hw_state));
8590
8591                 tmp = pipe_config->dpll_hw_state.dpll;
8592                 pipe_config->pixel_multiplier =
8593                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
8594                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
8595
8596                 ironlake_pch_clock_get(crtc, pipe_config);
8597         } else {
8598                 pipe_config->pixel_multiplier = 1;
8599         }
8600
8601         intel_get_pipe_timings(crtc, pipe_config);
8602         intel_get_pipe_src_size(crtc, pipe_config);
8603
8604         ironlake_get_pfit_config(crtc, pipe_config);
8605
8606         ret = true;
8607
8608 out:
8609         intel_display_power_put(dev_priv, power_domain);
8610
8611         return ret;
8612 }
8613
/*
 * Sanity-check that everything that depends on LCPLL is already shut down
 * before we disable it: all pipes, the power well, the PCH PLLs, panel
 * power, backlight PWMs, the utility pin, PCH GTC and interrupts. Each
 * violation triggers an I915_STATE_WARN rather than aborting, since this
 * is a debugging aid on the LCPLL-disable path.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* The second CPU backlight PWM only exists on Haswell. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
8647
8648 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
8649 {
8650         if (IS_HASWELL(dev_priv))
8651                 return I915_READ(D_COMP_HSW);
8652         else
8653                 return I915_READ(D_COMP_BDW);
8654 }
8655
8656 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
8657 {
8658         if (IS_HASWELL(dev_priv)) {
8659                 mutex_lock(&dev_priv->rps.hw_lock);
8660                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
8661                                             val))
8662                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
8663                 mutex_unlock(&dev_priv->rps.hw_lock);
8664         } else {
8665                 I915_WRITE(D_COMP_BDW, val);
8666                 POSTING_READ(D_COMP_BDW);
8667         }
8668 }
8669
8670 /*
8671  * This function implements pieces of two sequences from BSpec:
8672  * - Sequence for display software to disable LCPLL
8673  * - Sequence for display software to allow package C8+
8674  * The steps implemented here are just the steps that actually touch the LCPLL
8675  * register. Callers should take care of disabling all the display engine
8676  * functions, doing the mode unset, fixing interrupts, etc.
8677  */
8678 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
8679                               bool switch_to_fclk, bool allow_power_down)
8680 {
8681         uint32_t val;
8682
8683         assert_can_disable_lcpll(dev_priv);
8684
8685         val = I915_READ(LCPLL_CTL);
8686
8687         if (switch_to_fclk) {
8688                 val |= LCPLL_CD_SOURCE_FCLK;
8689                 I915_WRITE(LCPLL_CTL, val);
8690
8691                 if (wait_for_us(I915_READ(LCPLL_CTL) &
8692                                 LCPLL_CD_SOURCE_FCLK_DONE, 1))
8693                         DRM_ERROR("Switching to FCLK failed\n");
8694
8695                 val = I915_READ(LCPLL_CTL);
8696         }
8697
8698         val |= LCPLL_PLL_DISABLE;
8699         I915_WRITE(LCPLL_CTL, val);
8700         POSTING_READ(LCPLL_CTL);
8701
8702         if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
8703                 DRM_ERROR("LCPLL still locked\n");
8704
8705         val = hsw_read_dcomp(dev_priv);
8706         val |= D_COMP_COMP_DISABLE;
8707         hsw_write_dcomp(dev_priv, val);
8708         ndelay(100);
8709
8710         if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
8711                      1))
8712                 DRM_ERROR("D_COMP RCOMP still in progress\n");
8713
8714         if (allow_power_down) {
8715                 val = I915_READ(LCPLL_CTL);
8716                 val |= LCPLL_POWER_DOWN_ALLOW;
8717                 I915_WRITE(LCPLL_CTL, val);
8718                 POSTING_READ(LCPLL_CTL);
8719         }
8720 }
8721
8722 /*
8723  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
8724  * source.
8725  */
8726 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
8727 {
8728         uint32_t val;
8729
8730         val = I915_READ(LCPLL_CTL);
8731
8732         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
8733                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
8734                 return;
8735
8736         /*
8737          * Make sure we're not on PC8 state before disabling PC8, otherwise
8738          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
8739          */
8740         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
8741
8742         if (val & LCPLL_POWER_DOWN_ALLOW) {
8743                 val &= ~LCPLL_POWER_DOWN_ALLOW;
8744                 I915_WRITE(LCPLL_CTL, val);
8745                 POSTING_READ(LCPLL_CTL);
8746         }
8747
8748         val = hsw_read_dcomp(dev_priv);
8749         val |= D_COMP_COMP_FORCE;
8750         val &= ~D_COMP_COMP_DISABLE;
8751         hsw_write_dcomp(dev_priv, val);
8752
8753         val = I915_READ(LCPLL_CTL);
8754         val &= ~LCPLL_PLL_DISABLE;
8755         I915_WRITE(LCPLL_CTL, val);
8756
8757         if (intel_wait_for_register(dev_priv,
8758                                     LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
8759                                     5))
8760                 DRM_ERROR("LCPLL not locked yet\n");
8761
8762         if (val & LCPLL_CD_SOURCE_FCLK) {
8763                 val = I915_READ(LCPLL_CTL);
8764                 val &= ~LCPLL_CD_SOURCE_FCLK;
8765                 I915_WRITE(LCPLL_CTL, val);
8766
8767                 if (wait_for_us((I915_READ(LCPLL_CTL) &
8768                                  LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
8769                         DRM_ERROR("Switching back to LCPLL failed\n");
8770         }
8771
8772         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
8773         intel_update_cdclk(dev_priv);
8774 }
8775
8776 /*
8777  * Package states C8 and deeper are really deep PC states that can only be
8778  * reached when all the devices on the system allow it, so even if the graphics
8779  * device allows PC8+, it doesn't mean the system will actually get to these
8780  * states. Our driver only allows PC8+ when going into runtime PM.
8781  *
8782  * The requirements for PC8+ are that all the outputs are disabled, the power
8783  * well is disabled and most interrupts are disabled, and these are also
8784  * requirements for runtime PM. When these conditions are met, we manually do
8785  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
8786  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
8787  * hang the machine.
8788  *
8789  * When we really reach PC8 or deeper states (not just when we allow it) we lose
8790  * the state of some registers, so when we come back from PC8+ we need to
8791  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
8792  * need to take care of the registers kept by RC6. Notice that this happens even
8793  * if we don't put the device in PCI D3 state (which is what currently happens
8794  * because of the runtime PM support).
8795  *
8796  * For more, read "Display Sequences for Package C8" on the hardware
8797  * documentation.
8798  */
8799 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
8800 {
8801         uint32_t val;
8802
8803         DRM_DEBUG_KMS("Enabling package C8+\n");
8804
8805         if (HAS_PCH_LPT_LP(dev_priv)) {
8806                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
8807                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
8808                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8809         }
8810
8811         lpt_disable_clkout_dp(dev_priv);
8812         hsw_disable_lcpll(dev_priv, true, true);
8813 }
8814
/*
 * Undo hsw_enable_pc8(): bring LCPLL back up, re-initialize the PCH
 * reference clock, and restore the LPT-LP partition level disable bit.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	/* Re-set the bit cleared in hsw_enable_pc8() on LPT-LP. */
	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
8830
8831 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
8832                                       struct intel_crtc_state *crtc_state)
8833 {
8834         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
8835                 struct intel_encoder *encoder =
8836                         intel_ddi_get_crtc_new_encoder(crtc_state);
8837
8838                 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
8839                         DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8840                                          pipe_name(crtc->pipe));
8841                         return -EINVAL;
8842                 }
8843         }
8844
8845         crtc->lowfreq_avail = false;
8846
8847         return 0;
8848 }
8849
8850 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
8851                                 enum port port,
8852                                 struct intel_crtc_state *pipe_config)
8853 {
8854         enum intel_dpll_id id;
8855
8856         switch (port) {
8857         case PORT_A:
8858                 id = DPLL_ID_SKL_DPLL0;
8859                 break;
8860         case PORT_B:
8861                 id = DPLL_ID_SKL_DPLL1;
8862                 break;
8863         case PORT_C:
8864                 id = DPLL_ID_SKL_DPLL2;
8865                 break;
8866         default:
8867                 DRM_ERROR("Incorrect port type\n");
8868                 return;
8869         }
8870
8871         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
8872 }
8873
8874 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
8875                                 enum port port,
8876                                 struct intel_crtc_state *pipe_config)
8877 {
8878         enum intel_dpll_id id;
8879         u32 temp;
8880
8881         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
8882         id = temp >> (port * 3 + 1);
8883
8884         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
8885                 return;
8886
8887         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
8888 }
8889
8890 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
8891                                 enum port port,
8892                                 struct intel_crtc_state *pipe_config)
8893 {
8894         enum intel_dpll_id id;
8895         uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
8896
8897         switch (ddi_pll_sel) {
8898         case PORT_CLK_SEL_WRPLL1:
8899                 id = DPLL_ID_WRPLL1;
8900                 break;
8901         case PORT_CLK_SEL_WRPLL2:
8902                 id = DPLL_ID_WRPLL2;
8903                 break;
8904         case PORT_CLK_SEL_SPLL:
8905                 id = DPLL_ID_SPLL;
8906                 break;
8907         case PORT_CLK_SEL_LCPLL_810:
8908                 id = DPLL_ID_LCPLL_810;
8909                 break;
8910         case PORT_CLK_SEL_LCPLL_1350:
8911                 id = DPLL_ID_LCPLL_1350;
8912                 break;
8913         case PORT_CLK_SEL_LCPLL_2700:
8914                 id = DPLL_ID_LCPLL_2700;
8915                 break;
8916         default:
8917                 MISSING_CASE(ddi_pll_sel);
8918                 /* fall through */
8919         case PORT_CLK_SEL_NONE:
8920                 return;
8921         }
8922
8923         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
8924 }
8925
/*
 * Determine which CPU transcoder is feeding @crtc and whether that
 * transcoder's pipe is enabled. On success the transcoder's power domain
 * reference is recorded in @power_domain_mask; the caller is responsible
 * for dropping every domain in the mask.
 *
 * Returns true if the transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		/* The eDP transcoder overrides the default mapping. */
		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	/* Caller drops this domain reference via the mask. */
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
8976
/*
 * Check whether one of the BXT DSI transcoders (ports A/C) is driving
 * @crtc. Any transcoder power domain acquired while probing is recorded
 * in @power_domain_mask for the caller to release.
 *
 * Returns true if a DSI transcoder was found for this crtc (in which case
 * pipe_config->cpu_transcoder is set to it).
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports routed to a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9024
/*
 * Read out which DDI port/PLL is driving the crtc's transcoder and, on
 * HSW/BDW, whether the FDI/PCH path (DDI E) is in use. Fills in
 * shared_dpll, dpll_hw_state and the PCH/FDI fields of @pipe_config.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* The PLL lookup is platform specific. */
	if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9066
/*
 * Read out the full hardware state of a HSW+ pipe into @pipe_config.
 * Power domains are acquired with get_if_enabled as the readout proceeds,
 * collected in power_domain_mask, and released in one pass at the end.
 *
 * Returns true if the pipe is active.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On BXT a DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSI transcoders have no DDI port and own their timing readout. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_crtc_init_scalers(crtc, pipe_config);

		/* No scaler is assigned to the crtc by default. */
		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	/* The panel fitter lives in its own power domain. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev_priv))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	/* PIPE_MULT exists only for non-eDP, non-DSI transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9137
9138 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
9139 {
9140         struct drm_i915_private *dev_priv =
9141                 to_i915(plane_state->base.plane->dev);
9142         const struct drm_framebuffer *fb = plane_state->base.fb;
9143         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9144         u32 base;
9145
9146         if (INTEL_INFO(dev_priv)->cursor_needs_physical)
9147                 base = obj->phys_handle->busaddr;
9148         else
9149                 base = intel_plane_ggtt_offset(plane_state);
9150
9151         /* ILK+ do this automagically */
9152         if (HAS_GMCH_DISPLAY(dev_priv) &&
9153             plane_state->base.rotation & DRM_ROTATE_180)
9154                 base += (plane_state->base.crtc_h *
9155                          plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9156
9157         return base;
9158 }
9159
9160 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9161 {
9162         int x = plane_state->base.crtc_x;
9163         int y = plane_state->base.crtc_y;
9164         u32 pos = 0;
9165
9166         if (x < 0) {
9167                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9168                 x = -x;
9169         }
9170         pos |= x << CURSOR_X_SHIFT;
9171
9172         if (y < 0) {
9173                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9174                 y = -y;
9175         }
9176         pos |= y << CURSOR_Y_SHIFT;
9177
9178         return pos;
9179 }
9180
9181 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9182 {
9183         const struct drm_mode_config *config =
9184                 &plane_state->base.plane->dev->mode_config;
9185         int width = plane_state->base.crtc_w;
9186         int height = plane_state->base.crtc_h;
9187
9188         return width > 0 && width <= config->cursor_width &&
9189                 height > 0 && height <= config->cursor_height;
9190 }
9191
9192 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
9193                               struct intel_plane_state *plane_state)
9194 {
9195         const struct drm_framebuffer *fb = plane_state->base.fb;
9196         int ret;
9197
9198         ret = drm_plane_helper_check_state(&plane_state->base,
9199                                            &plane_state->clip,
9200                                            DRM_PLANE_HELPER_NO_SCALING,
9201                                            DRM_PLANE_HELPER_NO_SCALING,
9202                                            true, true);
9203         if (ret)
9204                 return ret;
9205
9206         if (!fb)
9207                 return 0;
9208
9209         if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
9210                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
9211                 return -EINVAL;
9212         }
9213
9214         return 0;
9215 }
9216
9217 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9218                            const struct intel_plane_state *plane_state)
9219 {
9220         unsigned int width = plane_state->base.crtc_w;
9221         unsigned int stride = roundup_pow_of_two(width) * 4;
9222
9223         switch (stride) {
9224         default:
9225                 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
9226                           width, stride);
9227                 stride = 256;
9228                 /* fallthrough */
9229         case 256:
9230         case 512:
9231         case 1024:
9232         case 2048:
9233                 break;
9234         }
9235
9236         return CURSOR_ENABLE |
9237                 CURSOR_GAMMA_ENABLE |
9238                 CURSOR_FORMAT_ARGB |
9239                 CURSOR_STRIDE(stride);
9240 }
9241
9242 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9243 {
9244         int width = plane_state->base.crtc_w;
9245
9246         /*
9247          * 845g/865g are only limited by the width of their cursors,
9248          * the height is arbitrary up to the precision of the register.
9249          */
9250         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
9251 }
9252
/*
 * Validate a cursor plane update for 845g/865g: run the common
 * checks, verify the dimensions are supported, and make sure the
 * backing object is large enough for the stride the hardware will
 * use. On success, plane_state->ctl is precomputed for the commit.
 */
static int i845_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	unsigned int stride;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* Same stride computation as i845_cursor_ctl(). */
	stride = roundup_pow_of_two(plane_state->base.crtc_w) * 4;
	if (obj->base.size < stride * plane_state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9288
/*
 * Program the 845g/865g cursor registers (single pipe, PIPE_A).
 * With a NULL plane_state, or an invisible plane, all values stay 0
 * and the cursor is turned off. plane->cursor.* caches the last
 * programmed values so redundant register writes can be skipped.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		/* CURSIZE packs height in bits 12+ and width in the low bits. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (plane->cursor.cntl != 0 &&
	    (plane->cursor.base != base ||
	     plane->cursor.size != size ||
	     plane->cursor.cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		plane->cursor.cntl = 0;
	}

	if (plane->cursor.base != base)
		I915_WRITE_FW(CURBASE(PIPE_A), base);

	if (plane->cursor.size != size)
		I915_WRITE_FW(CURSIZE, size);

	/* Position is only written while the cursor is (being) enabled. */
	if (cntl)
		I915_WRITE_FW(CURPOS(PIPE_A), pos);

	if (plane->cursor.cntl != cntl)
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/* Remember what we programmed for the skip-checks above. */
	plane->cursor.cntl = cntl;
	plane->cursor.base = base;
	plane->cursor.size = size;
}
9341
/* Turn the 845g/865g cursor off by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}
9347
/*
 * Build the CURCNTR value for gen3+ style cursors: gamma enable,
 * pipe CSC on DDI platforms, pipe select, the ARGB cursor mode
 * matching the (square, power-of-two) cursor width, and optional
 * 180 degree rotation. Returns 0 for an unsupported width, which
 * should have been rejected by i9xx_cursor_size_ok() already.
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	u32 cntl;

	cntl = MCURSOR_GAMMA_ENABLE;

	if (HAS_DDI(dev_priv))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= CURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= CURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= CURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_ROTATE_180)
		cntl |= CURSOR_ROTATE_180;

	return cntl;
}
9383
9384 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
9385 {
9386         int width = plane_state->base.crtc_w;
9387         int height = plane_state->base.crtc_h;
9388
9389         if (!intel_cursor_size_ok(plane_state))
9390                 return false;
9391
9392         /*
9393          * Cursors are limited to a few power-of-two
9394          * sizes, and they must be square.
9395          */
9396         switch (width | height) {
9397         case 256:
9398         case 128:
9399         case 64:
9400                 break;
9401         default:
9402                 return false;
9403         }
9404
9405         return true;
9406 }
9407
/*
 * Validate a cursor plane update for gen3+ style cursors: run the
 * common checks, verify the dimensions are supported, make sure the
 * backing object is large enough, and apply the CHV pipe C
 * workaround. On success, plane_state->ctl is precomputed for the
 * commit.
 */
static int i9xx_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = plane->pipe;
	unsigned int stride;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* Stride implied by the (square, power-of-two) cursor width. */
	stride = roundup_pow_of_two(plane_state->base.crtc_w) * 4;
	if (obj->base.size < stride * plane_state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9461
/*
 * Program the gen3+ cursor registers for the plane's pipe. With a
 * NULL plane_state, or an invisible plane, all values stay 0 and
 * the cursor is turned off. plane->cursor.cntl/base cache the last
 * programmed values to skip redundant writes; CURBASE is written
 * last (and posted) whenever either cntl or base changed —
 * presumably the CURBASE write latches the update; confirm against
 * the hardware docs before reordering.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (plane->cursor.cntl != cntl)
		I915_WRITE_FW(CURCNTR(pipe), cntl);

	/* Position is only written while the cursor is (being) enabled. */
	if (cntl)
		I915_WRITE_FW(CURPOS(pipe), pos);

	if (plane->cursor.cntl != cntl ||
	    plane->cursor.base != base)
		I915_WRITE_FW(CURBASE(pipe), base);

	POSTING_READ_FW(CURBASE(pipe));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	plane->cursor.cntl = cntl;
	plane->cursor.base = base;
}
9497
/* Turn the gen3+ cursor off by programming an all-zero state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i9xx_update_cursor(plane, NULL, NULL);
}
9503
9504
/* VESA 640x480x72Hz mode to set on the pipe for load detection */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
9510
9511 struct drm_framebuffer *
9512 intel_framebuffer_create(struct drm_i915_gem_object *obj,
9513                          struct drm_mode_fb_cmd2 *mode_cmd)
9514 {
9515         struct intel_framebuffer *intel_fb;
9516         int ret;
9517
9518         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9519         if (!intel_fb)
9520                 return ERR_PTR(-ENOMEM);
9521
9522         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
9523         if (ret)
9524                 goto err;
9525
9526         return &intel_fb->base;
9527
9528 err:
9529         kfree(intel_fb);
9530         return ERR_PTR(ret);
9531 }
9532
9533 static u32
9534 intel_framebuffer_pitch_for_width(int width, int bpp)
9535 {
9536         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
9537         return ALIGN(pitch, 64);
9538 }
9539
9540 static u32
9541 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
9542 {
9543         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
9544         return PAGE_ALIGN(pitch * mode->vdisplay);
9545 }
9546
/*
 * Allocate a GEM object sized for @mode at the given @depth/@bpp and
 * wrap it in a framebuffer. The object is released again if fb
 * creation fails; an ERR_PTR is returned on any failure.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_object_create(to_i915(dev),
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	/* On failure we still own the object reference; drop it. */
	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}
9573
/*
 * Try to reuse the fbdev framebuffer for load detection. Returns a
 * new reference to the fbdev fb if it is large enough (in both pitch
 * and total size) to scan out @mode, or NULL if fbdev is absent, not
 * yet initialized, too small, or compiled out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* The fbdev pitch must cover a full scanline of @mode. */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->format->cpp[0] * 8))
		return NULL;

	/* ... and the object must cover all visible scanlines. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	/* Caller is responsible for dropping this reference. */
	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}
9606
9607 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
9608                                            struct drm_crtc *crtc,
9609                                            struct drm_display_mode *mode,
9610                                            struct drm_framebuffer *fb,
9611                                            int x, int y)
9612 {
9613         struct drm_plane_state *plane_state;
9614         int hdisplay, vdisplay;
9615         int ret;
9616
9617         plane_state = drm_atomic_get_plane_state(state, crtc->primary);
9618         if (IS_ERR(plane_state))
9619                 return PTR_ERR(plane_state);
9620
9621         if (mode)
9622                 drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
9623         else
9624                 hdisplay = vdisplay = 0;
9625
9626         ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
9627         if (ret)
9628                 return ret;
9629         drm_atomic_set_fb_for_plane(plane_state, fb);
9630         plane_state->crtc_x = 0;
9631         plane_state->crtc_y = 0;
9632         plane_state->crtc_w = hdisplay;
9633         plane_state->crtc_h = vdisplay;
9634         plane_state->src_x = x << 16;
9635         plane_state->src_y = y << 16;
9636         plane_state->src_w = hdisplay << 16;
9637         plane_state->src_h = vdisplay << 16;
9638
9639         return 0;
9640 }
9641
9642 int intel_get_load_detect_pipe(struct drm_connector *connector,
9643                                struct drm_display_mode *mode,
9644                                struct intel_load_detect_pipe *old,
9645                                struct drm_modeset_acquire_ctx *ctx)
9646 {
9647         struct intel_crtc *intel_crtc;
9648         struct intel_encoder *intel_encoder =
9649                 intel_attached_encoder(connector);
9650         struct drm_crtc *possible_crtc;
9651         struct drm_encoder *encoder = &intel_encoder->base;
9652         struct drm_crtc *crtc = NULL;
9653         struct drm_device *dev = encoder->dev;
9654         struct drm_i915_private *dev_priv = to_i915(dev);
9655         struct drm_framebuffer *fb;
9656         struct drm_mode_config *config = &dev->mode_config;
9657         struct drm_atomic_state *state = NULL, *restore_state = NULL;
9658         struct drm_connector_state *connector_state;
9659         struct intel_crtc_state *crtc_state;
9660         int ret, i = -1;
9661
9662         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
9663                       connector->base.id, connector->name,
9664                       encoder->base.id, encoder->name);
9665
9666         old->restore_state = NULL;
9667
9668         WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
9669
9670         /*
9671          * Algorithm gets a little messy:
9672          *
9673          *   - if the connector already has an assigned crtc, use it (but make
9674          *     sure it's on first)
9675          *
9676          *   - try to find the first unused crtc that can drive this connector,
9677          *     and use that if we find one
9678          */
9679
9680         /* See if we already have a CRTC for this connector */
9681         if (connector->state->crtc) {
9682                 crtc = connector->state->crtc;
9683
9684                 ret = drm_modeset_lock(&crtc->mutex, ctx);
9685                 if (ret)
9686                         goto fail;
9687
9688                 /* Make sure the crtc and connector are running */
9689                 goto found;
9690         }
9691
9692         /* Find an unused one (if possible) */
9693         for_each_crtc(dev, possible_crtc) {
9694                 i++;
9695                 if (!(encoder->possible_crtcs & (1 << i)))
9696                         continue;
9697
9698                 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
9699                 if (ret)
9700                         goto fail;
9701
9702                 if (possible_crtc->state->enable) {
9703                         drm_modeset_unlock(&possible_crtc->mutex);
9704                         continue;
9705                 }
9706
9707                 crtc = possible_crtc;
9708                 break;
9709         }
9710
9711         /*
9712          * If we didn't find an unused CRTC, don't use any.
9713          */
9714         if (!crtc) {
9715                 DRM_DEBUG_KMS("no pipe available for load-detect\n");
9716                 ret = -ENODEV;
9717                 goto fail;
9718         }
9719
9720 found:
9721         intel_crtc = to_intel_crtc(crtc);
9722
9723         ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
9724         if (ret)
9725                 goto fail;
9726
9727         state = drm_atomic_state_alloc(dev);
9728         restore_state = drm_atomic_state_alloc(dev);
9729         if (!state || !restore_state) {
9730                 ret = -ENOMEM;
9731                 goto fail;
9732         }
9733
9734         state->acquire_ctx = ctx;
9735         restore_state->acquire_ctx = ctx;
9736
9737         connector_state = drm_atomic_get_connector_state(state, connector);
9738         if (IS_ERR(connector_state)) {
9739                 ret = PTR_ERR(connector_state);
9740                 goto fail;
9741         }
9742
9743         ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
9744         if (ret)
9745                 goto fail;
9746
9747         crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
9748         if (IS_ERR(crtc_state)) {
9749                 ret = PTR_ERR(crtc_state);
9750                 goto fail;
9751         }
9752
9753         crtc_state->base.active = crtc_state->base.enable = true;
9754
9755         if (!mode)
9756                 mode = &load_detect_mode;
9757
9758         /* We need a framebuffer large enough to accommodate all accesses
9759          * that the plane may generate whilst we perform load detection.
9760          * We can not rely on the fbcon either being present (we get called
9761          * during its initialisation to detect all boot displays, or it may
9762          * not even exist) or that it is large enough to satisfy the
9763          * requested mode.
9764          */
9765         fb = mode_fits_in_fbdev(dev, mode);
9766         if (fb == NULL) {
9767                 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
9768                 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
9769         } else
9770                 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
9771         if (IS_ERR(fb)) {
9772                 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
9773                 ret = PTR_ERR(fb);
9774                 goto fail;
9775         }
9776
9777         ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
9778         if (ret)
9779                 goto fail;
9780
9781         drm_framebuffer_unreference(fb);
9782
9783         ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
9784         if (ret)
9785                 goto fail;
9786
9787         ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
9788         if (!ret)
9789                 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
9790         if (!ret)
9791                 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
9792         if (ret) {
9793                 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
9794                 goto fail;
9795         }
9796
9797         ret = drm_atomic_commit(state);
9798         if (ret) {
9799                 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
9800                 goto fail;
9801         }
9802
9803         old->restore_state = restore_state;
9804         drm_atomic_state_put(state);
9805
9806         /* let the connector get through one full cycle before testing */
9807         intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
9808         return true;
9809
9810 fail:
9811         if (state) {
9812                 drm_atomic_state_put(state);
9813                 state = NULL;
9814         }
9815         if (restore_state) {
9816                 drm_atomic_state_put(restore_state);
9817                 restore_state = NULL;
9818         }
9819
9820         if (ret == -EDEADLK)
9821                 return ret;
9822
9823         return false;
9824 }
9825
9826 void intel_release_load_detect_pipe(struct drm_connector *connector,
9827                                     struct intel_load_detect_pipe *old,
9828                                     struct drm_modeset_acquire_ctx *ctx)
9829 {
9830         struct intel_encoder *intel_encoder =
9831                 intel_attached_encoder(connector);
9832         struct drm_encoder *encoder = &intel_encoder->base;
9833         struct drm_atomic_state *state = old->restore_state;
9834         int ret;
9835
9836         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
9837                       connector->base.id, connector->name,
9838                       encoder->base.id, encoder->name);
9839
9840         if (!state)
9841                 return;
9842
9843         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
9844         if (ret)
9845                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
9846         drm_atomic_state_put(state);
9847 }
9848
9849 static int i9xx_pll_refclk(struct drm_device *dev,
9850                            const struct intel_crtc_state *pipe_config)
9851 {
9852         struct drm_i915_private *dev_priv = to_i915(dev);
9853         u32 dpll = pipe_config->dpll_hw_state.dpll;
9854
9855         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
9856                 return dev_priv->vbt.lvds_ssc_freq;
9857         else if (HAS_PCH_SPLIT(dev_priv))
9858                 return 120000;
9859         else if (!IS_GEN2(dev_priv))
9860                 return 96000;
9861         else
9862                 return 48000;
9863 }
9864
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values captured in
 * pipe_config->dpll_hw_state back into divider settings (m1/m2/n/p1/p2)
 * and computes the resulting port clock. The decode differs between
 * gen2, Pineview, and later gen3+ layouts.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP register the DPLL is currently using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		/* P1 is stored as a one-hot bitfield; ffs() recovers it. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: P2 depends on whether LVDS is driven from pipe B. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
9954
9955 int intel_dotclock_calculate(int link_freq,
9956                              const struct intel_link_m_n *m_n)
9957 {
9958         /*
9959          * The calculation for the data clock is:
9960          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
9961          * But we want to avoid losing precison if possible, so:
9962          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
9963          *
9964          * and the link clock is simpler:
9965          * link_clock = (m * link_clock) / n
9966          */
9967
9968         if (!m_n->link_n)
9969                 return 0;
9970
9971         return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
9972 }
9973
/*
 * Fill in port_clock and a best-effort crtc_clock for a PCH pipe by
 * decoding the DPLL registers and the FDI M/N configuration.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
9991
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the pipe timing registers and the DPLL state, decodes them
 * into a freshly allocated drm_display_mode (caller frees), and
 * returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	/* Timing registers hold (value - 1); low word active, high word total. */
	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	/* pipe_config was only a scratch area for the clock decode. */
	kfree(pipe_config);

	return mode;
}
10047
10048 static void intel_crtc_destroy(struct drm_crtc *crtc)
10049 {
10050         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10051         struct drm_device *dev = crtc->dev;
10052         struct intel_flip_work *work;
10053
10054         spin_lock_irq(&dev->event_lock);
10055         work = intel_crtc->flip_work;
10056         intel_crtc->flip_work = NULL;
10057         spin_unlock_irq(&dev->event_lock);
10058
10059         if (work) {
10060                 cancel_work_sync(&work->mmio_work);
10061                 cancel_work_sync(&work->unpin_work);
10062                 kfree(work);
10063         }
10064
10065         drm_crtc_cleanup(crtc);
10066
10067         kfree(intel_crtc);
10068 }
10069
/*
 * intel_unpin_work_fn - deferred completion of a page flip
 *
 * Runs from the flip work's unpin_work item: waits for any associated
 * mmio flip to finish, drops the pin and GEM object reference taken when
 * the flip was queued, signals frontbuffer flip completion and releases
 * the old framebuffer, then frees the work item itself.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* For mmio flips the mmio work must have run before we unpin. */
	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	/* Unpinning the vma and putting the object require struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_vma(work->old_vma);
	i915_gem_object_put(work->pending_flip_obj);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_request_put(work->flip_queued_req);

	intel_frontbuffer_flip_complete(to_i915(dev),
					to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	/* Balance the increment done when the flip was queued. */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
10098
10099 /* Is 'a' after or equal to 'b'? */
10100 static bool g4x_flip_count_after_eq(u32 a, u32 b)
10101 {
10102         return !((a - b) & 0x80000000);
10103 }
10104
/*
 * Has a CS (ring-emitted) flip completed on @crtc?  Called with the
 * event lock held by pageflip_finished().
 */
static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (abort_flip_on_reset(crtc))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->flip_work->flip_count);
}
10150
/*
 * Has an mmio flip completed on @crtc?  Called with the event lock held
 * by pageflip_finished().
 */
static bool
__pageflip_finished_mmio(struct intel_crtc *crtc,
			       struct intel_flip_work *work)
{
	/*
	 * MMIO work completes when vblank is different from
	 * flip_queued_vblank.
	 *
	 * Reset counter value doesn't matter, this is handled by
	 * i915_wait_request finishing early, so no need to handle
	 * reset here.
	 */
	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
}
10165
10166
10167 static bool pageflip_finished(struct intel_crtc *crtc,
10168                               struct intel_flip_work *work)
10169 {
10170         if (!atomic_read(&work->pending))
10171                 return false;
10172
10173         smp_rmb();
10174
10175         if (is_mmio_work(work))
10176                 return __pageflip_finished_mmio(crtc, work);
10177         else
10178                 return __pageflip_finished_cs(crtc, work);
10179 }
10180
10181 void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
10182 {
10183         struct drm_device *dev = &dev_priv->drm;
10184         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10185         struct intel_flip_work *work;
10186         unsigned long flags;
10187
10188         /* Ignore early vblank irqs */
10189         if (!crtc)
10190                 return;
10191
10192         /*
10193          * This is called both by irq handlers and the reset code (to complete
10194          * lost pageflips) so needs the full irqsave spinlocks.
10195          */
10196         spin_lock_irqsave(&dev->event_lock, flags);
10197         work = crtc->flip_work;
10198
10199         if (work != NULL &&
10200             !is_mmio_work(work) &&
10201             pageflip_finished(crtc, work))
10202                 page_flip_completed(crtc);
10203
10204         spin_unlock_irqrestore(&dev->event_lock, flags);
10205 }
10206
10207 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
10208 {
10209         struct drm_device *dev = &dev_priv->drm;
10210         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10211         struct intel_flip_work *work;
10212         unsigned long flags;
10213
10214         /* Ignore early vblank irqs */
10215         if (!crtc)
10216                 return;
10217
10218         /*
10219          * This is called both by irq handlers and the reset code (to complete
10220          * lost pageflips) so needs the full irqsave spinlocks.
10221          */
10222         spin_lock_irqsave(&dev->event_lock, flags);
10223         work = crtc->flip_work;
10224
10225         if (work != NULL &&
10226             is_mmio_work(work) &&
10227             pageflip_finished(crtc, work))
10228                 page_flip_completed(crtc);
10229
10230         spin_unlock_irqrestore(&dev->event_lock, flags);
10231 }
10232
/*
 * Arm @work so that pageflip_finished() starts checking it, recording
 * the vblank count at which the flip was queued.
 */
static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ... */
	/* ... the barrier pairs with the smp_rmb() in pageflip_finished(). */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}
10242
/*
 * Emit a gen2 CS page flip: wait for any pending flip on the plane to
 * complete, then issue MI_DISPLAY_FLIP with the new surface address.
 * Returns 0 on success or a negative error from intel_ring_begin().
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = 0; /* aux display base address, unused */

	return 0;
}
10273
/*
 * Emit a gen3 CS page flip: identical to gen2 except for the i915
 * flavour of the MI_DISPLAY_FLIP opcode and a trailing NOOP in place
 * of the aux base dword.  Returns 0 or a negative error.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Wait for the previous flip on this plane before queueing ours. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}
10301
/*
 * Emit a gen4/5 CS page flip.  No wait-for-event is needed here; only
 * the base address (plus tiling bit) is reprogrammed.  Returns 0 or a
 * negative error.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset |
		intel_fb_modifier_to_tiling(fb->modifier);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}
10336
/*
 * Emit a gen6 CS page flip.  Differs from gen4 in that the tiling bit
 * lives in the pitch dword rather than the base-address dword.
 * Returns 0 or a negative error.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}
10368
/*
 * Emit a gen7+ CS page flip.  On the render ring this additionally
 * programs DERRMR (via LRI) to unmask flip-done for the primary planes
 * and saves the old value with an SRM, as required to make
 * MI_DISPLAY_FLIP behave; gen8's SRM needs one extra address dword plus
 * a NOOP for even packet size.  The whole packet must not straddle a
 * cacheline (see the BSpec note below).  Returns 0 or a negative error.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 *cs, plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* Base packet is 4 dwords; RCS needs LRI(3) + SRM(3) on top. */
	len = 4;
	if (req->engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev_priv))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (req->engine->id == RCS) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
			  DERRMR_PIPEB_PRI_FLIP_DONE |
			  DERRMR_PIPEC_PRI_FLIP_DONE);
		if (IS_GEN8(dev_priv))
			*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
				MI_SRM_LRM_GLOBAL_GTT;
		else
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
		if (IS_GEN8(dev_priv)) {
			*cs++ = 0;
			*cs++ = MI_NOOP;
		}
	}

	*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}
10461
10462 static bool use_mmio_flip(struct intel_engine_cs *engine,
10463                           struct drm_i915_gem_object *obj)
10464 {
10465         /*
10466          * This is not being used for older platforms, because
10467          * non-availability of flip done interrupt forces us to use
10468          * CS flips. Older platforms derive flip done using some clever
10469          * tricks involving the flip_pending status bits and vblank irqs.
10470          * So using MMIO flips there would disrupt this mechanism.
10471          */
10472
10473         if (engine == NULL)
10474                 return true;
10475
10476         if (INTEL_GEN(engine->i915) < 5)
10477                 return false;
10478
10479         if (i915.use_mmio_flip < 0)
10480                 return false;
10481         else if (i915.use_mmio_flip > 0)
10482                 return true;
10483         else if (i915.enable_execlists)
10484                 return true;
10485
10486         return engine != i915_gem_object_last_write_engine(obj);
10487 }
10488
/*
 * Perform an mmio flip on skl+ universal planes: refresh the tiling
 * bits in PLANE_CTL and the stride, then write PLANE_SURF to arm the
 * flip.  Must run between intel_pipe_update_start()/end() so all three
 * writes land in the same frame.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride = skl_plane_stride(fb, 0, rotation);

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	/* The PLANE_SURF write latches everything above. */
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
10527
10528 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
10529                              struct intel_flip_work *work)
10530 {
10531         struct drm_device *dev = intel_crtc->base.dev;
10532         struct drm_i915_private *dev_priv = to_i915(dev);
10533         struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
10534         i915_reg_t reg = DSPCNTR(intel_crtc->plane);
10535         u32 dspcntr;
10536
10537         dspcntr = I915_READ(reg);
10538
10539         if (fb->modifier == I915_FORMAT_MOD_X_TILED)
10540                 dspcntr |= DISPPLANE_TILED;
10541         else
10542                 dspcntr &= ~DISPPLANE_TILED;
10543
10544         I915_WRITE(reg, dspcntr);
10545
10546         I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
10547         POSTING_READ(DSPSURF(intel_crtc->plane));
10548 }
10549
/*
 * Worker that performs an mmio flip: waits for rendering to the new
 * framebuffer to complete, then programs the plane registers inside an
 * intel_pipe_update_start()/end() critical section so the flip is
 * atomic with respect to vblank.
 */
static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	/* Flipping to a busy buffer would tear; wait for idle first. */
	WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);

	intel_pipe_update_start(crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_do_mmio_flip(crtc, work->rotation, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc, work);
}
10572
/*
 * Fallback for platforms without a CS flip implementation: always fails
 * with -ENODEV so callers fall back to other flip mechanisms.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
10582
/*
 * Heuristic check for a stuck CS flip: once the flip has been pending
 * for several vblanks after its request completed, compare the live
 * surface base address against the queued one to detect a missed
 * flip-done interrupt.  Called with the event lock held.
 */
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc,
				      struct intel_flip_work *work)
{
	u32 addr, vblank;

	/* Not armed yet - nothing to stall on. */
	if (!atomic_read(&work->pending))
		return false;

	/* Pairs with the barrier in intel_mark_page_flip_active(). */
	smp_rmb();

	vblank = intel_crtc_get_vblank_counter(intel_crtc);
	if (work->flip_ready_vblank == 0) {
		/* The flip can't be stuck while the CS hasn't executed it. */
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req))
			return false;

		work->flip_ready_vblank = vblank;
	}

	/* Give the hardware a few vblanks before declaring a stall. */
	if (vblank - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
10619
/*
 * Called from the vblank interrupt for @pipe: kick a CS flip that
 * appears stuck (missed flip-done interrupt), and RPS-boost the flip's
 * request if it is taking more than a vblank to execute.
 */
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;

	WARN_ON(!in_interrupt());

	/* Ignore vblank irqs that arrive before the crtc is set up. */
	if (crtc == NULL)
		return;

	/* Plain spin_lock: we are already in hardirq context. */
	spin_lock(&dev->event_lock);
	work = crtc->flip_work;

	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
		page_flip_completed(crtc);
		work = NULL;
	}

	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
10648
10649 __maybe_unused
10650 static int intel_crtc_page_flip(struct drm_crtc *crtc,
10651                                 struct drm_framebuffer *fb,
10652                                 struct drm_pending_vblank_event *event,
10653                                 uint32_t page_flip_flags)
10654 {
10655         struct drm_device *dev = crtc->dev;
10656         struct drm_i915_private *dev_priv = to_i915(dev);
10657         struct drm_framebuffer *old_fb = crtc->primary->fb;
10658         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10659         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10660         struct drm_plane *primary = crtc->primary;
10661         enum pipe pipe = intel_crtc->pipe;
10662         struct intel_flip_work *work;
10663         struct intel_engine_cs *engine;
10664         bool mmio_flip;
10665         struct drm_i915_gem_request *request;
10666         struct i915_vma *vma;
10667         int ret;
10668
10669         /*
10670          * drm_mode_page_flip_ioctl() should already catch this, but double
10671          * check to be safe.  In the future we may enable pageflipping from
10672          * a disabled primary plane.
10673          */
10674         if (WARN_ON(intel_fb_obj(old_fb) == NULL))
10675                 return -EBUSY;
10676
10677         /* Can't change pixel format via MI display flips. */
10678         if (fb->format != crtc->primary->fb->format)
10679                 return -EINVAL;
10680
10681         /*
10682          * TILEOFF/LINOFF registers can't be changed via MI display flips.
10683          * Note that pitch changes could also affect these register.
10684          */
10685         if (INTEL_GEN(dev_priv) > 3 &&
10686             (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
10687              fb->pitches[0] != crtc->primary->fb->pitches[0]))
10688                 return -EINVAL;
10689
10690         if (i915_terminally_wedged(&dev_priv->gpu_error))
10691                 goto out_hang;
10692
10693         work = kzalloc(sizeof(*work), GFP_KERNEL);
10694         if (work == NULL)
10695                 return -ENOMEM;
10696
10697         work->event = event;
10698         work->crtc = crtc;
10699         work->old_fb = old_fb;
10700         INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
10701
10702         ret = drm_crtc_vblank_get(crtc);
10703         if (ret)
10704                 goto free_work;
10705
10706         /* We borrow the event spin lock for protecting flip_work */
10707         spin_lock_irq(&dev->event_lock);
10708         if (intel_crtc->flip_work) {
10709                 /* Before declaring the flip queue wedged, check if
10710                  * the hardware completed the operation behind our backs.
10711                  */
10712                 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
10713                         DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
10714                         page_flip_completed(intel_crtc);
10715                 } else {
10716                         DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
10717                         spin_unlock_irq(&dev->event_lock);
10718
10719                         drm_crtc_vblank_put(crtc);
10720                         kfree(work);
10721                         return -EBUSY;
10722                 }
10723         }
10724         intel_crtc->flip_work = work;
10725         spin_unlock_irq(&dev->event_lock);
10726
10727         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
10728                 flush_workqueue(dev_priv->wq);
10729
10730         /* Reference the objects for the scheduled work. */
10731         drm_framebuffer_reference(work->old_fb);
10732
10733         crtc->primary->fb = fb;
10734         update_state_fb(crtc->primary);
10735
10736         work->pending_flip_obj = i915_gem_object_get(obj);
10737
10738         ret = i915_mutex_lock_interruptible(dev);
10739         if (ret)
10740                 goto cleanup;
10741
10742         intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
10743         if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
10744                 ret = -EIO;
10745                 goto unlock;
10746         }
10747
10748         atomic_inc(&intel_crtc->unpin_work_count);
10749
10750         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10751                 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
10752
10753         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
10754                 engine = dev_priv->engine[BCS];
10755                 if (fb->modifier != old_fb->modifier)
10756                         /* vlv: DISPLAY_FLIP fails to change tiling */
10757                         engine = NULL;
10758         } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
10759                 engine = dev_priv->engine[BCS];
10760         } else if (INTEL_GEN(dev_priv) >= 7) {
10761                 engine = i915_gem_object_last_write_engine(obj);
10762                 if (engine == NULL || engine->id != RCS)
10763                         engine = dev_priv->engine[BCS];
10764         } else {
10765                 engine = dev_priv->engine[RCS];
10766         }
10767
10768         mmio_flip = use_mmio_flip(engine, obj);
10769
10770         vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
10771         if (IS_ERR(vma)) {
10772                 ret = PTR_ERR(vma);
10773                 goto cleanup_pending;
10774         }
10775
10776         work->old_vma = to_intel_plane_state(primary->state)->vma;
10777         to_intel_plane_state(primary->state)->vma = vma;
10778
10779         work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
10780         work->rotation = crtc->primary->state->rotation;
10781
10782         /*
10783          * There's the potential that the next frame will not be compatible with
10784          * FBC, so we want to call pre_update() before the actual page flip.
10785          * The problem is that pre_update() caches some information about the fb
10786          * object, so we want to do this only after the object is pinned. Let's
10787          * be on the safe side and do this immediately before scheduling the
10788          * flip.
10789          */
10790         intel_fbc_pre_update(intel_crtc, intel_crtc->config,
10791                              to_intel_plane_state(primary->state));
10792
10793         if (mmio_flip) {
10794                 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
10795                 queue_work(system_unbound_wq, &work->mmio_work);
10796         } else {
10797                 request = i915_gem_request_alloc(engine,
10798                                                  dev_priv->kernel_context);
10799                 if (IS_ERR(request)) {
10800                         ret = PTR_ERR(request);
10801                         goto cleanup_unpin;
10802                 }
10803
10804                 ret = i915_gem_request_await_object(request, obj, false);
10805                 if (ret)
10806                         goto cleanup_request;
10807
10808                 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
10809                                                    page_flip_flags);
10810                 if (ret)
10811                         goto cleanup_request;
10812
10813                 intel_mark_page_flip_active(intel_crtc, work);
10814
10815                 work->flip_queued_req = i915_gem_request_get(request);
10816                 i915_add_request(request);
10817         }
10818
10819         i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
10820         i915_gem_track_fb(intel_fb_obj(old_fb), obj,
10821                           to_intel_plane(primary)->frontbuffer_bit);
10822         mutex_unlock(&dev->struct_mutex);
10823
10824         intel_frontbuffer_flip_prepare(to_i915(dev),
10825                                        to_intel_plane(primary)->frontbuffer_bit);
10826
10827         trace_i915_flip_request(intel_crtc->plane, obj);
10828
10829         return 0;
10830
10831 cleanup_request:
10832         i915_add_request(request);
10833 cleanup_unpin:
10834         to_intel_plane_state(primary->state)->vma = work->old_vma;
10835         intel_unpin_fb_vma(vma);
10836 cleanup_pending:
10837         atomic_dec(&intel_crtc->unpin_work_count);
10838 unlock:
10839         mutex_unlock(&dev->struct_mutex);
10840 cleanup:
10841         crtc->primary->fb = old_fb;
10842         update_state_fb(crtc->primary);
10843
10844         i915_gem_object_put(obj);
10845         drm_framebuffer_unreference(work->old_fb);
10846
10847         spin_lock_irq(&dev->event_lock);
10848         intel_crtc->flip_work = NULL;
10849         spin_unlock_irq(&dev->event_lock);
10850
10851         drm_crtc_vblank_put(crtc);
10852 free_work:
10853         kfree(work);
10854
10855         if (ret == -EIO) {
10856                 struct drm_atomic_state *state;
10857                 struct drm_plane_state *plane_state;
10858
10859 out_hang:
10860                 state = drm_atomic_state_alloc(dev);
10861                 if (!state)
10862                         return -ENOMEM;
10863                 state->acquire_ctx = dev->mode_config.acquire_ctx;
10864
10865 retry:
10866                 plane_state = drm_atomic_get_plane_state(state, primary);
10867                 ret = PTR_ERR_OR_ZERO(plane_state);
10868                 if (!ret) {
10869                         drm_atomic_set_fb_for_plane(plane_state, fb);
10870
10871                         ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
10872                         if (!ret)
10873                                 ret = drm_atomic_commit(state);
10874                 }
10875
10876                 if (ret == -EDEADLK) {
10877                         drm_modeset_backoff(state->acquire_ctx);
10878                         drm_atomic_state_clear(state);
10879                         goto retry;
10880                 }
10881
10882                 drm_atomic_state_put(state);
10883
10884                 if (ret == 0 && event) {
10885                         spin_lock_irq(&dev->event_lock);
10886                         drm_crtc_send_vblank_event(crtc, event);
10887                         spin_unlock_irq(&dev->event_lock);
10888                 }
10889         }
10890         return ret;
10891 }
10892
10893
10894 /**
10895  * intel_wm_need_update - Check whether watermarks need updating
10896  * @plane: drm plane
10897  * @state: new plane state
10898  *
10899  * Check current plane state versus the new one to determine whether
10900  * watermarks need to be recalculated.
10901  *
10902  * Returns true or false.
10903  */
10904 static bool intel_wm_need_update(struct drm_plane *plane,
10905                                  struct drm_plane_state *state)
10906 {
10907         struct intel_plane_state *new = to_intel_plane_state(state);
10908         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10909
10910         /* Update watermarks on tiling or size changes. */
10911         if (new->base.visible != cur->base.visible)
10912                 return true;
10913
10914         if (!cur->base.fb || !new->base.fb)
10915                 return false;
10916
10917         if (cur->base.fb->modifier != new->base.fb->modifier ||
10918             cur->base.rotation != new->base.rotation ||
10919             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10920             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10921             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10922             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10923                 return true;
10924
10925         return false;
10926 }
10927
10928 static bool needs_scaling(struct intel_plane_state *state)
10929 {
10930         int src_w = drm_rect_width(&state->base.src) >> 16;
10931         int src_h = drm_rect_height(&state->base.src) >> 16;
10932         int dst_w = drm_rect_width(&state->base.dst);
10933         int dst_h = drm_rect_height(&state->base.dst);
10934
10935         return (src_w != dst_w || src_h != dst_h);
10936 }
10937
/*
 * intel_plane_atomic_calc_changes - compute derived flags for a plane update
 * @crtc_state: new crtc state the plane belongs to
 * @plane_state: new plane state
 *
 * Derives the plane's visibility transition (turn on/off) and sets the
 * corresponding flags in the crtc state: watermark pre/post updates,
 * cxsr disabling, fb_changed, frontbuffer bits and LP watermark disabling.
 *
 * Returns 0 on success, or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler allocated/freed. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	/* A plane can't have been visible on a crtc that was off. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Plane stays off: nothing else to derive. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A full modeset counts as both a turn-off and a turn-on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		/* Pre-gen5 (except G4X) recomputes watermarks before enabling. */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		/* ...and after disabling. */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	/* Flag the plane's frontbuffer bit for flushing on commit. */
	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
11042
11043 static bool encoders_cloneable(const struct intel_encoder *a,
11044                                const struct intel_encoder *b)
11045 {
11046         /* masks could be asymmetric, so check both ways */
11047         return a == b || (a->cloneable & (1 << b->type) &&
11048                           b->cloneable & (1 << a->type));
11049 }
11050
11051 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11052                                          struct intel_crtc *crtc,
11053                                          struct intel_encoder *encoder)
11054 {
11055         struct intel_encoder *source_encoder;
11056         struct drm_connector *connector;
11057         struct drm_connector_state *connector_state;
11058         int i;
11059
11060         for_each_new_connector_in_state(state, connector, connector_state, i) {
11061                 if (connector_state->crtc != &crtc->base)
11062                         continue;
11063
11064                 source_encoder =
11065                         to_intel_encoder(connector_state->best_encoder);
11066                 if (!encoders_cloneable(encoder, source_encoder))
11067                         return false;
11068         }
11069
11070         return true;
11071 }
11072
/*
 * .atomic_check hook for intel crtcs: computes clocks, color management,
 * pipe/intermediate watermarks and (gen9+) scaler state for the new
 * crtc state. Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Pipe is being turned off: watermarks must be updated afterwards. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* Compute new clocks on a modeset; a dpll must not be assigned yet. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		/* Intermediate wm only makes sense after pipe wm succeeded. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* Skipping intermediate wm: reuse the optimal values on ILK-style hw. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	/* Gen9+ also needs crtc scaler state and scaler allocation. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	return ret;
}
11151
/* CRTC helper vtable wiring up the atomic begin/flush/check entry points. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
11157
/*
 * Sync each connector's atomic state (best_encoder/crtc) with the legacy
 * encoder pointers, adjusting the connector reference the atomic state
 * is expected to hold while the connector is bound to a crtc.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previously bound crtc. */
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound connector: state keeps a reference. */
			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
11182
11183 static void
11184 connected_sink_compute_bpp(struct intel_connector *connector,
11185                            struct intel_crtc_state *pipe_config)
11186 {
11187         const struct drm_display_info *info = &connector->base.display_info;
11188         int bpp = pipe_config->pipe_bpp;
11189
11190         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
11191                       connector->base.base.id,
11192                       connector->base.name);
11193
11194         /* Don't use an invalid EDID bpc value */
11195         if (info->bpc != 0 && info->bpc * 3 < bpp) {
11196                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
11197                               bpp, info->bpc * 3);
11198                 pipe_config->pipe_bpp = info->bpc * 3;
11199         }
11200
11201         /* Clamp bpp to 8 on screens without EDID 1.4 */
11202         if (info->bpc == 0 && bpp > 24) {
11203                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
11204                               bpp);
11205                 pipe_config->pipe_bpp = 24;
11206         }
11207 }
11208
11209 static int
11210 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11211                           struct intel_crtc_state *pipe_config)
11212 {
11213         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11214         struct drm_atomic_state *state;
11215         struct drm_connector *connector;
11216         struct drm_connector_state *connector_state;
11217         int bpp, i;
11218
11219         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11220             IS_CHERRYVIEW(dev_priv)))
11221                 bpp = 10*3;
11222         else if (INTEL_GEN(dev_priv) >= 5)
11223                 bpp = 12*3;
11224         else
11225                 bpp = 8*3;
11226
11227
11228         pipe_config->pipe_bpp = bpp;
11229
11230         state = pipe_config->base.state;
11231
11232         /* Clamp display bpp to EDID value */
11233         for_each_new_connector_in_state(state, connector, connector_state, i) {
11234                 if (connector_state->crtc != &crtc->base)
11235                         continue;
11236
11237                 connected_sink_compute_bpp(to_intel_connector(connector),
11238                                            pipe_config);
11239         }
11240
11241         return bpp;
11242 }
11243
/* Dump the adjusted crtc timings (clock, h/v sync/total) to the debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
11254
/* Dump one set of link M/N values, labelled by @id, to the debug log. */
static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
11264
/*
 * Dump the full contents of a crtc state to the kernel debug log:
 * transcoder/bpp, link M/N values, modes/timings, pfit, dpll state and
 * the state of every plane on this crtc's pipe. @context labels the dump.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* DRRS uses a second set of M/N values. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms use the pgm-ratio pfit, PCH platforms the pos/size one. */
	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		/* Only planes living on this crtc's pipe. */
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11363
/*
 * Verify that no digital port is driven by more than one encoder and
 * that MST and SST/HDMI streams are not mixed on the same port.
 *
 * Returns true if the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state in this transaction, fall back to current. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through - UNKNOWN is a digital port on DDI */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << enc_to_mst(&encoder->base)->primary->port;
			break;
		default:
			break;
		}
	}

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return true;
}
11422
/*
 * Reset the intel-specific extension of a crtc state to zero while
 * preserving the base drm_crtc_state and a handful of fields that must
 * survive (scaler state, dpll, pch pfit force_thru, and - on GMCH
 * platforms - the watermark state).
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the fields that must survive the wipe below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
11460
/*
 * Compute the complete pipe configuration for @crtc from the atomic state:
 * sanitize the sync polarity flags, pick a baseline pipe bpp, give every
 * encoder on the crtc a chance to adjust (or reject) the mode, then run the
 * crtc-level fixup.  If the crtc fixup asks for a RETRY (bandwidth
 * constrained), the encoder pass is re-run exactly once.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
                          struct intel_crtc_state *pipe_config)
{
        struct drm_atomic_state *state = pipe_config->base.state;
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int base_bpp, ret = -EINVAL;
        int i;
        bool retry = true;  /* allow exactly one RETRY round trip */

        /* Wipe everything except the fields that survive a modeset. */
        clear_intel_crtc_state(pipe_config);

        pipe_config->cpu_transcoder =
                (enum transcoder) to_intel_crtc(crtc)->pipe;

        /*
         * Sanitize sync polarity flags based on requested ones. If neither
         * positive or negative polarity is requested, treat this as meaning
         * negative polarity.
         */
        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

        /* Negative return means no connector could supply a usable bpp. */
        base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
                                             pipe_config);
        if (base_bpp < 0)
                goto fail;

        /*
         * Determine the real pipe dimensions. Note that stereo modes can
         * increase the actual pipe size due to the frame doubling and
         * insertion of additional space for blanks between the frame. This
         * is stored in the crtc timings. We use the requested mode to do this
         * computation to clearly distinguish it from the adjusted mode, which
         * can be changed by the connectors in the below retry loop.
         */
        drm_mode_get_hv_timing(&pipe_config->base.mode,
                               &pipe_config->pipe_src_w,
                               &pipe_config->pipe_src_h);

        for_each_new_connector_in_state(state, connector, connector_state, i) {
                if (connector_state->crtc != crtc)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);

                if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
                        DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
                        goto fail;
                }

                /*
                 * Determine output_types before calling the .compute_config()
                 * hooks so that the hooks can use this information safely.
                 */
                pipe_config->output_types |= 1 << encoder->type;
        }

encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
        pipe_config->pixel_multiplier = 1;

        /* Fill in default crtc timings, allow encoders to overwrite them. */
        drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
                              CRTC_STEREO_DOUBLE);

        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
         */
        for_each_new_connector_in_state(state, connector, connector_state, i) {
                if (connector_state->crtc != crtc)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);

                if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
                        DRM_DEBUG_KMS("Encoder config failure\n");
                        goto fail;
                }
        }

        /* Set default port clock if not overwritten by the encoder. Needs to be
         * done afterwards in case the encoder adjusts the mode. */
        if (!pipe_config->port_clock)
                pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
                        * pipe_config->pixel_multiplier;

        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
        if (ret < 0) {
                DRM_DEBUG_KMS("CRTC fixup failed\n");
                goto fail;
        }

        if (ret == RETRY) {
                /* A second RETRY would mean the encoders and the crtc fixup
                 * cannot converge on a configuration; bail out instead of
                 * looping forever. */
                if (WARN(!retry, "loop in pipe configuration computation\n")) {
                        ret = -EINVAL;
                        goto fail;
                }

                DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
                retry = false;
                goto encoder_retry;
        }

        /* Dithering seems to not pass-through bits correctly when it should, so
         * only enable it on 6bpc panels and when it's not a compliance
         * test requesting 6bpc video pattern.
         */
        pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
                !pipe_config->dither_force_disable;
        DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
        return ret;
}
11586
11587 static void
11588 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
11589 {
11590         struct drm_crtc *crtc;
11591         struct drm_crtc_state *new_crtc_state;
11592         int i;
11593
11594         /* Double check state. */
11595         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11596                 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
11597
11598                 /* Update hwmode for vblank functions */
11599                 if (new_crtc_state->active)
11600                         crtc->hwmode = new_crtc_state->adjusted_mode;
11601                 else
11602                         crtc->hwmode.crtc_clock = 0;
11603
11604                 /*
11605                  * Update legacy state to satisfy fbc code. This can
11606                  * be removed when fbc uses the atomic state.
11607                  */
11608                 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
11609                         struct drm_plane_state *plane_state = crtc->primary->state;
11610
11611                         crtc->primary->fb = plane_state->fb;
11612                         crtc->x = plane_state->src_x >> 16;
11613                         crtc->y = plane_state->src_y >> 16;
11614                 }
11615         }
11616 }
11617
/*
 * Compare two clock values (in kHz) allowing roughly 5% of slack, since
 * readback of PLL-derived clocks rarely reproduces the requested value
 * exactly.  A zero clock only matches another zero clock.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int sum, diff;

        if (clock1 == clock2)
                return true;

        if (clock1 == 0 || clock2 == 0)
                return false;

        sum = clock1 + clock2;
        diff = abs(clock1 - clock2);

        /* Equivalent to: diff < 5% of the average of the two clocks. */
        return (diff + sum) * 100 / sum < 105;
}
11635
11636 static bool
11637 intel_compare_m_n(unsigned int m, unsigned int n,
11638                   unsigned int m2, unsigned int n2,
11639                   bool exact)
11640 {
11641         if (m == m2 && n == n2)
11642                 return true;
11643
11644         if (exact || !m || !n || !m2 || !n2)
11645                 return false;
11646
11647         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11648
11649         if (n > n2) {
11650                 while (n > n2) {
11651                         m2 <<= 1;
11652                         n2 <<= 1;
11653                 }
11654         } else if (n < n2) {
11655                 while (n < n2) {
11656                         m <<= 1;
11657                         n <<= 1;
11658                 }
11659         }
11660
11661         if (n != n2)
11662                 return false;
11663
11664         return intel_fuzzy_clock_check(m, m2);
11665 }
11666
11667 static bool
11668 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11669                        struct intel_link_m_n *m2_n2,
11670                        bool adjust)
11671 {
11672         if (m_n->tu == m2_n2->tu &&
11673             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11674                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11675             intel_compare_m_n(m_n->link_m, m_n->link_n,
11676                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11677                 if (adjust)
11678                         *m2_n2 = *m_n;
11679
11680                 return true;
11681         }
11682
11683         return false;
11684 }
11685
11686 static void __printf(3, 4)
11687 pipe_config_err(bool adjust, const char *name, const char *format, ...)
11688 {
11689         char *level;
11690         unsigned int category;
11691         struct va_format vaf;
11692         va_list args;
11693
11694         if (adjust) {
11695                 level = KERN_DEBUG;
11696                 category = DRM_UT_KMS;
11697         } else {
11698                 level = KERN_ERR;
11699                 category = DRM_UT_NONE;
11700         }
11701
11702         va_start(args, format);
11703         vaf.fmt = format;
11704         vaf.va = &args;
11705
11706         drm_printk(level, category, "mismatch in %s %pV", name, &vaf);
11707
11708         va_end(args);
11709 }
11710
11711 static bool
11712 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11713                           struct intel_crtc_state *current_config,
11714                           struct intel_crtc_state *pipe_config,
11715                           bool adjust)
11716 {
11717         bool ret = true;
11718
11719 #define PIPE_CONF_CHECK_X(name) \
11720         if (current_config->name != pipe_config->name) { \
11721                 pipe_config_err(adjust, __stringify(name), \
11722                           "(expected 0x%08x, found 0x%08x)\n", \
11723                           current_config->name, \
11724                           pipe_config->name); \
11725                 ret = false; \
11726         }
11727
11728 #define PIPE_CONF_CHECK_I(name) \
11729         if (current_config->name != pipe_config->name) { \
11730                 pipe_config_err(adjust, __stringify(name), \
11731                           "(expected %i, found %i)\n", \
11732                           current_config->name, \
11733                           pipe_config->name); \
11734                 ret = false; \
11735         }
11736
11737 #define PIPE_CONF_CHECK_P(name) \
11738         if (current_config->name != pipe_config->name) { \
11739                 pipe_config_err(adjust, __stringify(name), \
11740                           "(expected %p, found %p)\n", \
11741                           current_config->name, \
11742                           pipe_config->name); \
11743                 ret = false; \
11744         }
11745
11746 #define PIPE_CONF_CHECK_M_N(name) \
11747         if (!intel_compare_link_m_n(&current_config->name, \
11748                                     &pipe_config->name,\
11749                                     adjust)) { \
11750                 pipe_config_err(adjust, __stringify(name), \
11751                           "(expected tu %i gmch %i/%i link %i/%i, " \
11752                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11753                           current_config->name.tu, \
11754                           current_config->name.gmch_m, \
11755                           current_config->name.gmch_n, \
11756                           current_config->name.link_m, \
11757                           current_config->name.link_n, \
11758                           pipe_config->name.tu, \
11759                           pipe_config->name.gmch_m, \
11760                           pipe_config->name.gmch_n, \
11761                           pipe_config->name.link_m, \
11762                           pipe_config->name.link_n); \
11763                 ret = false; \
11764         }
11765
11766 /* This is required for BDW+ where there is only one set of registers for
11767  * switching between high and low RR.
11768  * This macro can be used whenever a comparison has to be made between one
11769  * hw state and multiple sw state variables.
11770  */
11771 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
11772         if (!intel_compare_link_m_n(&current_config->name, \
11773                                     &pipe_config->name, adjust) && \
11774             !intel_compare_link_m_n(&current_config->alt_name, \
11775                                     &pipe_config->name, adjust)) { \
11776                 pipe_config_err(adjust, __stringify(name), \
11777                           "(expected tu %i gmch %i/%i link %i/%i, " \
11778                           "or tu %i gmch %i/%i link %i/%i, " \
11779                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11780                           current_config->name.tu, \
11781                           current_config->name.gmch_m, \
11782                           current_config->name.gmch_n, \
11783                           current_config->name.link_m, \
11784                           current_config->name.link_n, \
11785                           current_config->alt_name.tu, \
11786                           current_config->alt_name.gmch_m, \
11787                           current_config->alt_name.gmch_n, \
11788                           current_config->alt_name.link_m, \
11789                           current_config->alt_name.link_n, \
11790                           pipe_config->name.tu, \
11791                           pipe_config->name.gmch_m, \
11792                           pipe_config->name.gmch_n, \
11793                           pipe_config->name.link_m, \
11794                           pipe_config->name.link_n); \
11795                 ret = false; \
11796         }
11797
11798 #define PIPE_CONF_CHECK_FLAGS(name, mask)       \
11799         if ((current_config->name ^ pipe_config->name) & (mask)) { \
11800                 pipe_config_err(adjust, __stringify(name), \
11801                           "(%x) (expected %i, found %i)\n", \
11802                           (mask), \
11803                           current_config->name & (mask), \
11804                           pipe_config->name & (mask)); \
11805                 ret = false; \
11806         }
11807
11808 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
11809         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
11810                 pipe_config_err(adjust, __stringify(name), \
11811                           "(expected %i, found %i)\n", \
11812                           current_config->name, \
11813                           pipe_config->name); \
11814                 ret = false; \
11815         }
11816
11817 #define PIPE_CONF_QUIRK(quirk)  \
11818         ((current_config->quirks | pipe_config->quirks) & (quirk))
11819
11820         PIPE_CONF_CHECK_I(cpu_transcoder);
11821
11822         PIPE_CONF_CHECK_I(has_pch_encoder);
11823         PIPE_CONF_CHECK_I(fdi_lanes);
11824         PIPE_CONF_CHECK_M_N(fdi_m_n);
11825
11826         PIPE_CONF_CHECK_I(lane_count);
11827         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
11828
11829         if (INTEL_GEN(dev_priv) < 8) {
11830                 PIPE_CONF_CHECK_M_N(dp_m_n);
11831
11832                 if (current_config->has_drrs)
11833                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
11834         } else
11835                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
11836
11837         PIPE_CONF_CHECK_X(output_types);
11838
11839         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11840         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11841         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11842         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11843         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11844         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
11845
11846         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11847         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11848         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11849         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11850         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11851         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11852
11853         PIPE_CONF_CHECK_I(pixel_multiplier);
11854         PIPE_CONF_CHECK_I(has_hdmi_sink);
11855         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11856             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11857                 PIPE_CONF_CHECK_I(limited_color_range);
11858
11859         PIPE_CONF_CHECK_I(hdmi_scrambling);
11860         PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
11861         PIPE_CONF_CHECK_I(has_infoframe);
11862
11863         PIPE_CONF_CHECK_I(has_audio);
11864
11865         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11866                               DRM_MODE_FLAG_INTERLACE);
11867
11868         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
11869                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11870                                       DRM_MODE_FLAG_PHSYNC);
11871                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11872                                       DRM_MODE_FLAG_NHSYNC);
11873                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11874                                       DRM_MODE_FLAG_PVSYNC);
11875                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11876                                       DRM_MODE_FLAG_NVSYNC);
11877         }
11878
11879         PIPE_CONF_CHECK_X(gmch_pfit.control);
11880         /* pfit ratios are autocomputed by the hw on gen4+ */
11881         if (INTEL_GEN(dev_priv) < 4)
11882                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
11883         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
11884
11885         if (!adjust) {
11886                 PIPE_CONF_CHECK_I(pipe_src_w);
11887                 PIPE_CONF_CHECK_I(pipe_src_h);
11888
11889                 PIPE_CONF_CHECK_I(pch_pfit.enabled);
11890                 if (current_config->pch_pfit.enabled) {
11891                         PIPE_CONF_CHECK_X(pch_pfit.pos);
11892                         PIPE_CONF_CHECK_X(pch_pfit.size);
11893                 }
11894
11895                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
11896                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
11897         }
11898
11899         /* BDW+ don't expose a synchronous way to read the state */
11900         if (IS_HASWELL(dev_priv))
11901                 PIPE_CONF_CHECK_I(ips_enabled);
11902
11903         PIPE_CONF_CHECK_I(double_wide);
11904
11905         PIPE_CONF_CHECK_P(shared_dpll);
11906         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
11907         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
11908         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11909         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
11910         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
11911         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
11912         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11913         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11914         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
11915
11916         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
11917         PIPE_CONF_CHECK_X(dsi_pll.div);
11918
11919         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
11920                 PIPE_CONF_CHECK_I(pipe_bpp);
11921
11922         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
11923         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
11924
11925 #undef PIPE_CONF_CHECK_X
11926 #undef PIPE_CONF_CHECK_I
11927 #undef PIPE_CONF_CHECK_P
11928 #undef PIPE_CONF_CHECK_FLAGS
11929 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
11930 #undef PIPE_CONF_QUIRK
11931
11932         return ret;
11933 }
11934
11935 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11936                                            const struct intel_crtc_state *pipe_config)
11937 {
11938         if (pipe_config->has_pch_encoder) {
11939                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11940                                                             &pipe_config->fdi_m_n);
11941                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11942
11943                 /*
11944                  * FDI already provided one idea for the dotclock.
11945                  * Yell if the encoder disagrees.
11946                  */
11947                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11948                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11949                      fdi_dotclock, dotclock);
11950         }
11951 }
11952
/*
 * Verify that the gen9+ watermark and DDB allocation state programmed in
 * hardware matches what the software watermark code computed for this
 * crtc: per-plane WM levels, transition WM and DDB entry for every
 * universal plane, then the same three checks for the cursor plane.
 * Mismatches are reported via DRM_ERROR; nothing is corrected here.
 */
static void verify_wm_state(struct drm_crtc *crtc,
                            struct drm_crtc_state *new_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct skl_ddb_allocation hw_ddb, *sw_ddb;
        struct skl_pipe_wm hw_wm, *sw_wm;
        struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const enum pipe pipe = intel_crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        /* SKL-style watermarks only exist on gen9+; inactive pipes have
         * nothing to verify. */
        if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
                return;

        skl_pipe_wm_get_hw_state(crtc, &hw_wm);
        sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                hw_plane_wm = &hw_wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1, level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw_ddb.plane[pipe][plane];
                sw_ddb_entry = &sw_ddb->plane[pipe][plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated its ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         *
         * NOTE(review): despite the comment above, the condition is a
         * constant "if (1)", so the cursor is always checked here — confirm
         * whether a visibility test was intended.
         */
        if (1) {
                hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe),
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
                sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe),
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }
}
12069
12070 static void
12071 verify_connector_state(struct drm_device *dev,
12072                        struct drm_atomic_state *state,
12073                        struct drm_crtc *crtc)
12074 {
12075         struct drm_connector *connector;
12076         struct drm_connector_state *new_conn_state;
12077         int i;
12078
12079         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12080                 struct drm_encoder *encoder = connector->encoder;
12081
12082                 if (new_conn_state->crtc != crtc)
12083                         continue;
12084
12085                 intel_connector_verify_state(to_intel_connector(connector));
12086
12087                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12088                      "connector's atomic encoder doesn't match legacy encoder\n");
12089         }
12090 }
12091
/*
 * Verify encoder software state against connector state and hardware:
 * for each encoder, derive whether it should be enabled from the new
 * connector states, compare that with encoder->base.crtc, and for
 * encoders that should be off confirm the hardware agrees.
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(dev, encoder) {
                /* found: some connector in @state referenced this encoder
                 * (old or new); enabled: a *new* state still uses it. */
                bool enabled = false, found = false;
                enum pipe pipe;

                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);

                for_each_oldnew_connector_in_state(state, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoder untouched by this commit: nothing to verify. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                if (!encoder->base.crtc) {
                        bool active;

                        /* Encoder should be off: make sure hw agrees. */
                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
12140
/*
 * Verify a crtc's software state against the hardware after a commit:
 * read the pipe config back from hardware (reusing @old_crtc_state's
 * storage for the scratch intel_crtc_state), cross-check active flags on
 * the crtc and its encoders, then run a full pipe-config comparison for
 * active crtcs.  Mismatches are reported via I915_STATE_WARN.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
                  struct drm_crtc_state *old_crtc_state,
                  struct drm_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config, *sw_config;
        struct drm_atomic_state *old_state;
        bool active;

        /* Free the old state's internals, then recycle its memory as the
         * scratch buffer for the hw readback below.  Preserve only the
         * crtc/state back-pointers across the memset. */
        old_state = old_crtc_state->state;
        __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
        pipe_config = to_intel_crtc_state(old_crtc_state);
        memset(pipe_config, 0, sizeof(*pipe_config));
        pipe_config->base.crtc = crtc;
        pipe_config->base.state = old_state;

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

        /* hw state is inconsistent with the pipe quirk */
        if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                active = new_crtc_state->active;

        I915_STATE_WARN(new_crtc_state->active != active,
             "crtc active state doesn't match with hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, active);

        I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
             "transitional active state does not match atomic hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

        for_each_encoder_on_crtc(dev, crtc, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->active,
                        "[ENCODER:%i] active %i with crtc active %i\n",
                        encoder->base.base.id, active, new_crtc_state->active);

                I915_STATE_WARN(active && intel_crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Fold each active encoder's hw config into the readback. */
                if (active) {
                        pipe_config->output_types |= 1 << encoder->type;
                        encoder->get_config(encoder, pipe_config);
                }
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* The full config comparison only makes sense for active pipes. */
        if (!new_crtc_state->active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        sw_config = to_intel_crtc_state(crtc->state);
        if (!intel_pipe_config_compare(dev_priv, sw_config,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(intel_crtc, pipe_config,
                                       "[hw state]");
                intel_dump_pipe_config(intel_crtc, sw_config,
                                       "[sw state]");
        }
}
12213
/*
 * Cross-check one shared DPLL's software tracking against its hardware state.
 *
 * @crtc/@new_state: the crtc being verified, or NULL to verify only the
 * pll's global bookkeeping (active_mask vs. state.crtc_mask) with no
 * per-crtc expectations.
 *
 * Emits I915_STATE_WARNs on any mismatch; never modifies state.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct drm_crtc *crtc,
                         struct drm_crtc_state *new_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        unsigned crtc_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        DRM_DEBUG_KMS("%s\n", pll->name);

        /* Read back the pll's actual on/off state from the hardware. */
        active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on plls have no meaningful on/off tracking to verify. */
        if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active crtc\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        if (!crtc) {
                /*
                 * No crtc given: only check that every active user is
                 * also accounted for in the reference mask.
                 */
                I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
                                "more active pll users than references: %x vs %x\n",
                                pll->active_mask, pll->state.crtc_mask);

                return;
        }

        crtc_mask = 1 << drm_crtc_index(crtc);

        /* An active crtc must be in the pll's active mask, and vice versa. */
        if (new_state->active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(crtc)), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & crtc_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(crtc)), pll->active_mask);

        /* Active or not, the crtc must hold a reference on this pll. */
        I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
                        crtc_mask, pll->state.crtc_mask);

        /* The tracked hw state must match what the hardware reports. */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
12268
12269 static void
12270 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12271                          struct drm_crtc_state *old_crtc_state,
12272                          struct drm_crtc_state *new_crtc_state)
12273 {
12274         struct drm_i915_private *dev_priv = to_i915(dev);
12275         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12276         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12277
12278         if (new_state->shared_dpll)
12279                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12280
12281         if (old_state->shared_dpll &&
12282             old_state->shared_dpll != new_state->shared_dpll) {
12283                 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
12284                 struct intel_shared_dpll *pll = old_state->shared_dpll;
12285
12286                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12287                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12288                                 pipe_name(drm_crtc_index(crtc)));
12289                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12290                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12291                                 pipe_name(drm_crtc_index(crtc)));
12292         }
12293 }
12294
12295 static void
12296 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12297                           struct drm_atomic_state *state,
12298                           struct drm_crtc_state *old_state,
12299                           struct drm_crtc_state *new_state)
12300 {
12301         if (!needs_modeset(new_state) &&
12302             !to_intel_crtc_state(new_state)->update_pipe)
12303                 return;
12304
12305         verify_wm_state(crtc, new_state);
12306         verify_connector_state(crtc->dev, state, crtc);
12307         verify_crtc_state(crtc, old_state, new_state);
12308         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12309 }
12310
12311 static void
12312 verify_disabled_dpll_state(struct drm_device *dev)
12313 {
12314         struct drm_i915_private *dev_priv = to_i915(dev);
12315         int i;
12316
12317         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12318                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12319 }
12320
/*
 * Verify the non-crtc-specific state (encoders, connectors, plls) —
 * used for the parts of the state not tied to a crtc being updated.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        verify_encoder_state(dev, state);
        verify_connector_state(dev, state, NULL);
        verify_disabled_dpll_state(dev);
}
12329
12330 static void update_scanline_offset(struct intel_crtc *crtc)
12331 {
12332         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12333
12334         /*
12335          * The scanline counter increments at the leading edge of hsync.
12336          *
12337          * On most platforms it starts counting from vtotal-1 on the
12338          * first active line. That means the scanline counter value is
12339          * always one less than what we would expect. Ie. just after
12340          * start of vblank, which also occurs at start of hsync (on the
12341          * last active line), the scanline counter will read vblank_start-1.
12342          *
12343          * On gen2 the scanline counter starts counting from 1 instead
12344          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12345          * to keep the value positive), instead of adding one.
12346          *
12347          * On HSW+ the behaviour of the scanline counter depends on the output
12348          * type. For DP ports it behaves like most other platforms, but on HDMI
12349          * there's an extra 1 line difference. So we need to add two instead of
12350          * one to the value.
12351          */
12352         if (IS_GEN2(dev_priv)) {
12353                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12354                 int vtotal;
12355
12356                 vtotal = adjusted_mode->crtc_vtotal;
12357                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12358                         vtotal /= 2;
12359
12360                 crtc->scanline_offset = vtotal - 1;
12361         } else if (HAS_DDI(dev_priv) &&
12362                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
12363                 crtc->scanline_offset = 2;
12364         } else
12365                 crtc->scanline_offset = 1;
12366 }
12367
12368 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12369 {
12370         struct drm_device *dev = state->dev;
12371         struct drm_i915_private *dev_priv = to_i915(dev);
12372         struct drm_crtc *crtc;
12373         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12374         int i;
12375
12376         if (!dev_priv->display.crtc_compute_clock)
12377                 return;
12378
12379         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12380                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12381                 struct intel_shared_dpll *old_dpll =
12382                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12383
12384                 if (!needs_modeset(new_crtc_state))
12385                         continue;
12386
12387                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12388
12389                 if (!old_dpll)
12390                         continue;
12391
12392                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12393         }
12394 }
12395
12396 /*
12397  * This implements the workaround described in the "notes" section of the mode
12398  * set sequence documentation. When going from no pipes or single pipe to
12399  * multiple pipes, and planes are enabled after the pipe, we need to wait at
12400  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12401  */
12402 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12403 {
12404         struct drm_crtc_state *crtc_state;
12405         struct intel_crtc *intel_crtc;
12406         struct drm_crtc *crtc;
12407         struct intel_crtc_state *first_crtc_state = NULL;
12408         struct intel_crtc_state *other_crtc_state = NULL;
12409         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12410         int i;
12411
12412         /* look at all crtc's that are going to be enabled in during modeset */
12413         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12414                 intel_crtc = to_intel_crtc(crtc);
12415
12416                 if (!crtc_state->active || !needs_modeset(crtc_state))
12417                         continue;
12418
12419                 if (first_crtc_state) {
12420                         other_crtc_state = to_intel_crtc_state(crtc_state);
12421                         break;
12422                 } else {
12423                         first_crtc_state = to_intel_crtc_state(crtc_state);
12424                         first_pipe = intel_crtc->pipe;
12425                 }
12426         }
12427
12428         /* No workaround needed? */
12429         if (!first_crtc_state)
12430                 return 0;
12431
12432         /* w/a possibly needed, check how many crtc's are already enabled. */
12433         for_each_intel_crtc(state->dev, intel_crtc) {
12434                 struct intel_crtc_state *pipe_config;
12435
12436                 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12437                 if (IS_ERR(pipe_config))
12438                         return PTR_ERR(pipe_config);
12439
12440                 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12441
12442                 if (!pipe_config->base.active ||
12443                     needs_modeset(&pipe_config->base))
12444                         continue;
12445
12446                 /* 2 or more enabled crtcs means no need for w/a */
12447                 if (enabled_pipe != INVALID_PIPE)
12448                         return 0;
12449
12450                 enabled_pipe = intel_crtc->pipe;
12451         }
12452
12453         if (enabled_pipe != INVALID_PIPE)
12454                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12455         else if (other_crtc_state)
12456                 other_crtc_state->hsw_workaround_pipe = first_pipe;
12457
12458         return 0;
12459 }
12460
12461 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12462 {
12463         struct drm_crtc *crtc;
12464
12465         /* Add all pipes to the state */
12466         for_each_crtc(state->dev, crtc) {
12467                 struct drm_crtc_state *crtc_state;
12468
12469                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12470                 if (IS_ERR(crtc_state))
12471                         return PTR_ERR(crtc_state);
12472         }
12473
12474         return 0;
12475 }
12476
12477 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12478 {
12479         struct drm_crtc *crtc;
12480
12481         /*
12482          * Add all pipes to the state, and force
12483          * a modeset on all the active ones.
12484          */
12485         for_each_crtc(state->dev, crtc) {
12486                 struct drm_crtc_state *crtc_state;
12487                 int ret;
12488
12489                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12490                 if (IS_ERR(crtc_state))
12491                         return PTR_ERR(crtc_state);
12492
12493                 if (!crtc_state->active || needs_modeset(crtc_state))
12494                         continue;
12495
12496                 crtc_state->mode_changed = true;
12497
12498                 ret = drm_atomic_add_affected_connectors(state, crtc);
12499                 if (ret)
12500                         return ret;
12501
12502                 ret = drm_atomic_add_affected_planes(state, crtc);
12503                 if (ret)
12504                         return ret;
12505         }
12506
12507         return 0;
12508 }
12509
/*
 * Global (cross-crtc) checks and bookkeeping done once per atomic
 * commit that contains at least one modeset: digital port conflicts,
 * the active-crtc mask, cdclk recomputation and pll clearing.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int ret = 0, i;

        if (!check_digital_port_conflicts(state)) {
                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
                return -EINVAL;
        }

        /* Start from the device's current tracking and apply the deltas. */
        intel_state->modeset = true;
        intel_state->active_crtcs = dev_priv->active_crtcs;
        intel_state->cdclk.logical = dev_priv->cdclk.logical;
        intel_state->cdclk.actual = dev_priv->cdclk.actual;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (new_crtc_state->active)
                        intel_state->active_crtcs |= 1 << i;
                else
                        intel_state->active_crtcs &= ~(1 << i);

                if (old_crtc_state->active != new_crtc_state->active)
                        intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
        }

        /*
         * See if the config requires any additional preparation, e.g.
         * to adjust global state with pipes off.  We need to do this
         * here so we can get the modeset_pipe updated config for the new
         * mode set on this crtc.  For other crtcs we need to use the
         * adjusted_mode bits in the crtc directly.
         */
        if (dev_priv->display.modeset_calc_cdclk) {
                ret = dev_priv->display.modeset_calc_cdclk(state);
                if (ret < 0)
                        return ret;

                /*
                 * Writes to dev_priv->cdclk.logical must protected by
                 * holding all the crtc locks, even if we don't end up
                 * touching the hardware
                 */
                if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
                                               &intel_state->cdclk.logical)) {
                        ret = intel_lock_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }

                /* All pipes must be switched off while we change the cdclk. */
                if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
                                               &intel_state->cdclk.actual)) {
                        ret = intel_modeset_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }

                DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
                              intel_state->cdclk.logical.cdclk,
                              intel_state->cdclk.actual.cdclk);
        } else {
                /* No cdclk recomputation: keep the current logical cdclk. */
                to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
        }

        /* Clear pll assignments of all crtcs being modeset. */
        intel_modeset_clear_plls(state);

        if (IS_HASWELL(dev_priv))
                return haswell_mode_set_planes_workaround(state);

        return 0;
}
12584
12585 /*
12586  * Handle calculation of various watermark data at the end of the atomic check
12587  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12588  * handlers to ensure that all derived state has been updated.
12589  */
12590 static int calc_watermark_data(struct drm_atomic_state *state)
12591 {
12592         struct drm_device *dev = state->dev;
12593         struct drm_i915_private *dev_priv = to_i915(dev);
12594
12595         /* Is there platform-specific watermark information to calculate? */
12596         if (dev_priv->display.compute_global_watermarks)
12597                 return dev_priv->display.compute_global_watermarks(state);
12598
12599         return 0;
12600 }
12601
12602 /**
12603  * intel_atomic_check - validate state object
12604  * @dev: drm device
12605  * @state: state to validate
12606  */
12607 static int intel_atomic_check(struct drm_device *dev,
12608                               struct drm_atomic_state *state)
12609 {
12610         struct drm_i915_private *dev_priv = to_i915(dev);
12611         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12612         struct drm_crtc *crtc;
12613         struct drm_crtc_state *old_crtc_state, *crtc_state;
12614         int ret, i;
12615         bool any_ms = false;
12616
12617         ret = drm_atomic_helper_check_modeset(dev, state);
12618         if (ret)
12619                 return ret;
12620
12621         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
12622                 struct intel_crtc_state *pipe_config =
12623                         to_intel_crtc_state(crtc_state);
12624
12625                 /* Catch I915_MODE_FLAG_INHERITED */
12626                 if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
12627                         crtc_state->mode_changed = true;
12628
12629                 if (!needs_modeset(crtc_state))
12630                         continue;
12631
12632                 if (!crtc_state->enable) {
12633                         any_ms = true;
12634                         continue;
12635                 }
12636
12637                 /* FIXME: For only active_changed we shouldn't need to do any
12638                  * state recomputation at all. */
12639
12640                 ret = drm_atomic_add_affected_connectors(state, crtc);
12641                 if (ret)
12642                         return ret;
12643
12644                 ret = intel_modeset_pipe_config(crtc, pipe_config);
12645                 if (ret) {
12646                         intel_dump_pipe_config(to_intel_crtc(crtc),
12647                                                pipe_config, "[failed]");
12648                         return ret;
12649                 }
12650
12651                 if (i915.fastboot &&
12652                     intel_pipe_config_compare(dev_priv,
12653                                         to_intel_crtc_state(old_crtc_state),
12654                                         pipe_config, true)) {
12655                         crtc_state->mode_changed = false;
12656                         pipe_config->update_pipe = true;
12657                 }
12658
12659                 if (needs_modeset(crtc_state))
12660                         any_ms = true;
12661
12662                 ret = drm_atomic_add_affected_planes(state, crtc);
12663                 if (ret)
12664                         return ret;
12665
12666                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
12667                                        needs_modeset(crtc_state) ?
12668                                        "[modeset]" : "[fastset]");
12669         }
12670
12671         if (any_ms) {
12672                 ret = intel_modeset_checks(state);
12673
12674                 if (ret)
12675                         return ret;
12676         } else {
12677                 intel_state->cdclk.logical = dev_priv->cdclk.logical;
12678         }
12679
12680         ret = drm_atomic_helper_check_planes(dev, state);
12681         if (ret)
12682                 return ret;
12683
12684         intel_fbc_choose_crtc(dev_priv, state);
12685         return calc_watermark_data(state);
12686 }
12687
12688 static int intel_atomic_prepare_commit(struct drm_device *dev,
12689                                        struct drm_atomic_state *state)
12690 {
12691         struct drm_i915_private *dev_priv = to_i915(dev);
12692         struct drm_crtc_state *crtc_state;
12693         struct drm_crtc *crtc;
12694         int i, ret;
12695
12696         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12697                 if (state->legacy_cursor_update)
12698                         continue;
12699
12700                 ret = intel_crtc_wait_for_pending_flips(crtc);
12701                 if (ret)
12702                         return ret;
12703
12704                 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
12705                         flush_workqueue(dev_priv->wq);
12706         }
12707
12708         ret = mutex_lock_interruptible(&dev->struct_mutex);
12709         if (ret)
12710                 return ret;
12711
12712         ret = drm_atomic_helper_prepare_planes(dev, state);
12713         mutex_unlock(&dev->struct_mutex);
12714
12715         return ret;
12716 }
12717
12718 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12719 {
12720         struct drm_device *dev = crtc->base.dev;
12721
12722         if (!dev->max_vblank_count)
12723                 return drm_accurate_vblank_count(&crtc->base);
12724
12725         return dev->driver->get_vblank_counter(dev, crtc->pipe);
12726 }
12727
/*
 * Wait for one vblank to pass on every pipe in @crtc_mask.
 *
 * Done in two phases so the waits overlap: first sample the current
 * vblank count of each pipe (holding a vblank reference), then wait for
 * each count to advance, with a 50ms timeout per pipe.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
                                          struct drm_i915_private *dev_priv,
                                          unsigned crtc_mask)
{
        unsigned last_vblank_count[I915_MAX_PIPES];
        enum pipe pipe;
        int ret;

        if (!crtc_mask)
                return;

        /* Phase 1: grab vblank references and record the current counts. */
        for_each_pipe(dev_priv, pipe) {
                struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                  pipe);

                if (!((1 << pipe) & crtc_mask))
                        continue;

                ret = drm_crtc_vblank_get(&crtc->base);
                if (WARN_ON(ret != 0)) {
                        /* Can't wait on this pipe; drop it from the mask. */
                        crtc_mask &= ~(1 << pipe);
                        continue;
                }

                last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
        }

        /* Phase 2: wait for each count to change, then drop the reference. */
        for_each_pipe(dev_priv, pipe) {
                struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                  pipe);
                long lret;

                if (!((1 << pipe) & crtc_mask))
                        continue;

                lret = wait_event_timeout(dev->vblank[pipe].queue,
                                last_vblank_count[pipe] !=
                                        drm_crtc_vblank_count(&crtc->base),
                                msecs_to_jiffies(50));

                WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

                drm_crtc_vblank_put(&crtc->base);
        }
}
12773
12774 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
12775 {
12776         /* fb updated, need to unpin old fb */
12777         if (crtc_state->fb_changed)
12778                 return true;
12779
12780         /* wm changes, need vblank before final wm's */
12781         if (crtc_state->update_wm_post)
12782                 return true;
12783
12784         if (crtc_state->wm.need_postvbl_update)
12785                 return true;
12786
12787         return false;
12788 }
12789
/*
 * Commit the new state of a single crtc: enable it (modeset) or run the
 * pre-plane updates (fastset/plane update), update FBC and planes, and
 * record in @crtc_vblank_mask whether a vblank wait is needed afterwards.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
                              struct drm_atomic_state *state,
                              struct drm_crtc_state *old_crtc_state,
                              struct drm_crtc_state *new_crtc_state,
                              unsigned int *crtc_vblank_mask)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
        bool modeset = needs_modeset(new_crtc_state);

        if (modeset) {
                /* Scanline offset must be up to date before enabling. */
                update_scanline_offset(intel_crtc);
                dev_priv->display.crtc_enable(pipe_config, state);
        } else {
                intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
                                       pipe_config);
        }

        /* Re-evaluate FBC only when the primary plane is in this state. */
        if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
                intel_fbc_enable(
                    intel_crtc, pipe_config,
                    to_intel_plane_state(crtc->primary->state));
        }

        drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

        if (needs_vblank_wait(pipe_config))
                *crtc_vblank_mask |= drm_crtc_mask(crtc);
}
12821
12822 static void intel_update_crtcs(struct drm_atomic_state *state,
12823                                unsigned int *crtc_vblank_mask)
12824 {
12825         struct drm_crtc *crtc;
12826         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12827         int i;
12828
12829         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12830                 if (!new_crtc_state->active)
12831                         continue;
12832
12833                 intel_update_crtc(crtc, state, old_crtc_state,
12834                                   new_crtc_state, crtc_vblank_mask);
12835         }
12836 }
12837
/*
 * SKL+ variant of intel_update_crtcs(): commits the crtcs in an order
 * that guarantees the per-pipe DDB allocations never overlap while the
 * updates are in flight.
 */
static void skl_update_crtcs(struct drm_atomic_state *state,
                             unsigned int *crtc_vblank_mask)
{
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc_state *cstate;
        unsigned int updated = 0;       /* mask of crtcs already committed */
        bool progress;
        enum pipe pipe;
        int i;

        /* Current (pre-update) DDB allocation of each pipe in the state. */
        const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
                if (new_crtc_state->active)
                        entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with eachother inbetween CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         */
        do {
                progress = false;

                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        bool vbl_wait = false;
                        unsigned int cmask = drm_crtc_mask(crtc);

                        intel_crtc = to_intel_crtc(crtc);
                        cstate = to_intel_crtc_state(crtc->state);
                        pipe = intel_crtc->pipe;

                        if (updated & cmask || !cstate->base.active)
                                continue;

                        /*
                         * Defer this pipe while its new allocation still
                         * overlaps another pipe's current one; a later
                         * pass will pick it up once that pipe moved.
                         */
                        if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
                                continue;

                        updated |= cmask;
                        entries[i] = &cstate->wm.skl.ddb;

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
                                                 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
                            !new_crtc_state->active_changed &&
                            intel_state->wm_results.dirty_pipes != updated)
                                vbl_wait = true;

                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state, crtc_vblank_mask);

                        if (vbl_wait)
                                intel_wait_for_vblank(dev_priv, pipe);

                        progress = true;
                }
        } while (progress);
}
12907
12908 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12909 {
12910         struct intel_atomic_state *state, *next;
12911         struct llist_node *freed;
12912
12913         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12914         llist_for_each_entry_safe(state, next, freed, freed)
12915                 drm_atomic_state_put(&state->base);
12916 }
12917
/* Work item entry point for draining the deferred atomic-state free list. */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), atomic_helper.free_work);

        intel_atomic_helper_free_state(dev_priv);
}
12925
/*
 * The tail of an atomic commit: disables outgoing pipes, updates global
 * state (cdclk, SAGV), enables/updates the remaining pipes, then performs
 * post-vblank cleanup (optimal watermarks, power domain release).
 *
 * Runs either synchronously from intel_atomic_commit() or from the
 * nonblocking commit worker. The statement order below is load-bearing:
 * disables must precede shared state updates, which must precede enables.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	bool hw_check = intel_state->modeset;
	u64 put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;
	int i;

	drm_atomic_helper_wait_for_dependencies(state);

	/* Hold the modeset power well for the duration of a full modeset. */
	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Phase 1: tear down every pipe that needs a full modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
			hw_check = true;

			/* Grab the power domains the new config needs; released in phase 3. */
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(new_crtc_state));
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       to_intel_crtc_state(new_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 *
				 * No clue what this is supposed to achieve.
				 */
				if (INTEL_GEN(dev_priv) >= 9)
					dev_priv->display.initial_watermarks(intel_state,
									     to_intel_crtc_state(crtc->state));
			}
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	/* Phase 2: update global state while the changed pipes are off. */
	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state, &crtc_vblank_mask);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      intel_cstate);
	}

	/* Phase 3: per-crtc cleanup — release the domains taken in phase 1. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	/* Re-enable SAGV only after the new configuration is live. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	intel_atomic_helper_free_state(dev_priv);
}
13093
13094 static void intel_atomic_commit_work(struct work_struct *work)
13095 {
13096         struct drm_atomic_state *state =
13097                 container_of(work, struct drm_atomic_state, commit_work);
13098
13099         intel_atomic_commit_tail(state);
13100 }
13101
13102 static int __i915_sw_fence_call
13103 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13104                           enum i915_sw_fence_notify notify)
13105 {
13106         struct intel_atomic_state *state =
13107                 container_of(fence, struct intel_atomic_state, commit_ready);
13108
13109         switch (notify) {
13110         case FENCE_COMPLETE:
13111                 if (state->base.commit_work.func)
13112                         queue_work(system_unbound_wq, &state->base.commit_work);
13113                 break;
13114
13115         case FENCE_FREE:
13116                 {
13117                         struct intel_atomic_helper *helper =
13118                                 &to_i915(state->base.dev)->atomic_helper;
13119
13120                         if (llist_add(&state->freed, &helper->free_list))
13121                                 schedule_work(&helper->free_work);
13122                         break;
13123                 }
13124         }
13125
13126         return NOTIFY_DONE;
13127 }
13128
13129 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13130 {
13131         struct drm_plane_state *old_plane_state, *new_plane_state;
13132         struct drm_plane *plane;
13133         int i;
13134
13135         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13136                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13137                                   intel_fb_obj(new_plane_state->fb),
13138                                   to_intel_plane(plane)->frontbuffer_bit);
13139 }
13140
13141 /**
13142  * intel_atomic_commit - commit validated state object
13143  * @dev: DRM device
13144  * @state: the top-level driver state object
13145  * @nonblock: nonblocking commit
13146  *
13147  * This function commits a top-level state object that has been validated
13148  * with drm_atomic_helper_check().
13149  *
13150  * RETURNS
13151  * Zero for success or -errno.
13152  */
13153 static int intel_atomic_commit(struct drm_device *dev,
13154                                struct drm_atomic_state *state,
13155                                bool nonblock)
13156 {
13157         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13158         struct drm_i915_private *dev_priv = to_i915(dev);
13159         int ret = 0;
13160
13161         ret = drm_atomic_helper_setup_commit(state, nonblock);
13162         if (ret)
13163                 return ret;
13164
13165         drm_atomic_state_get(state);
13166         i915_sw_fence_init(&intel_state->commit_ready,
13167                            intel_atomic_commit_ready);
13168
13169         ret = intel_atomic_prepare_commit(dev, state);
13170         if (ret) {
13171                 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13172                 i915_sw_fence_commit(&intel_state->commit_ready);
13173                 return ret;
13174         }
13175
13176         /*
13177          * The intel_legacy_cursor_update() fast path takes care
13178          * of avoiding the vblank waits for simple cursor
13179          * movement and flips. For cursor on/off and size changes,
13180          * we want to perform the vblank waits so that watermark
13181          * updates happen during the correct frames. Gen9+ have
13182          * double buffered watermarks and so shouldn't need this.
13183          *
13184          * Do this after drm_atomic_helper_setup_commit() and
13185          * intel_atomic_prepare_commit() because we still want
13186          * to skip the flip and fb cleanup waits. Although that
13187          * does risk yanking the mapping from under the display
13188          * engine.
13189          *
13190          * FIXME doing watermarks and fb cleanup from a vblank worker
13191          * (assuming we had any) would solve these problems.
13192          */
13193         if (INTEL_GEN(dev_priv) < 9)
13194                 state->legacy_cursor_update = false;
13195
13196         drm_atomic_helper_swap_state(state, true);
13197         dev_priv->wm.distrust_bios_wm = false;
13198         intel_shared_dpll_swap_state(state);
13199         intel_atomic_track_fbs(state);
13200
13201         if (intel_state->modeset) {
13202                 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13203                        sizeof(intel_state->min_pixclk));
13204                 dev_priv->active_crtcs = intel_state->active_crtcs;
13205                 dev_priv->cdclk.logical = intel_state->cdclk.logical;
13206                 dev_priv->cdclk.actual = intel_state->cdclk.actual;
13207         }
13208
13209         drm_atomic_state_get(state);
13210         INIT_WORK(&state->commit_work,
13211                   nonblock ? intel_atomic_commit_work : NULL);
13212
13213         i915_sw_fence_commit(&intel_state->commit_ready);
13214         if (!nonblock) {
13215                 i915_sw_fence_wait(&intel_state->commit_ready);
13216                 intel_atomic_commit_tail(state);
13217         }
13218
13219         return 0;
13220 }
13221
13222 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13223 {
13224         struct drm_device *dev = crtc->dev;
13225         struct drm_atomic_state *state;
13226         struct drm_crtc_state *crtc_state;
13227         int ret;
13228
13229         state = drm_atomic_state_alloc(dev);
13230         if (!state) {
13231                 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
13232                               crtc->base.id, crtc->name);
13233                 return;
13234         }
13235
13236         state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
13237
13238 retry:
13239         crtc_state = drm_atomic_get_crtc_state(state, crtc);
13240         ret = PTR_ERR_OR_ZERO(crtc_state);
13241         if (!ret) {
13242                 if (!crtc_state->active)
13243                         goto out;
13244
13245                 crtc_state->mode_changed = true;
13246                 ret = drm_atomic_commit(state);
13247         }
13248
13249         if (ret == -EDEADLK) {
13250                 drm_atomic_state_clear(state);
13251                 drm_modeset_backoff(state->acquire_ctx);
13252                 goto retry;
13253         }
13254
13255 out:
13256         drm_atomic_state_put(state);
13257 }
13258
/* CRTC vfuncs: legacy entry points are routed through the atomic helpers. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
};
13269
13270 /**
13271  * intel_prepare_plane_fb - Prepare fb for usage on plane
13272  * @plane: drm plane to prepare for
13273  * @fb: framebuffer to prepare for presentation
13274  *
13275  * Prepares a framebuffer for usage on a display plane.  Generally this
13276  * involves pinning the underlying object and updating the frontbuffer tracking
13277  * bits.  Some older platforms need special physical address handling for
13278  * cursor planes.
13279  *
13280  * Must be called with struct_mutex held.
13281  *
13282  * Returns 0 on success, negative error code on failure.
13283  */
13284 int
13285 intel_prepare_plane_fb(struct drm_plane *plane,
13286                        struct drm_plane_state *new_state)
13287 {
13288         struct intel_atomic_state *intel_state =
13289                 to_intel_atomic_state(new_state->state);
13290         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13291         struct drm_framebuffer *fb = new_state->fb;
13292         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13293         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13294         int ret;
13295
13296         if (obj) {
13297                 if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13298                     INTEL_INFO(dev_priv)->cursor_needs_physical) {
13299                         const int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
13300
13301                         ret = i915_gem_object_attach_phys(obj, align);
13302                         if (ret) {
13303                                 DRM_DEBUG_KMS("failed to attach phys object\n");
13304                                 return ret;
13305                         }
13306                 } else {
13307                         struct i915_vma *vma;
13308
13309                         vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
13310                         if (IS_ERR(vma)) {
13311                                 DRM_DEBUG_KMS("failed to pin object\n");
13312                                 return PTR_ERR(vma);
13313                         }
13314
13315                         to_intel_plane_state(new_state)->vma = vma;
13316                 }
13317         }
13318
13319         if (!obj && !old_obj)
13320                 return 0;
13321
13322         if (old_obj) {
13323                 struct drm_crtc_state *crtc_state =
13324                         drm_atomic_get_existing_crtc_state(new_state->state,
13325                                                            plane->state->crtc);
13326
13327                 /* Big Hammer, we also need to ensure that any pending
13328                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13329                  * current scanout is retired before unpinning the old
13330                  * framebuffer. Note that we rely on userspace rendering
13331                  * into the buffer attached to the pipe they are waiting
13332                  * on. If not, userspace generates a GPU hang with IPEHR
13333                  * point to the MI_WAIT_FOR_EVENT.
13334                  *
13335                  * This should only fail upon a hung GPU, in which case we
13336                  * can safely continue.
13337                  */
13338                 if (needs_modeset(crtc_state)) {
13339                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13340                                                               old_obj->resv, NULL,
13341                                                               false, 0,
13342                                                               GFP_KERNEL);
13343                         if (ret < 0)
13344                                 return ret;
13345                 }
13346         }
13347
13348         if (new_state->fence) { /* explicit fencing */
13349                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13350                                                     new_state->fence,
13351                                                     I915_FENCE_TIMEOUT,
13352                                                     GFP_KERNEL);
13353                 if (ret < 0)
13354                         return ret;
13355         }
13356
13357         if (!obj)
13358                 return 0;
13359
13360         if (!new_state->fence) { /* implicit fencing */
13361                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13362                                                       obj->resv, NULL,
13363                                                       false, I915_FENCE_TIMEOUT,
13364                                                       GFP_KERNEL);
13365                 if (ret < 0)
13366                         return ret;
13367
13368                 i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
13369         }
13370
13371         return 0;
13372 }
13373
13374 /**
13375  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13376  * @plane: drm plane to clean up for
13377  * @fb: old framebuffer that was on plane
13378  *
13379  * Cleans up a framebuffer that has just been removed from a plane.
13380  *
13381  * Must be called with struct_mutex held.
13382  */
13383 void
13384 intel_cleanup_plane_fb(struct drm_plane *plane,
13385                        struct drm_plane_state *old_state)
13386 {
13387         struct i915_vma *vma;
13388
13389         /* Should only be called after a successful intel_prepare_plane_fb()! */
13390         vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
13391         if (vma)
13392                 intel_unpin_fb_vma(vma);
13393 }
13394
13395 int
13396 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13397 {
13398         struct drm_i915_private *dev_priv;
13399         int max_scale;
13400         int crtc_clock, max_dotclk;
13401
13402         if (!intel_crtc || !crtc_state->base.enable)
13403                 return DRM_PLANE_HELPER_NO_SCALING;
13404
13405         dev_priv = to_i915(intel_crtc->base.dev);
13406
13407         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13408         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13409
13410         if (IS_GEMINILAKE(dev_priv))
13411                 max_dotclk *= 2;
13412
13413         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13414                 return DRM_PLANE_HELPER_NO_SCALING;
13415
13416         /*
13417          * skl max scale is lower of:
13418          *    close to 3 but not 3, -1 is for that purpose
13419          *            or
13420          *    cdclk/crtc_clock
13421          */
13422         max_scale = min((1 << 16) * 3 - 1,
13423                         (1 << 8) * ((max_dotclk << 8) / crtc_clock));
13424
13425         return max_scale;
13426 }
13427
13428 static int
13429 intel_check_primary_plane(struct intel_plane *plane,
13430                           struct intel_crtc_state *crtc_state,
13431                           struct intel_plane_state *state)
13432 {
13433         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13434         struct drm_crtc *crtc = state->base.crtc;
13435         int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13436         int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13437         bool can_position = false;
13438         int ret;
13439
13440         if (INTEL_GEN(dev_priv) >= 9) {
13441                 /* use scaler when colorkey is not required */
13442                 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13443                         min_scale = 1;
13444                         max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13445                 }
13446                 can_position = true;
13447         }
13448
13449         ret = drm_plane_helper_check_state(&state->base,
13450                                            &state->clip,
13451                                            min_scale, max_scale,
13452                                            can_position, true);
13453         if (ret)
13454                 return ret;
13455
13456         if (!state->base.fb)
13457                 return 0;
13458
13459         if (INTEL_GEN(dev_priv) >= 9) {
13460                 ret = skl_check_plane_surface(state);
13461                 if (ret)
13462                         return ret;
13463
13464                 state->ctl = skl_plane_ctl(crtc_state, state);
13465         } else {
13466                 ret = i9xx_check_plane_surface(state);
13467                 if (ret)
13468                         return ret;
13469
13470                 state->ctl = i9xx_plane_ctl(crtc_state, state);
13471         }
13472
13473         return 0;
13474 }
13475
/*
 * Per-crtc pre-plane-commit hook: programs color management, opens the
 * vblank evasion critical section (closed in intel_finish_crtc_commit)
 * and performs fastset/scaler/watermark updates that must land within
 * that section.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_cstate =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	bool modeset = needs_modeset(crtc->state);

	/* LUT/CSC updates only on non-modeset (fastset/color) changes;
	 * full modesets program them elsewhere. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	/* Watermarks are programmed even on full modesets. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13513
13514 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13515                                      struct drm_crtc_state *old_crtc_state)
13516 {
13517         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13518
13519         intel_pipe_update_end(intel_crtc, NULL);
13520 }
13521
13522 /**
13523  * intel_plane_destroy - destroy a plane
13524  * @plane: plane to destroy
13525  *
13526  * Common destruction function for all types of planes (primary, cursor,
13527  * sprite).
13528  */
13529 void intel_plane_destroy(struct drm_plane *plane)
13530 {
13531         drm_plane_cleanup(plane);
13532         kfree(to_intel_plane(plane));
13533 }
13534
/* Plane vfuncs for primary/sprite planes; legacy paths go via the atomic helpers. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};
13545
/*
 * Fast path for legacy cursor ioctls: updates only the fb and position
 * of the cursor plane without a full atomic commit (and thus without
 * vblank waits). Falls back to drm_atomic_helper_update_plane() whenever
 * anything that could affect watermarks or pipe state changes.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct i915_vma *old_vma;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->active || needs_modeset(crtc_state) ||
	    to_intel_crtc_state(crtc_state)->update_pipe)
		goto slow;

	old_plane_state = plane->state;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	/* Validate the candidate state (clipping, visibility, ctl word). */
	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
		/* Physically-addressed cursors; I830 needs 16k alignment. */
		int align = IS_I830(dev_priv) ? 16 * 1024 : 256;

		ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
		if (ret) {
			DRM_DEBUG_KMS("failed to attach phys object\n");
			goto out_unlock;
		}
	} else {
		struct i915_vma *vma;

		vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
		if (IS_ERR(vma)) {
			DRM_DEBUG_KMS("failed to pin object\n");

			ret = PTR_ERR(vma);
			goto out_unlock;
		}

		to_intel_plane_state(new_plane_state)->vma = vma;
	}

	old_fb = old_plane_state->fb;
	old_vma = to_intel_plane_state(old_plane_state)->vma;

	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	/* The old state object ends up holding the old fb/vma so the
	 * common cleanup below releases them. */
	new_plane_state->fence = old_plane_state->fence;
	*to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
	new_plane_state->fence = NULL;
	new_plane_state->fb = old_fb;
	to_intel_plane_state(new_plane_state)->vma = old_vma;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane,
					  to_intel_crtc_state(crtc->state),
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}

	/* Unpins old_vma, which was swapped into new_plane_state above. */
	intel_cleanup_plane_fb(plane, new_plane_state);

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	intel_plane_destroy_state(plane, new_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
13669
/*
 * Plane funcs for the cursor plane. Unlike the other planes, the legacy
 * update_plane entry point is routed to intel_legacy_cursor_update so
 * cursor moves can take a fast path instead of a full atomic commit.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};
13680
/*
 * Allocate and register the primary plane for @pipe.
 *
 * Selects the pixel format table, the update/disable hooks, the plane
 * name and the supported rotations based on the hardware generation,
 * then registers the plane with the DRM core.
 *
 * Returns the new plane or an ERR_PTR() on failure; on failure any
 * partial allocations are freed here.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+ primary planes can be scaled; no scaler claimed yet */
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->plane = (enum plane) !pipe;
	else
		primary->plane = (enum plane) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;

	/* Format table and plane hooks are generation specific. */
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		/* gen2/3: fewer formats, but same i9xx plane hooks */
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	/* The plane name convention differs per generation as well. */
	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_90 |
			DRM_ROTATE_180 | DRM_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180 |
			DRM_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180;
	} else {
		supported_rotations = DRM_ROTATE_0;
	}

	/* Rotation property only exposed on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
13795
/*
 * Allocate and register the cursor plane for @pipe.
 *
 * i845/i865 use their own cursor hooks; everything else uses the i9xx
 * variants. Returns the new plane or an ERR_PTR() on failure, freeing
 * any partial allocations in the failure case.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * ~0 is an invalid sentinel for the cached cursor register values,
	 * forcing the first update to actually program the hardware.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;
	cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_ROTATE_0,
						   DRM_ROTATE_0 |
						   DRM_ROTATE_180);

	/* No scaler claimed yet on gen9+ */
	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
13867
13868 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13869                                     struct intel_crtc_state *crtc_state)
13870 {
13871         struct intel_crtc_scaler_state *scaler_state =
13872                 &crtc_state->scaler_state;
13873         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13874         int i;
13875
13876         crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13877         if (!crtc->num_scalers)
13878                 return;
13879
13880         for (i = 0; i < crtc->num_scalers; i++) {
13881                 struct intel_scaler *scaler = &scaler_state->scalers[i];
13882
13883                 scaler->in_use = 0;
13884                 scaler->mode = PS_SCALER_MODE_DYN;
13885         }
13886
13887         scaler_state->scaler_id = -1;
13888 }
13889
/*
 * Create the crtc for @pipe together with its primary, sprite and
 * cursor planes, and register everything with the DRM core.
 *
 * Returns 0 on success or a negative errno. On failure only the
 * locally allocated crtc/state are freed here; any planes already
 * registered are cleaned up later by drm_mode_config_cleanup().
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;
	/* Primary plane choice may differ from pipe on gen2/3 (FBC quirk) */
	intel_crtc->plane = primary->plane;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* Code elsewhere relies on crtc index == pipe */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
13972
13973 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
13974 {
13975         struct drm_device *dev = connector->base.dev;
13976
13977         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
13978
13979         if (!connector->base.state->crtc)
13980                 return INVALID_PIPE;
13981
13982         return to_intel_crtc(connector->base.state->crtc)->pipe;
13983 }
13984
13985 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
13986                                 struct drm_file *file)
13987 {
13988         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
13989         struct drm_crtc *drmmode_crtc;
13990         struct intel_crtc *crtc;
13991
13992         drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
13993         if (!drmmode_crtc)
13994                 return -ENOENT;
13995
13996         crtc = to_intel_crtc(drmmode_crtc);
13997         pipe_from_crtc_id->pipe = crtc->pipe;
13998
13999         return 0;
14000 }
14001
14002 static int intel_encoder_clones(struct intel_encoder *encoder)
14003 {
14004         struct drm_device *dev = encoder->base.dev;
14005         struct intel_encoder *source_encoder;
14006         int index_mask = 0;
14007         int entry = 0;
14008
14009         for_each_intel_encoder(dev, source_encoder) {
14010                 if (encoders_cloneable(encoder, source_encoder))
14011                         index_mask |= (1 << entry);
14012
14013                 entry++;
14014         }
14015
14016         return index_mask;
14017 }
14018
14019 static bool has_edp_a(struct drm_i915_private *dev_priv)
14020 {
14021         if (!IS_MOBILE(dev_priv))
14022                 return false;
14023
14024         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14025                 return false;
14026
14027         if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14028                 return false;
14029
14030         return true;
14031 }
14032
14033 static bool intel_crt_present(struct drm_i915_private *dev_priv)
14034 {
14035         if (INTEL_GEN(dev_priv) >= 9)
14036                 return false;
14037
14038         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14039                 return false;
14040
14041         if (IS_CHERRYVIEW(dev_priv))
14042                 return false;
14043
14044         if (HAS_PCH_LPT_H(dev_priv) &&
14045             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14046                 return false;
14047
14048         /* DDI E can't be used if DDI A requires 4 lanes */
14049         if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14050                 return false;
14051
14052         if (!dev_priv->vbt.int_crt_support)
14053                 return false;
14054
14055         return true;
14056 }
14057
14058 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14059 {
14060         int pps_num;
14061         int pps_idx;
14062
14063         if (HAS_DDI(dev_priv))
14064                 return;
14065         /*
14066          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14067          * everywhere where registers can be write protected.
14068          */
14069         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14070                 pps_num = 2;
14071         else
14072                 pps_num = 1;
14073
14074         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14075                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14076
14077                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14078                 I915_WRITE(PP_CONTROL(pps_idx), val);
14079         }
14080 }
14081
/*
 * Record the MMIO base of the panel power sequencer registers for this
 * platform and make sure the PPS registers are unlocked for writing.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
14093
/*
 * Probe and register all display outputs (encoders/connectors) for the
 * device. Which ports exist is decided per platform family from
 * hardware straps, fuse registers and/or the VBT. Afterwards the
 * possible_crtcs/possible_clones masks are filled in for each encoder.
 *
 * Ordering matters: the PPS must be set up and LVDS registered before
 * eDP (see comment below), and CRT is registered before digital ports.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registeration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev_priv);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev_priv);

	if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		intel_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D is skipped for HDMI when it carries the eDP panel */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev_priv);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/* NOTE: SDVOB's detect bit is deliberately reused here for
		 * SDVOC, per the comment above. */
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev_priv);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev_priv);

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, fill in their cloning masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
14278
14279 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14280 {
14281         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14282
14283         drm_framebuffer_cleanup(fb);
14284
14285         i915_gem_object_lock(intel_fb->obj);
14286         WARN_ON(!intel_fb->obj->framebuffer_references--);
14287         i915_gem_object_unlock(intel_fb->obj);
14288
14289         i915_gem_object_put(intel_fb->obj);
14290
14291         kfree(intel_fb);
14292 }
14293
14294 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14295                                                 struct drm_file *file,
14296                                                 unsigned int *handle)
14297 {
14298         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14299         struct drm_i915_gem_object *obj = intel_fb->obj;
14300
14301         if (obj->userptr.mm) {
14302                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14303                 return -EINVAL;
14304         }
14305
14306         return drm_gem_handle_create(file, &obj->base, handle);
14307 }
14308
/*
 * DIRTYFB ioctl handler: flush any pending CPU writes to the backing
 * object and signal a frontbuffer flush. The clip rectangles are
 * ignored; the whole object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	return 0;
}
14322
/* Framebuffer vfuncs for user-created (addfb) framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14328
14329 static
14330 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14331                          uint64_t fb_modifier, uint32_t pixel_format)
14332 {
14333         u32 gen = INTEL_GEN(dev_priv);
14334
14335         if (gen >= 9) {
14336                 int cpp = drm_format_plane_cpp(pixel_format, 0);
14337
14338                 /* "The stride in bytes must not exceed the of the size of 8K
14339                  *  pixels and 32K bytes."
14340                  */
14341                 return min(8192 * cpp, 32768);
14342         } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
14343                 return 32*1024;
14344         } else if (gen >= 4) {
14345                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14346                         return 16*1024;
14347                 else
14348                         return 32*1024;
14349         } else if (gen >= 3) {
14350                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14351                         return 8*1024;
14352                 else
14353                         return 16*1024;
14354         } else {
14355                 /* XXX DSPC is limited to 4k tiled */
14356                 return 8*1024;
14357         }
14358 }
14359
/*
 * Validate a user-supplied framebuffer layout (modifier, pitch, pixel
 * format, offsets) against hardware limits and any fence/tiling state
 * already set on @obj, then fill in @intel_fb and register it with the
 * DRM core.
 *
 * Takes a framebuffer reference on @obj; the reference is dropped again
 * on any failure path.  Returns 0 on success or a negative error code
 * (-EINVAL for layout problems, or whatever the core init returns).
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_format_name_buf format_name;
        u32 pitch_limit, stride_alignment;
        unsigned int tiling, stride;
        int ret = -EINVAL;

        /*
         * Snapshot the object's tiling mode and fence stride under the
         * object lock; they are checked against the fb layout below.
         */
        i915_gem_object_lock(obj);
        obj->framebuffer_references++;
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
                        goto err;
                }
        } else {
                /* Legacy addfb: derive the modifier from the fence tiling. */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
                        goto err;
                }
        }

        /* Passed in modifier sanity checking. */
        switch (mode_cmd->modifier[0]) {
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                if (INTEL_GEN(dev_priv) < 9) {
                        DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
                                      mode_cmd->modifier[0]);
                        goto err;
                }
                /* fall through - Y/Yf on gen9+ is otherwise accepted */
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
                break;
        default:
                DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
                              mode_cmd->modifier[0]);
                goto err;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_INFO(dev_priv)->gen < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
                goto err;
        }

        /* Pitch limit depends on both the modifier and the pixel format. */
        pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
                                           mode_cmd->pixel_format);
        if (mode_cmd->pitches[0] > pitch_limit) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                              "tiled" : "linear",
                              mode_cmd->pitches[0], pitch_limit);
                goto err;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
                              mode_cmd->pitches[0], stride);
                goto err;
        }

        /* Reject formats not supported by any plane early. */
        switch (mode_cmd->pixel_format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                /* Supported on all platforms. */
                break;
        case DRM_FORMAT_XRGB1555:
                /* gen3 and earlier only. */
                if (INTEL_GEN(dev_priv) > 3) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format, &format_name));
                        goto err;
                }
                break;
        case DRM_FORMAT_ABGR8888:
                /* VLV/CHV and gen9+ only. */
                if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
                    INTEL_GEN(dev_priv) < 9) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format, &format_name));
                        goto err;
                }
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
                /* gen4+ only. */
                if (INTEL_GEN(dev_priv) < 4) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format, &format_name));
                        goto err;
                }
                break;
        case DRM_FORMAT_ABGR2101010:
                /* VLV/CHV only. */
                if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format, &format_name));
                        goto err;
                }
                break;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
                /* Packed YUV: G4X and gen5+ only. */
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format, &format_name));
                        goto err;
                }
                break;
        default:
                DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                              drm_get_format_name(mode_cmd->pixel_format, &format_name));
                goto err;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                goto err;

        drm_helper_mode_fill_fb_struct(&dev_priv->drm,
                                       &intel_fb->base, mode_cmd);

        /* Alignment check needs the fb struct filled in above. */
        stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0);
        if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
                DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n",
                              mode_cmd->pitches[0], stride_alignment);
                goto err;
        }

        intel_fb->obj = obj;

        ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
        if (ret)
                goto err;

        ret = drm_framebuffer_init(obj->base.dev,
                                   &intel_fb->base,
                                   &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                goto err;
        }

        return 0;

err:
        /* Drop the framebuffer reference taken at the top. */
        i915_gem_object_lock(obj);
        obj->framebuffer_references--;
        i915_gem_object_unlock(obj);
        return ret;
}
14533
14534 static struct drm_framebuffer *
14535 intel_user_framebuffer_create(struct drm_device *dev,
14536                               struct drm_file *filp,
14537                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
14538 {
14539         struct drm_framebuffer *fb;
14540         struct drm_i915_gem_object *obj;
14541         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14542
14543         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14544         if (!obj)
14545                 return ERR_PTR(-ENOENT);
14546
14547         fb = intel_framebuffer_create(obj, &mode_cmd);
14548         if (IS_ERR(fb))
14549                 i915_gem_object_put(obj);
14550
14551         return fb;
14552 }
14553
14554 static void intel_atomic_state_free(struct drm_atomic_state *state)
14555 {
14556         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14557
14558         drm_atomic_state_default_release(state);
14559
14560         i915_sw_fence_fini(&intel_state->commit_ready);
14561
14562         kfree(state);
14563 }
14564
/* Mode config vfuncs: fb creation, fbdev polling, and atomic state plumbing. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
14574
14575 /**
14576  * intel_init_display_hooks - initialize the display modesetting hooks
14577  * @dev_priv: device private
14578  */
14579 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14580 {
14581         intel_init_cdclk_hooks(dev_priv);
14582
14583         if (INTEL_INFO(dev_priv)->gen >= 9) {
14584                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14585                 dev_priv->display.get_initial_plane_config =
14586                         skylake_get_initial_plane_config;
14587                 dev_priv->display.crtc_compute_clock =
14588                         haswell_crtc_compute_clock;
14589                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14590                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14591         } else if (HAS_DDI(dev_priv)) {
14592                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14593                 dev_priv->display.get_initial_plane_config =
14594                         ironlake_get_initial_plane_config;
14595                 dev_priv->display.crtc_compute_clock =
14596                         haswell_crtc_compute_clock;
14597                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14598                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14599         } else if (HAS_PCH_SPLIT(dev_priv)) {
14600                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14601                 dev_priv->display.get_initial_plane_config =
14602                         ironlake_get_initial_plane_config;
14603                 dev_priv->display.crtc_compute_clock =
14604                         ironlake_crtc_compute_clock;
14605                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14606                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
14607         } else if (IS_CHERRYVIEW(dev_priv)) {
14608                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14609                 dev_priv->display.get_initial_plane_config =
14610                         i9xx_get_initial_plane_config;
14611                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14612                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14613                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14614         } else if (IS_VALLEYVIEW(dev_priv)) {
14615                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14616                 dev_priv->display.get_initial_plane_config =
14617                         i9xx_get_initial_plane_config;
14618                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14619                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14620                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14621         } else if (IS_G4X(dev_priv)) {
14622                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14623                 dev_priv->display.get_initial_plane_config =
14624                         i9xx_get_initial_plane_config;
14625                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14626                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14627                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14628         } else if (IS_PINEVIEW(dev_priv)) {
14629                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14630                 dev_priv->display.get_initial_plane_config =
14631                         i9xx_get_initial_plane_config;
14632                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14633                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14634                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14635         } else if (!IS_GEN2(dev_priv)) {
14636                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14637                 dev_priv->display.get_initial_plane_config =
14638                         i9xx_get_initial_plane_config;
14639                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14640                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14641                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14642         } else {
14643                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14644                 dev_priv->display.get_initial_plane_config =
14645                         i9xx_get_initial_plane_config;
14646                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
14647                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14648                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14649         }
14650
14651         if (IS_GEN5(dev_priv)) {
14652                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14653         } else if (IS_GEN6(dev_priv)) {
14654                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14655         } else if (IS_IVYBRIDGE(dev_priv)) {
14656                 /* FIXME: detect B0+ stepping and use auto training */
14657                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14658         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
14659                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14660         }
14661
14662         if (dev_priv->info.gen >= 9)
14663                 dev_priv->display.update_crtcs = skl_update_crtcs;
14664         else
14665                 dev_priv->display.update_crtcs = intel_update_crtcs;
14666
14667         switch (INTEL_INFO(dev_priv)->gen) {
14668         case 2:
14669                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
14670                 break;
14671
14672         case 3:
14673                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
14674                 break;
14675
14676         case 4:
14677         case 5:
14678                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
14679                 break;
14680
14681         case 6:
14682                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
14683                 break;
14684         case 7:
14685         case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
14686                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
14687                 break;
14688         case 9:
14689                 /* Drop through - unsupported since execlist only. */
14690         default:
14691                 /* Default just returns -ENODEV to indicate unsupported */
14692                 dev_priv->display.queue_flip = intel_default_queue_flip;
14693         }
14694 }
14695
14696 /*
14697  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
14698  * resume, or other times.  This quirk makes sure that's the case for
14699  * affected systems.
14700  */
14701 static void quirk_pipea_force(struct drm_device *dev)
14702 {
14703         struct drm_i915_private *dev_priv = to_i915(dev);
14704
14705         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
14706         DRM_INFO("applying pipe a force quirk\n");
14707 }
14708
14709 static void quirk_pipeb_force(struct drm_device *dev)
14710 {
14711         struct drm_i915_private *dev_priv = to_i915(dev);
14712
14713         dev_priv->quirks |= QUIRK_PIPEB_FORCE;
14714         DRM_INFO("applying pipe b force quirk\n");
14715 }
14716
14717 /*
14718  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14719  */
14720 static void quirk_ssc_force_disable(struct drm_device *dev)
14721 {
14722         struct drm_i915_private *dev_priv = to_i915(dev);
14723         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14724         DRM_INFO("applying lvds SSC disable quirk\n");
14725 }
14726
14727 /*
14728  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14729  * brightness value
14730  */
14731 static void quirk_invert_brightness(struct drm_device *dev)
14732 {
14733         struct drm_i915_private *dev_priv = to_i915(dev);
14734         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14735         DRM_INFO("applying inverted panel brightness quirk\n");
14736 }
14737
14738 /* Some VBT's incorrectly indicate no backlight is present */
14739 static void quirk_backlight_present(struct drm_device *dev)
14740 {
14741         struct drm_i915_private *dev_priv = to_i915(dev);
14742         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14743         DRM_INFO("applying backlight present quirk\n");
14744 }
14745
/*
 * PCI-ID matched quirk table entry: @hook runs when the device id matches
 * and the subsystem ids match or are wildcarded with PCI_ANY_ID.
 */
struct intel_quirk {
        int device;             /* PCI device id */
        int subsystem_vendor;   /* PCI subsystem vendor id or PCI_ANY_ID */
        int subsystem_device;   /* PCI subsystem device id or PCI_ANY_ID */
        void (*hook)(struct drm_device *dev);
};
14752
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
        void (*hook)(struct drm_device *dev);   /* quirk to apply on DMI match */
        const struct dmi_system_id (*dmi_id_list)[];    /* NULL-terminated match list */
};
14758
/* DMI match callback: log which machine triggered the brightness quirk. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
        DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
        return 1;       /* non-zero stops further DMI table scanning */
}
14764
/* DMI-matched quirks for machines without meaningful PCI subsystem ids. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
        {
                .dmi_id_list = &(const struct dmi_system_id[]) {
                        {
                                .callback = intel_dmi_reverse_brightness,
                                .ident = "NCR Corporation",
                                /* empty PRODUCT_NAME matches any product */
                                .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
                                            DMI_MATCH(DMI_PRODUCT_NAME, ""),
                                },
                        },
                        { }  /* terminating entry */
                },
                .hook = quirk_invert_brightness,
        },
};
14780
/* PCI-ID matched quirk table; see struct intel_quirk for match semantics. */
static struct intel_quirk intel_quirks[] = {
        /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
        { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

        /* 830 needs to leave pipe A & dpll A up */
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

        /* 830 needs to leave pipe B & dpll B up */
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

        /* Lenovo U160 cannot use SSC on LVDS */
        { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

        /* Sony Vaio Y cannot use SSC on LVDS */
        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

        /* Acer Aspire 5734Z must invert backlight brightness */
        { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

        /* Acer/eMachines G725 */
        { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

        /* Acer/eMachines e725 */
        { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

        /* Acer/Packard Bell NCL20 */
        { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

        /* Acer Aspire 4736Z */
        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

        /* Acer Aspire 5336 */
        { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

        /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
        { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

        /* Acer C720 Chromebook (Core i3 4005U) */
        { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

        /* Apple Macbook 2,1 (Core 2 T7400) */
        { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

        /* Apple Macbook 4,1 */
        { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

        /* Toshiba CB35 Chromebook (Celeron 2955U) */
        { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

        /* HP Chromebook 14 (Celeron 2955U) */
        { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

        /* Dell Chromebook 11 */
        { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

        /* Dell Chromebook 11 (2015 version) */
        { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
14842
14843 static void intel_init_quirks(struct drm_device *dev)
14844 {
14845         struct pci_dev *d = dev->pdev;
14846         int i;
14847
14848         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14849                 struct intel_quirk *q = &intel_quirks[i];
14850
14851                 if (d->device == q->device &&
14852                     (d->subsystem_vendor == q->subsystem_vendor ||
14853                      q->subsystem_vendor == PCI_ANY_ID) &&
14854                     (d->subsystem_device == q->subsystem_device ||
14855                      q->subsystem_device == PCI_ANY_ID))
14856                         q->hook(dev);
14857         }
14858         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14859                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14860                         intel_dmi_quirks[i].hook(dev);
14861         }
14862 }
14863
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        u8 sr1;
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

        /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
        vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
        /* Read-modify-write SR01 through the legacy VGA index/data ports. */
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
        vga_put(pdev, VGA_RSRC_LEGACY_IO);
        /* Give the SR01 write time to take effect before the plane disable. */
        udelay(300);

        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg);
}
14882
/* Hardware-touching part of modeset init: cdclk readout and clock gating. */
void intel_modeset_init_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        intel_update_cdclk(dev_priv);
        /* Seed the software cdclk state from the freshly read-out hw state. */
        dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;

        intel_init_clock_gating(dev_priv);
}
14892
14893 /*
14894  * Calculate what we think the watermarks should be for the state we've read
14895  * out of the hardware and then immediately program those watermarks so that
14896  * we ensure the hardware settings match our internal state.
14897  *
14898  * We can calculate what we think WM's should be by creating a duplicate of the
14899  * current state (which was constructed during hardware readout) and running it
14900  * through the atomic check code to calculate new watermark values in the
14901  * state object.
14902  */
14903 static void sanitize_watermarks(struct drm_device *dev)
14904 {
14905         struct drm_i915_private *dev_priv = to_i915(dev);
14906         struct drm_atomic_state *state;
14907         struct intel_atomic_state *intel_state;
14908         struct drm_crtc *crtc;
14909         struct drm_crtc_state *cstate;
14910         struct drm_modeset_acquire_ctx ctx;
14911         int ret;
14912         int i;
14913
14914         /* Only supported on platforms that use atomic watermark design */
14915         if (!dev_priv->display.optimize_watermarks)
14916                 return;
14917
14918         /*
14919          * We need to hold connection_mutex before calling duplicate_state so
14920          * that the connector loop is protected.
14921          */
14922         drm_modeset_acquire_init(&ctx, 0);
14923 retry:
14924         ret = drm_modeset_lock_all_ctx(dev, &ctx);
14925         if (ret == -EDEADLK) {
14926                 drm_modeset_backoff(&ctx);
14927                 goto retry;
14928         } else if (WARN_ON(ret)) {
14929                 goto fail;
14930         }
14931
14932         state = drm_atomic_helper_duplicate_state(dev, &ctx);
14933         if (WARN_ON(IS_ERR(state)))
14934                 goto fail;
14935
14936         intel_state = to_intel_atomic_state(state);
14937
14938         /*
14939          * Hardware readout is the only time we don't want to calculate
14940          * intermediate watermarks (since we don't trust the current
14941          * watermarks).
14942          */
14943         if (!HAS_GMCH_DISPLAY(dev_priv))
14944                 intel_state->skip_intermediate_wm = true;
14945
14946         ret = intel_atomic_check(dev, state);
14947         if (ret) {
14948                 /*
14949                  * If we fail here, it means that the hardware appears to be
14950                  * programmed in a way that shouldn't be possible, given our
14951                  * understanding of watermark requirements.  This might mean a
14952                  * mistake in the hardware readout code or a mistake in the
14953                  * watermark calculations for a given platform.  Raise a WARN
14954                  * so that this is noticeable.
14955                  *
14956                  * If this actually happens, we'll have to just leave the
14957                  * BIOS-programmed watermarks untouched and hope for the best.
14958                  */
14959                 WARN(true, "Could not determine valid watermarks for inherited state\n");
14960                 goto put_state;
14961         }
14962
14963         /* Write calculated watermark values back */
14964         for_each_new_crtc_in_state(state, crtc, cstate, i) {
14965                 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
14966
14967                 cs->wm.need_postvbl_update = true;
14968                 dev_priv->display.optimize_watermarks(intel_state, cs);
14969         }
14970
14971 put_state:
14972         drm_atomic_state_put(state);
14973 fail:
14974         drm_modeset_drop_locks(&ctx);
14975         drm_modeset_acquire_fini(&ctx);
14976 }
14977
/*
 * One-time modeset bring-up: mode config limits, quirks, PM, crtcs,
 * shared DPLLs, clocks, outputs, hw state readout and BIOS fb takeover.
 * Returns 0 on success or a negative error code from crtc init.
 */
int intel_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        enum pipe pipe;
        struct intel_crtc *crtc;

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;

        dev->mode_config.allow_fb_modifiers = true;

        dev->mode_config.funcs = &intel_mode_funcs;

        init_llist_head(&dev_priv->atomic_helper.free_list);
        INIT_WORK(&dev_priv->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);

        intel_init_quirks(dev);

        intel_init_pm(dev_priv);

        /* Nothing more to do on display-less (pipe-less) hardware. */
        if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return 0;

        /*
         * There may be no VBT; and if the BIOS enabled SSC we can
         * just keep using it to avoid unnecessary flicker.  Whereas if the
         * BIOS isn't using it, don't assume it will work even if the VBT
         * indicates as much.
         */
        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
                                            DREF_SSC1_ENABLE);

                if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
                        DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
                                     bios_lvds_use_ssc ? "en" : "dis",
                                     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
                        dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
                }
        }

        /* Maximum framebuffer dimensions by generation. */
        if (IS_GEN2(dev_priv)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        } else if (IS_GEN3(dev_priv)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
        }

        /* Maximum cursor dimensions by platform. */
        if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
                dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
        } else if (IS_GEN2(dev_priv)) {
                dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
                dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
        } else {
                dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }

        dev->mode_config.fb_base = ggtt->mappable_base;

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev_priv)->num_pipes,
                      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

        for_each_pipe(dev_priv, pipe) {
                int ret;

                ret = intel_crtc_init(dev_priv, pipe);
                if (ret) {
                        /* Tears down any crtcs created on earlier pipes too. */
                        drm_mode_config_cleanup(dev);
                        return ret;
                }
        }

        intel_shared_dpll_init(dev);

        intel_update_czclk(dev_priv);
        intel_modeset_init_hw(dev);

        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev_priv);

        /* Just disable it once at startup */
        i915_disable_vga(dev_priv);
        intel_setup_outputs(dev_priv);

        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                dev_priv->display.get_initial_plane_config(crtc,
                                                           &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH_DISPLAY(dev_priv))
                sanitize_watermarks(dev);

        return 0;
}
15113
/*
 * Force pipe A on by briefly running load detection on the first analog
 * (CRT) connector found.  Caller must already hold all modeset locks
 * (uses dev->mode_config.acquire_ctx).
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct drm_connector *crt = NULL;
        struct intel_load_detect_pipe load_detect_temp;
        struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
        int ret;

        /* We can't just switch on the pipe A, we need to set things up with a
         * proper mode and output configuration. As a gross hack, enable pipe A
         * by enabling the load detect pipe once. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
                        crt = &connector->base;
                        break;
                }
        }
        /* Iterator must be ended even after an early break. */
        drm_connector_list_iter_end(&conn_iter);

        /* No analog connector on this machine - nothing we can do. */
        if (!crt)
                return;

        ret = intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx);
        WARN(ret < 0, "All modeset mutexes are locked, but intel_get_load_detect_pipe failed\n");

        /* Positive return means the load detect pipe was acquired - release it. */
        if (ret > 0)
                intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
15144
15145 static bool
15146 intel_check_plane_mapping(struct intel_crtc *crtc)
15147 {
15148         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15149         u32 val;
15150
15151         if (INTEL_INFO(dev_priv)->num_pipes == 1)
15152                 return true;
15153
15154         val = I915_READ(DSPCNTR(!crtc->plane));
15155
15156         if ((val & DISPLAY_PLANE_ENABLE) &&
15157             (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15158                 return false;
15159
15160         return true;
15161 }
15162
15163 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15164 {
15165         struct drm_device *dev = crtc->base.dev;
15166         struct intel_encoder *encoder;
15167
15168         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15169                 return true;
15170
15171         return false;
15172 }
15173
15174 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15175 {
15176         struct drm_device *dev = encoder->base.dev;
15177         struct intel_connector *connector;
15178
15179         for_each_connector_on_encoder(dev, &encoder->base, connector)
15180                 return connector;
15181
15182         return NULL;
15183 }
15184
15185 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15186                               enum transcoder pch_transcoder)
15187 {
15188         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15189                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
15190 }
15191
/*
 * Bring one crtc's hardware and software state into agreement after hw
 * readout.  The BIOS/GOP or a resume can leave the pipe in a state our
 * driver would never generate; this fixes up the known offenders: debug
 * frame start delays, vblank bookkeeping, stray non-primary planes, the
 * gen2/3 plane->pipe mapping, the pipe A force quirk, encoder-less
 * active pipes, and FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders are skipped: no PIPECONF to poke there. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			trace_intel_disable_plane(&plane->base, crtc);
			plane->disable_plane(plane, crtc);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		crtc->base.primary->state->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15284
/*
 * Fix up an encoder whose software state is inconsistent with the pipe it
 * claims to drive: if it has an active connector but no active pipe
 * (typically fallout from resume register restore), disable it manually
 * and clamp the connector to DPMS off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15327
15328 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15329 {
15330         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15331
15332         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15333                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15334                 i915_disable_vga(dev_priv);
15335         }
15336 }
15337
15338 void i915_redisable_vga(struct drm_i915_private *dev_priv)
15339 {
15340         /* This function can be called both from intel_modeset_setup_hw_state or
15341          * at a very early point in our resume sequence, where the power well
15342          * structures are not yet restored. Since this function is at a very
15343          * paranoid "someone might have enabled VGA while we were not looking"
15344          * level, just check if the power well is enabled instead of trying to
15345          * follow the "don't touch the power well if we don't need it" policy
15346          * the rest of the driver uses. */
15347         if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15348                 return;
15349
15350         i915_redisable_vga_power_on(dev_priv);
15351
15352         intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15353 }
15354
15355 static bool primary_get_hw_state(struct intel_plane *plane)
15356 {
15357         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15358
15359         return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15360 }
15361
15362 /* FIXME read out full plane state for all planes */
15363 static void readout_plane_state(struct intel_crtc *crtc)
15364 {
15365         struct intel_plane *primary = to_intel_plane(crtc->base.primary);
15366         bool visible;
15367
15368         visible = crtc->active && primary_get_hw_state(primary);
15369
15370         intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
15371                                 to_intel_plane_state(primary->base.state),
15372                                 visible);
15373 }
15374
/*
 * Read the current display hardware state (pipes, shared DPLLs, encoders,
 * connectors) into the atomic software state, destroying whatever software
 * state was there before.  This only *reads*; fixing up inconsistencies is
 * done afterwards by intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	/* Pipes: reset each crtc state and refill it from the hardware. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	/* Shared DPLLs: hw on/off state plus which crtcs reference each. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->state.crtc_mask, pll->on);
	}

	/* Encoders: link each enabled encoder to the crtc it drives. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			crtc_state->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Connectors: dpms state, and connector/encoder masks on the crtc. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive modes, pixel rates and scanline offsets for active pipes. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int pixclk = 0;

		crtc->base.hwmode = crtc_state->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
			    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				pixclk = crtc_state->pixel_rate;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);

			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
				pixclk = DIV_ROUND_UP(pixclk * 100, 95);

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
15529
15530 static void
15531 get_encoder_power_domains(struct drm_i915_private *dev_priv)
15532 {
15533         struct intel_encoder *encoder;
15534
15535         for_each_intel_encoder(&dev_priv->drm, encoder) {
15536                 u64 get_domains;
15537                 enum intel_display_power_domain domain;
15538
15539                 if (!encoder->get_power_domains)
15540                         continue;
15541
15542                 get_domains = encoder->get_power_domains(encoder);
15543                 for_each_power_domain(domain, get_domains)
15544                         intel_display_power_get(dev_priv, domain);
15545         }
15546 }
15547
/* Scan out the current hw modeset state,
 * and sanitize it to the current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	/* Encoders first; stray enabled encoders without connectors are
	 * then fixed up by the crtc pass (see intel_sanitize_encoder). */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Switch off any PLL that is on but no longer used by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and, where supported, sanitize) watermark hw state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (IS_GEN9(dev_priv)) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	/* Active crtcs should already hold their power domain references;
	 * anything extra returned here is unexpected (hence the WARN). */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_power_domains_verify_state(dev_priv);

	intel_fbc_init_pipe_state(dev_priv);
}
15616
15617 void intel_display_resume(struct drm_device *dev)
15618 {
15619         struct drm_i915_private *dev_priv = to_i915(dev);
15620         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15621         struct drm_modeset_acquire_ctx ctx;
15622         int ret;
15623
15624         dev_priv->modeset_restore_state = NULL;
15625         if (state)
15626                 state->acquire_ctx = &ctx;
15627
15628         drm_modeset_acquire_init(&ctx, 0);
15629
15630         while (1) {
15631                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15632                 if (ret != -EDEADLK)
15633                         break;
15634
15635                 drm_modeset_backoff(&ctx);
15636         }
15637
15638         if (!ret)
15639                 ret = __intel_display_resume(dev, state, &ctx);
15640
15641         drm_modeset_drop_locks(&ctx);
15642         drm_modeset_acquire_fini(&ctx);
15643
15644         if (ret)
15645                 DRM_ERROR("Restoring old state failed with %i\n", ret);
15646         if (state)
15647                 drm_atomic_state_put(state);
15648 }
15649
/* GEM-dependent display init: GT powersave and the video overlay. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_init_gt_powersave(dev_priv);
	intel_setup_overlay(dev_priv);
}
15658
/*
 * Late connector registration hook: register the backlight device for
 * this connector.  Returns 0 on success or the negative error code from
 * intel_backlight_device_register().
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/*
	 * There is only a single failure site, so the old
	 * "goto err; ... err: return ret;" round-trip was dead weight -
	 * return the result directly.
	 */
	return intel_backlight_device_register(intel_connector);
}
15673
/* Undo intel_connector_register(): tear down backlight device and state. */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
15681
/*
 * Driver-unload counterpart of the modeset init paths.  The teardown
 * order below is deliberate; see the individual comments.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* All pending atomic cleanup work must have finished first. */
	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev_priv);
}
15719
15720 void intel_connector_attach_encoder(struct intel_connector *connector,
15721                                     struct intel_encoder *encoder)
15722 {
15723         connector->encoder = encoder;
15724         drm_mode_connector_attach_encoder(&connector->base,
15725                                           &encoder->base);
15726 }
15727
15728 /*
15729  * set vga decode state - true == enable VGA decode
15730  */
15731 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
15732 {
15733         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15734         u16 gmch_ctrl;
15735
15736         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15737                 DRM_ERROR("failed to read control word\n");
15738                 return -EIO;
15739         }
15740
15741         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15742                 return 0;
15743
15744         if (state)
15745                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15746         else
15747                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
15748
15749         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15750                 DRM_ERROR("failed to write control word\n");
15751                 return -EIO;
15752         }
15753
15754         return 0;
15755 }
15756
15757 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
15758
/*
 * Snapshot of display-related registers captured at GPU error time by
 * intel_display_capture_error_state() and later pretty-printed by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW/BDW power well driver register (HSW_PWR_WELL_DRIVER). */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[] below. */
	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* NOTE(review): not captured in this file - confirm writer */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* False when the pipe's power domain was off at capture time;
		 * the register fields below are then not valid. */
		bool power_domain_on;
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT, gmch platforms only */
	} pipe[I915_MAX_PIPES];

	/* Primary plane registers; which fields are captured depends on gen. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		/* Timing registers for this transcoder. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* sized for 3 pipes + eDP; see capture code */
};
15802
15803 struct intel_display_error_state *
15804 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
15805 {
15806         struct intel_display_error_state *error;
15807         int transcoders[] = {
15808                 TRANSCODER_A,
15809                 TRANSCODER_B,
15810                 TRANSCODER_C,
15811                 TRANSCODER_EDP,
15812         };
15813         int i;
15814
15815         if (INTEL_INFO(dev_priv)->num_pipes == 0)
15816                 return NULL;
15817
15818         error = kzalloc(sizeof(*error), GFP_ATOMIC);
15819         if (error == NULL)
15820                 return NULL;
15821
15822         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
15823                 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
15824
15825         for_each_pipe(dev_priv, i) {
15826                 error->pipe[i].power_domain_on =
15827                         __intel_display_power_is_enabled(dev_priv,
15828                                                          POWER_DOMAIN_PIPE(i));
15829                 if (!error->pipe[i].power_domain_on)
15830                         continue;
15831
15832                 error->cursor[i].control = I915_READ(CURCNTR(i));
15833                 error->cursor[i].position = I915_READ(CURPOS(i));
15834                 error->cursor[i].base = I915_READ(CURBASE(i));
15835
15836                 error->plane[i].control = I915_READ(DSPCNTR(i));
15837                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
15838                 if (INTEL_GEN(dev_priv) <= 3) {
15839                         error->plane[i].size = I915_READ(DSPSIZE(i));
15840                         error->plane[i].pos = I915_READ(DSPPOS(i));
15841                 }
15842                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
15843                         error->plane[i].addr = I915_READ(DSPADDR(i));
15844                 if (INTEL_GEN(dev_priv) >= 4) {
15845                         error->plane[i].surface = I915_READ(DSPSURF(i));
15846                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
15847                 }
15848
15849                 error->pipe[i].source = I915_READ(PIPESRC(i));
15850
15851                 if (HAS_GMCH_DISPLAY(dev_priv))
15852                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
15853         }
15854
15855         /* Note: this does not include DSI transcoders. */
15856         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
15857         if (HAS_DDI(dev_priv))
15858                 error->num_transcoders++; /* Account for eDP. */
15859
15860         for (i = 0; i < error->num_transcoders; i++) {
15861                 enum transcoder cpu_transcoder = transcoders[i];
15862
15863                 error->transcoder[i].power_domain_on =
15864                         __intel_display_power_is_enabled(dev_priv,
15865                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
15866                 if (!error->transcoder[i].power_domain_on)
15867                         continue;
15868
15869                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
15870
15871                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
15872                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
15873                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
15874                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
15875                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
15876                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
15877                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
15878         }
15879
15880         return error;
15881 }
15882
15883 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15884
15885 void
15886 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
15887                                 struct intel_display_error_state *error)
15888 {
15889         struct drm_i915_private *dev_priv = m->i915;
15890         int i;
15891
15892         if (!error)
15893                 return;
15894
15895         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
15896         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
15897                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
15898                            error->power_well_driver);
15899         for_each_pipe(dev_priv, i) {
15900                 err_printf(m, "Pipe [%d]:\n", i);
15901                 err_printf(m, "  Power: %s\n",
15902                            onoff(error->pipe[i].power_domain_on));
15903                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
15904                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
15905
15906                 err_printf(m, "Plane [%d]:\n", i);
15907                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
15908                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
15909                 if (INTEL_GEN(dev_priv) <= 3) {
15910                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
15911                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
15912                 }
15913                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
15914                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
15915                 if (INTEL_GEN(dev_priv) >= 4) {
15916                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
15917                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
15918                 }
15919
15920                 err_printf(m, "Cursor [%d]:\n", i);
15921                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
15922                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
15923                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
15924         }
15925
15926         for (i = 0; i < error->num_transcoders; i++) {
15927                 err_printf(m, "CPU transcoder: %s\n",
15928                            transcoder_name(error->transcoder[i].cpu_transcoder));
15929                 err_printf(m, "  Power: %s\n",
15930                            onoff(error->transcoder[i].power_domain_on));
15931                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
15932                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
15933                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
15934                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
15935                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
15936                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
15937                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
15938         }
15939 }
15940
15941 #endif