/*
 * Source: git.kernelconcepts.de — karo-tx-linux.git
 * File: drivers/gpu/drm/i915/intel_display.c
 * Snapshot at commit: "drm/i915/skl: handle all pixel formats in
 * skylake_update_primary_plane()"
 */
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include <drm/drm_atomic_helper.h>
41 #include <drm/drm_dp_helper.h>
42 #include <drm/drm_crtc_helper.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_rect.h>
45 #include <linux/dma_remapping.h>
46
/* Primary plane formats supported by all gen */
#define COMMON_PRIMARY_FORMATS \
	DRM_FORMAT_C8, \
	DRM_FORMAT_RGB565, \
	DRM_FORMAT_XRGB8888, \
	DRM_FORMAT_ARGB8888

/* Primary plane formats for gen <= 3 (adds the 15bpp 1555 variants) */
static const uint32_t intel_primary_formats_gen2[] = {
	COMMON_PRIMARY_FORMATS,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
};
60
61 /* Primary plane formats for gen >= 4 */
62 static const uint32_t intel_primary_formats_gen4[] = {
63         COMMON_PRIMARY_FORMATS, \
64         DRM_FORMAT_XBGR8888,
65         DRM_FORMAT_ABGR8888,
66         DRM_FORMAT_XRGB2101010,
67         DRM_FORMAT_ARGB2101010,
68         DRM_FORMAT_XBGR2101010,
69         DRM_FORMAT_ABGR2101010,
70 };
71
/* Cursor formats: the cursor plane only takes 32bpp ARGB */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
76
/* Forward declarations for helpers defined later in this file. */
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *old_fb);
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *crtc);
static void intel_finish_crtc_commit(struct drm_crtc *crtc);
104
105 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
106 {
107         if (!connector->mst_port)
108                 return connector->encoder;
109         else
110                 return &connector->mst_port->mst_encoders[pipe]->base;
111 }
112
/* Inclusive [min, max] range for one PLL divisor. */
typedef struct {
	int	min, max;
} intel_range_t;

/*
 * p2 divider selection: p2_slow is used below dot_limit, p2_fast at or
 * above it (see the find_best_dpll functions below).
 */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
/* Complete set of PLL divisor constraints for one platform/output combo. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};
127
/*
 * intel_pch_rawclk - read the PCH raw clock frequency field from the
 * PCH_RAWCLK_FREQ register. Only meaningful on PCH-split platforms,
 * hence the WARN_ON.
 */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
137
138 static inline u32 /* units of 100MHz */
139 intel_fdi_link_freq(struct drm_device *dev)
140 {
141         if (IS_GEN5(dev)) {
142                 struct drm_i915_private *dev_priv = dev->dev_private;
143                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
144         } else
145                 return 27;
146 }
147
/* i8xx (gen2): PLL limits for DAC (VGA) outputs. */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* i8xx (gen2): PLL limits for DVO outputs (same p2 above and below the limit). */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* i8xx (gen2): PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* i9xx: PLL limits for SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx: PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


/* G4x: PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x: PLL limits for HDMI (also used for analog, see intel_g4x_limit()). */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x: PLL limits for single-channel LVDS (fixed p2, dot_limit unused). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x: PLL limits for dual-channel LVDS (fixed p2, dot_limit unused). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview: PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview: PLL limits for LVDS panels (m1 unused here too). */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
297
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake: single-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake: dual-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake: dual-channel LVDS with 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* Valleyview: note no .m/.p ranges — intel_PLL_is_valid() skips them on VLV. */
static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4860000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits — matches the n << 22 in chv_clock(). */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
400
/*
 * vlv_clock - derive the composite m and p dividers, VCO and dot clock for
 * Valleyview from the individual m1/m2/n/p1/p2 fields of @clock.
 */
static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	/* Guard the divisions below against n == 0 or p == 0. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
410
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Walk every encoder currently attached to this crtc. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
425
/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Check the staged (not yet committed) crtc assignment. */
	for_each_intel_encoder(dev, encoder)
		if (encoder->new_crtc == crtc && encoder->type == type)
			return true;

	return false;
}
443
444 static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
445                                                 int refclk)
446 {
447         struct drm_device *dev = crtc->base.dev;
448         const intel_limit_t *limit;
449
450         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
451                 if (intel_is_dual_link_lvds(dev)) {
452                         if (refclk == 100000)
453                                 limit = &intel_limits_ironlake_dual_lvds_100m;
454                         else
455                                 limit = &intel_limits_ironlake_dual_lvds;
456                 } else {
457                         if (refclk == 100000)
458                                 limit = &intel_limits_ironlake_single_lvds_100m;
459                         else
460                                 limit = &intel_limits_ironlake_single_lvds;
461                 }
462         } else
463                 limit = &intel_limits_ironlake_dac;
464
465         return limit;
466 }
467
468 static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
469 {
470         struct drm_device *dev = crtc->base.dev;
471         const intel_limit_t *limit;
472
473         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
474                 if (intel_is_dual_link_lvds(dev))
475                         limit = &intel_limits_g4x_dual_channel_lvds;
476                 else
477                         limit = &intel_limits_g4x_single_channel_lvds;
478         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
479                    intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
480                 limit = &intel_limits_g4x_hdmi;
481         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
482                 limit = &intel_limits_g4x_sdvo;
483         } else /* The option is for other outputs */
484                 limit = &intel_limits_i9xx_sdvo;
485
486         return limit;
487 }
488
489 static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
490 {
491         struct drm_device *dev = crtc->base.dev;
492         const intel_limit_t *limit;
493
494         if (HAS_PCH_SPLIT(dev))
495                 limit = intel_ironlake_limit(crtc, refclk);
496         else if (IS_G4X(dev)) {
497                 limit = intel_g4x_limit(crtc);
498         } else if (IS_PINEVIEW(dev)) {
499                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
500                         limit = &intel_limits_pineview_lvds;
501                 else
502                         limit = &intel_limits_pineview_sdvo;
503         } else if (IS_CHERRYVIEW(dev)) {
504                 limit = &intel_limits_chv;
505         } else if (IS_VALLEYVIEW(dev)) {
506                 limit = &intel_limits_vlv;
507         } else if (!IS_GEN2(dev)) {
508                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
509                         limit = &intel_limits_i9xx_lvds;
510                 else
511                         limit = &intel_limits_i9xx_sdvo;
512         } else {
513                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
514                         limit = &intel_limits_i8xx_lvds;
515                 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
516                         limit = &intel_limits_i8xx_dvo;
517                 else
518                         limit = &intel_limits_i8xx_dac;
519         }
520         return limit;
521 }
522
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	/* Single combined m divider; +2 since the register holds (m - 2). */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* Guard the divisions below against n == 0 or p == 0. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
533
534 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
535 {
536         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
537 }
538
/*
 * i9xx_clock - derive composite dividers, VCO and dot clock for i9xx-style
 * DPLLs. n is programmed as (value - 2), hence the +2 in the divisions.
 */
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* Guard the divisions below against a zero divisor. */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
548
/*
 * chv_clock - Cherryview variant: m2 carries 22 fractional bits (see
 * intel_limits_chv), so the VCO math is done in 64 bits against n << 22.
 */
static void chv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	/* Guard the divisions below against n == 0 or p == 0. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
559
/* Reject helper: returns false from the *enclosing* function. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Each raw divisor must fall inside the platform's legal range. */
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/*
	 * The m1 > m2 rule doesn't apply on Pineview (m1 is unused, see the
	 * Pineview limits) or VLV — presumably because their m dividers are
	 * computed differently; see pineview_clock()/vlv_clock().
	 */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* The VLV limit table defines no overall m/p ranges, so skip them. */
	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
600
/*
 * i9xx_find_best_dpll - exhaustively search the divisor space for the
 * combination whose resulting dot clock is closest to @target (and, if
 * @match_clock is given, whose p divider matches it exactly).
 *
 * Returns true if any valid candidate improved on the initial error of
 * @target; the winner is left in *best_clock. Note the iteration order is
 * part of the behavior: on equal error, the earlier candidate wins.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;	/* best error so far; start at worst case */

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: pick p2 from the dot clock threshold. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* only m1 > m2 combinations are valid here */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
661
/*
 * pnv_find_best_dpll - Pineview variant of i9xx_find_best_dpll(): uses
 * pineview_clock() and drops the m1 > m2 early-out (Pineview has a single
 * combined m divider; m1 is unused — see the Pineview limit tables).
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;	/* best error so far; start at worst case */

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: pick p2 from the dot clock threshold. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
720
/*
 * g4x_find_best_dpll - find divisors whose dot clock is within roughly
 * 0.585% of @target. Iterates n ascending but m1/m2/p1 descending, and
 * shrinks max_n on every improvement — preferring small n and large m
 * per hardware recommendation.
 *
 * NOTE(review): @match_clock is accepted but never checked here, unlike
 * the other find_best_dpll variants — confirm whether that is intended.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		/* LVDS: derive p2 from the current single/dual channel state. */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* never revisit larger n than the best hit's */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
777
/*
 * vlv_find_best_dpll - find the best-matching DPLL divider settings (VLV)
 * @limit: per-platform divider limits
 * @crtc: CRTC the clock is being computed for
 * @target: target dot clock in kHz (scaled by 5 internally for the
 *	fast clock)
 * @refclk: reference clock in kHz
 * @match_clock: unused; kept for prototype parity with the other
 *	find_best_dpll variants
 * @best_clock: filled with the best divider settings found, zeroed first
 *
 * Searches n/p1/p2/m1, deriving m2 directly for each combination, and
 * ranks candidates by clock error in ppm.  Returns true if a usable
 * candidate was found.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					/* solve m2 for the target clock given
					 * the rest of the dividers */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					/* within 100 ppm: prefer the largest
					 * post divider p */
					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					/* otherwise only take candidates that
					 * improve the error by > 10 ppm */
					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}
834
835 static bool
836 chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
837                    int target, int refclk, intel_clock_t *match_clock,
838                    intel_clock_t *best_clock)
839 {
840         struct drm_device *dev = crtc->base.dev;
841         intel_clock_t clock;
842         uint64_t m2;
843         int found = false;
844
845         memset(best_clock, 0, sizeof(*best_clock));
846
847         /*
848          * Based on hardware doc, the n always set to 1, and m1 always
849          * set to 2.  If requires to support 200Mhz refclk, we need to
850          * revisit this because n may not 1 anymore.
851          */
852         clock.n = 1, clock.m1 = 2;
853         target *= 5;    /* fast clock */
854
855         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
856                 for (clock.p2 = limit->p2.p2_fast;
857                                 clock.p2 >= limit->p2.p2_slow;
858                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
859
860                         clock.p = clock.p1 * clock.p2;
861
862                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
863                                         clock.n) << 22, refclk * clock.m1);
864
865                         if (m2 > INT_MAX/clock.m1)
866                                 continue;
867
868                         clock.m2 = m2;
869
870                         chv_clock(refclk, &clock);
871
872                         if (!intel_PLL_is_valid(dev, limit, &clock))
873                                 continue;
874
875                         /* based on hardware requirement, prefer bigger p
876                          */
877                         if (clock.p > best_clock->p) {
878                                 *best_clock = clock;
879                                 found = true;
880                         }
881                 }
882         }
883
884         return found;
885 }
886
887 bool intel_crtc_active(struct drm_crtc *crtc)
888 {
889         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
890
891         /* Be paranoid as we can arrive here with only partial
892          * state retrieved from the hardware during setup.
893          *
894          * We can ditch the adjusted_mode.crtc_clock check as soon
895          * as Haswell has gained clock readout/fastboot support.
896          *
897          * We can ditch the crtc->primary->fb check as soon as we can
898          * properly reconstruct framebuffers.
899          */
900         return intel_crtc->active && crtc->primary->fb &&
901                 intel_crtc->config->base.adjusted_mode.crtc_clock;
902 }
903
904 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
905                                              enum pipe pipe)
906 {
907         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
908         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
909
910         return intel_crtc->config->cpu_transcoder;
911 }
912
913 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
914 {
915         struct drm_i915_private *dev_priv = dev->dev_private;
916         u32 reg = PIPEDSL(pipe);
917         u32 line1, line2;
918         u32 line_mask;
919
920         if (IS_GEN2(dev))
921                 line_mask = DSL_LINEMASK_GEN2;
922         else
923                 line_mask = DSL_LINEMASK_GEN3;
924
925         line1 = I915_READ(reg) & line_mask;
926         mdelay(5);
927         line2 = I915_READ(reg) & line_mask;
928
929         return line1 == line2;
930 }
931
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* gen2/3 have no pipe state bit: wait for the display
		 * line to settle instead */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}
968
969 /*
970  * ibx_digital_port_connected - is the specified port connected?
971  * @dev_priv: i915 private structure
972  * @port: the port to test
973  *
974  * Returns true if @port is connected, false otherwise.
975  */
976 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
977                                 struct intel_digital_port *port)
978 {
979         u32 bit;
980
981         if (HAS_PCH_IBX(dev_priv->dev)) {
982                 switch (port->port) {
983                 case PORT_B:
984                         bit = SDE_PORTB_HOTPLUG;
985                         break;
986                 case PORT_C:
987                         bit = SDE_PORTC_HOTPLUG;
988                         break;
989                 case PORT_D:
990                         bit = SDE_PORTD_HOTPLUG;
991                         break;
992                 default:
993                         return true;
994                 }
995         } else {
996                 switch (port->port) {
997                 case PORT_B:
998                         bit = SDE_PORTB_HOTPLUG_CPT;
999                         break;
1000                 case PORT_C:
1001                         bit = SDE_PORTC_HOTPLUG_CPT;
1002                         break;
1003                 case PORT_D:
1004                         bit = SDE_PORTD_HOTPLUG_CPT;
1005                         break;
1006                 default:
1007                         return true;
1008                 }
1009         }
1010
1011         return I915_READ(SDEISR) & bit;
1012 }
1013
/* Human-readable name for an on/off state, used in assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
1018
/* Only for pre-ILK configs */
/* Warn if the DPLL VCO enable bit does not match the expected @state. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1034
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Warn if the DSI PLL VCO enable bit does not match the expected @state. */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* CCK sideband reads must be serialized via dpio_lock */
	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1052
1053 struct intel_shared_dpll *
1054 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1055 {
1056         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1057
1058         if (crtc->config->shared_dpll < 0)
1059                 return NULL;
1060
1061         return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1062 }
1063
1064 /* For ILK+ */
1065 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1066                         struct intel_shared_dpll *pll,
1067                         bool state)
1068 {
1069         bool cur_state;
1070         struct intel_dpll_hw_state hw_state;
1071
1072         if (WARN (!pll,
1073                   "asserting DPLL %s with no DPLL\n", state_string(state)))
1074                 return;
1075
1076         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1077         I915_STATE_WARN(cur_state != state,
1078              "%s assertion failure (expected %s, current %s)\n",
1079              pll->name, state_string(state), state_string(cur_state));
1080 }
1081
/* Warn if the FDI TX enable state does not match the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1107
1108 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1109                           enum pipe pipe, bool state)
1110 {
1111         int reg;
1112         u32 val;
1113         bool cur_state;
1114
1115         reg = FDI_RX_CTL(pipe);
1116         val = I915_READ(reg);
1117         cur_state = !!(val & FDI_RX_ENABLE);
1118         I915_STATE_WARN(cur_state != state,
1119              "FDI RX state assertion failure (expected %s, current %s)\n",
1120              state_string(state), state_string(cur_state));
1121 }
1122 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1123 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1124
/* Warn if the FDI TX PLL is not enabled, on platforms where it can be off. */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1143
1144 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1145                        enum pipe pipe, bool state)
1146 {
1147         int reg;
1148         u32 val;
1149         bool cur_state;
1150
1151         reg = FDI_RX_CTL(pipe);
1152         val = I915_READ(reg);
1153         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1154         I915_STATE_WARN(cur_state != state,
1155              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1156              state_string(state), state_string(cur_state));
1157 }
1158
/*
 * Warn if the panel power sequencer registers driving @pipe are still
 * locked (or the panel is on), which would prevent PLL/panel writes.
 * The register to check and the pipe the panel is attached to are both
 * platform dependent.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* DDI platforms are not handled here */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* considered unlocked when the panel is off or the unlock
	 * pattern is present in the control register */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1200
1201 static void assert_cursor(struct drm_i915_private *dev_priv,
1202                           enum pipe pipe, bool state)
1203 {
1204         struct drm_device *dev = dev_priv->dev;
1205         bool cur_state;
1206
1207         if (IS_845G(dev) || IS_I865G(dev))
1208                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1209         else
1210                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1211
1212         I915_STATE_WARN(cur_state != state,
1213              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1214              pipe_name(pipe), state_string(state), state_string(cur_state));
1215 }
1216 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1217 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1218
/* Warn if the pipe enable state does not match the expected @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	/* with the power domain off the register can't be read; treat
	 * the pipe as disabled */
	if (!intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
1246
1247 static void assert_plane(struct drm_i915_private *dev_priv,
1248                          enum plane plane, bool state)
1249 {
1250         int reg;
1251         u32 val;
1252         bool cur_state;
1253
1254         reg = DSPCNTR(plane);
1255         val = I915_READ(reg);
1256         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1257         I915_STATE_WARN(cur_state != state,
1258              "plane %c assertion failure (expected %s, current %s)\n",
1259              plane_name(plane), state_string(state), state_string(cur_state));
1260 }
1261
1262 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1263 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1264
/* Warn if any primary plane that could scan out @pipe is still enabled. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1294
/*
 * Warn if any sprite plane on @pipe is still enabled.  The sprite
 * register layout differs per generation: gen9 universal planes,
 * VLV per-pipe sprites, and the single-sprite gen5-8 layouts.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, sprite;
	u32 val;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(pipe, sprite) {
			val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(pipe, sprite) {
			reg = SPCNTR(pipe, sprite);
			val = I915_READ(reg);
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1331
/*
 * Warn if vblank interrupts are still enabled on @crtc.  If the
 * reference grab succeeds (returns 0) the warning fires and we
 * immediately drop the reference we just took to stay balanced.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1337
1338 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1339 {
1340         u32 val;
1341         bool enabled;
1342
1343         I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1344
1345         val = I915_READ(PCH_DREF_CONTROL);
1346         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1347                             DREF_SUPERSPREAD_SOURCE_MASK));
1348         I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1349 }
1350
1351 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1352                                            enum pipe pipe)
1353 {
1354         int reg;
1355         u32 val;
1356         bool enabled;
1357
1358         reg = PCH_TRANSCONF(pipe);
1359         val = I915_READ(reg);
1360         enabled = !!(val & TRANS_ENABLE);
1361         I915_STATE_WARN(enabled,
1362              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1363              pipe_name(pipe));
1364 }
1365
/*
 * Is the PCH DP port with control value @val enabled and routed to
 * @pipe?  The pipe/transcoder select encoding depends on the platform;
 * @port_sel is only meaningful for CPT, where routing lives in
 * TRANS_DP_CTL rather than the port register itself.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		/* legacy encoding: pipe select lives at bit 30 */
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}
1386
1387 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1388                               enum pipe pipe, u32 val)
1389 {
1390         if ((val & SDVO_ENABLE) == 0)
1391                 return false;
1392
1393         if (HAS_PCH_CPT(dev_priv->dev)) {
1394                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1395                         return false;
1396         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1397                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1398                         return false;
1399         } else {
1400                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1401                         return false;
1402         }
1403         return true;
1404 }
1405
1406 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1407                               enum pipe pipe, u32 val)
1408 {
1409         if ((val & LVDS_PORT_EN) == 0)
1410                 return false;
1411
1412         if (HAS_PCH_CPT(dev_priv->dev)) {
1413                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1414                         return false;
1415         } else {
1416                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1417                         return false;
1418         }
1419         return true;
1420 }
1421
1422 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1423                               enum pipe pipe, u32 val)
1424 {
1425         if ((val & ADPA_DAC_ENABLE) == 0)
1426                 return false;
1427         if (HAS_PCH_CPT(dev_priv->dev)) {
1428                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1429                         return false;
1430         } else {
1431                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1432                         return false;
1433         }
1434         return true;
1435 }
1436
/* Warn if the PCH DP port at @reg is enabled on transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* a disabled IBX port must not leave transcoder B selected */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1449
/* Warn if the PCH HDMI port at @reg is enabled on transcoder @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* a disabled IBX port must not leave pipe B selected */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1462
1463 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1464                                       enum pipe pipe)
1465 {
1466         int reg;
1467         u32 val;
1468
1469         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1470         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1471         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1472
1473         reg = PCH_ADPA;
1474         val = I915_READ(reg);
1475         I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1476              "PCH VGA enabled on transcoder %c, should be disabled\n",
1477              pipe_name(pipe));
1478
1479         reg = PCH_LVDS;
1480         val = I915_READ(reg);
1481         I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1482              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1483              pipe_name(pipe));
1484
1485         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1486         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1487         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1488 }
1489
/*
 * Map DPIO PHYs to their IOSF sideband ports.  DPIO_PHY_IOSF_PORT()
 * expands to an lvalue in dev_priv, so these assignments record the
 * mapping for later sideband accesses.  VLV/CHV only.
 */
static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}
1509
/*
 * Enable the DPLL for @crtc on Valleyview using the precomputed
 * hardware state in @pipe_config.  The pipe must already be disabled.
 * The write/wait sequence follows the hardware bring-up requirements
 * and must not be reordered.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1548
/*
 * chv_enable_pll - enable the DPLL for a pipe on Cherryview
 * @crtc: crtc whose pipe PLL to enable
 * @pipe_config: state carrying the precomputed DPLL register values
 *
 * Re-enables the PHY's 10 bit clock for the pipe's DPIO channel, then
 * enables the PLL itself and waits for it to lock.  The pipe must still
 * be disabled.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));

        /* DPIO sideband accesses are serialized by dpio_lock */
        mutex_lock(&dev_priv->dpio_lock);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);

        /* not sure when this should be written */
        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));

        mutex_unlock(&dev_priv->dpio_lock);
}
1587
1588 static int intel_num_dvo_pipes(struct drm_device *dev)
1589 {
1590         struct intel_crtc *crtc;
1591         int count = 0;
1592
1593         for_each_intel_crtc(dev, crtc)
1594                 count += crtc->active &&
1595                         intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1596
1597         return count;
1598 }
1599
/*
 * i9xx_enable_pll - enable the DPLL for a pipe (pre-ILK)
 * @crtc: crtc whose pipe PLL to enable
 *
 * Programs and enables the pipe DPLL using the precomputed values in
 * crtc->config, handling the i830 DVO 2x clock workaround, then rewrites
 * the register a few times for warmup.  The pipe must still be disabled.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = DPLL(crtc->pipe);
        u32 dpll = crtc->config->dpll_hw_state.dpll;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* No really, not for ILK+ */
        BUG_ON(INTEL_INFO(dev)->gen >= 5);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev) && !IS_I830(dev))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /* Enable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
                /*
                 * It appears to be important that we don't enable this
                 * for the current pipe before otherwise configuring the
                 * PLL. No idea how this should be handled if multiple
                 * DVO outputs are enabled simultaneosly.
                 */
                dpll |= DPLL_DVO_2X_MODE;
                I915_WRITE(DPLL(!crtc->pipe),
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc->config->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
}
1656
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe PLL to disable
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = crtc->pipe;

        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) &&
            intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
            intel_num_dvo_pipes(dev) == 1) {
                /* This is the last DVO pipe going down, so the 2x clock
                 * can be dropped from both PLLs. */
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
        }

        /* Don't disable pipe or pipe PLLs if needed */
        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        /* Actually turn the PLL off */
        I915_WRITE(DPLL(pipe), 0);
        POSTING_READ(DPLL(pipe));
}
1693
1694 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1695 {
1696         u32 val = 0;
1697
1698         /* Make sure the pipe isn't still relying on us */
1699         assert_pipe_disabled(dev_priv, pipe);
1700
1701         /*
1702          * Leave integrated clock source and reference clock enabled for pipe B.
1703          * The latter is needed for VGA hotplug / manual detection.
1704          */
1705         if (pipe == PIPE_B)
1706                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1707         I915_WRITE(DPLL(pipe), val);
1708         POSTING_READ(DPLL(pipe));
1709
1710 }
1711
/*
 * chv_disable_pll - disable the DPLL for a pipe on Cherryview
 * @dev_priv: i915 private structure
 * @pipe: pipe whose PLL to disable
 *
 * Turns the PLL off, then drops the PHY's 10 bit clock and the
 * left/right clock distribution for the pipe's DPIO channel.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        /* Set PLL en = 0 */
        val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;
        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        /* DPIO sideband accesses are serialized by dpio_lock */
        mutex_lock(&dev_priv->dpio_lock);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        /* disable left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        mutex_unlock(&dev_priv->dpio_lock);
}
1747
1748 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1749                 struct intel_digital_port *dport)
1750 {
1751         u32 port_mask;
1752         int dpll_reg;
1753
1754         switch (dport->port) {
1755         case PORT_B:
1756                 port_mask = DPLL_PORTB_READY_MASK;
1757                 dpll_reg = DPLL(0);
1758                 break;
1759         case PORT_C:
1760                 port_mask = DPLL_PORTC_READY_MASK;
1761                 dpll_reg = DPLL(0);
1762                 break;
1763         case PORT_D:
1764                 port_mask = DPLL_PORTD_READY_MASK;
1765                 dpll_reg = DPIO_PHY_STATUS;
1766                 break;
1767         default:
1768                 BUG();
1769         }
1770
1771         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1772                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1773                      port_name(dport->port), I915_READ(dpll_reg));
1774 }
1775
1776 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1777 {
1778         struct drm_device *dev = crtc->base.dev;
1779         struct drm_i915_private *dev_priv = dev->dev_private;
1780         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1781
1782         if (WARN_ON(pll == NULL))
1783                 return;
1784
1785         WARN_ON(!pll->config.crtc_mask);
1786         if (pll->active == 0) {
1787                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1788                 WARN_ON(pll->on);
1789                 assert_shared_dpll_disabled(dev_priv, pll);
1790
1791                 pll->mode_set(dev_priv, pll);
1792         }
1793 }
1794
/**
 * intel_enable_shared_dpll - enable a crtc's shared DPLL
 * @crtc: crtc using the shared DPLL
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

        if (WARN_ON(pll == NULL))
                return;

        if (WARN_ON(pll->config.crtc_mask == 0))
                return;

        DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
                      pll->name, pll->active, pll->on,
                      crtc->base.base.id);

        /* Refcounted: only the first user actually turns the PLL on */
        if (pll->active++) {
                WARN_ON(!pll->on);
                assert_shared_dpll_enabled(dev_priv, pll);
                return;
        }
        WARN_ON(pll->on);

        /* Hold the PLL power domain for as long as the PLL is on */
        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

        DRM_DEBUG_KMS("enabling %s\n", pll->name);
        pll->enable(dev_priv, pll);
        pll->on = true;
}
1832
1833 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1834 {
1835         struct drm_device *dev = crtc->base.dev;
1836         struct drm_i915_private *dev_priv = dev->dev_private;
1837         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1838
1839         /* PCH only available on ILK+ */
1840         BUG_ON(INTEL_INFO(dev)->gen < 5);
1841         if (WARN_ON(pll == NULL))
1842                return;
1843
1844         if (WARN_ON(pll->config.crtc_mask == 0))
1845                 return;
1846
1847         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1848                       pll->name, pll->active, pll->on,
1849                       crtc->base.base.id);
1850
1851         if (WARN_ON(pll->active == 0)) {
1852                 assert_shared_dpll_disabled(dev_priv, pll);
1853                 return;
1854         }
1855
1856         assert_shared_dpll_enabled(dev_priv, pll);
1857         WARN_ON(!pll->on);
1858         if (--pll->active)
1859                 return;
1860
1861         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1862         pll->disable(dev_priv, pll);
1863         pll->on = false;
1864
1865         intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1866 }
1867
1868 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1869                                            enum pipe pipe)
1870 {
1871         struct drm_device *dev = dev_priv->dev;
1872         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1873         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1874         uint32_t reg, val, pipeconf_val;
1875
1876         /* PCH only available on ILK+ */
1877         BUG_ON(!HAS_PCH_SPLIT(dev));
1878
1879         /* Make sure PCH DPLL is enabled */
1880         assert_shared_dpll_enabled(dev_priv,
1881                                    intel_crtc_to_shared_dpll(intel_crtc));
1882
1883         /* FDI must be feeding us bits for PCH ports */
1884         assert_fdi_tx_enabled(dev_priv, pipe);
1885         assert_fdi_rx_enabled(dev_priv, pipe);
1886
1887         if (HAS_PCH_CPT(dev)) {
1888                 /* Workaround: Set the timing override bit before enabling the
1889                  * pch transcoder. */
1890                 reg = TRANS_CHICKEN2(pipe);
1891                 val = I915_READ(reg);
1892                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1893                 I915_WRITE(reg, val);
1894         }
1895
1896         reg = PCH_TRANSCONF(pipe);
1897         val = I915_READ(reg);
1898         pipeconf_val = I915_READ(PIPECONF(pipe));
1899
1900         if (HAS_PCH_IBX(dev_priv->dev)) {
1901                 /*
1902                  * make the BPC in transcoder be consistent with
1903                  * that in pipeconf reg.
1904                  */
1905                 val &= ~PIPECONF_BPC_MASK;
1906                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1907         }
1908
1909         val &= ~TRANS_INTERLACE_MASK;
1910         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1911                 if (HAS_PCH_IBX(dev_priv->dev) &&
1912                     intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1913                         val |= TRANS_LEGACY_INTERLACED_ILK;
1914                 else
1915                         val |= TRANS_INTERLACED;
1916         else
1917                 val |= TRANS_PROGRESSIVE;
1918
1919         I915_WRITE(reg, val | TRANS_ENABLE);
1920         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1921                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1922 }
1923
/*
 * lpt_enable_pch_transcoder - enable the (single) LPT PCH transcoder
 * @dev_priv: i915 private structure
 * @cpu_transcoder: CPU transcoder feeding the PCH
 *
 * On LPT the FDI RX side is always transcoder A; only the TX side
 * depends on @cpu_transcoder.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* PCH only available on ILK+ */
        BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(_TRANSA_CHICKEN2);
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(_TRANSA_CHICKEN2, val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        /* Propagate the pipe's interlace mode to the transcoder */
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
1954
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for a pipe
 * @dev_priv: i915 private structure
 * @pipe: pipe whose PCH transcoder to disable
 *
 * FDI and the PCH ports must already be off before the transcoder is
 * shut down.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        struct drm_device *dev = dev_priv->dev;
        uint32_t reg, val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (!HAS_PCH_IBX(dev)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
1984
1985 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1986 {
1987         u32 val;
1988
1989         val = I915_READ(LPT_TRANSCONF);
1990         val &= ~TRANS_ENABLE;
1991         I915_WRITE(LPT_TRANSCONF, val);
1992         /* wait for PCH transcoder off, transcoder state */
1993         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1994                 DRM_ERROR("Failed to disable PCH transcoder\n");
1995
1996         /* Workaround: clear timing override bit. */
1997         val = I915_READ(_TRANSA_CHICKEN2);
1998         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1999         I915_WRITE(_TRANSA_CHICKEN2, val);
2000 }
2001
2002 /**
2003  * intel_enable_pipe - enable a pipe, asserting requirements
2004  * @crtc: crtc responsible for the pipe
2005  *
2006  * Enable @crtc's pipe, making sure that various hardware specific requirements
2007  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2008  */
2009 static void intel_enable_pipe(struct intel_crtc *crtc)
2010 {
2011         struct drm_device *dev = crtc->base.dev;
2012         struct drm_i915_private *dev_priv = dev->dev_private;
2013         enum pipe pipe = crtc->pipe;
2014         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2015                                                                       pipe);
2016         enum pipe pch_transcoder;
2017         int reg;
2018         u32 val;
2019
2020         assert_planes_disabled(dev_priv, pipe);
2021         assert_cursor_disabled(dev_priv, pipe);
2022         assert_sprites_disabled(dev_priv, pipe);
2023
2024         if (HAS_PCH_LPT(dev_priv->dev))
2025                 pch_transcoder = TRANSCODER_A;
2026         else
2027                 pch_transcoder = pipe;
2028
2029         /*
2030          * A pipe without a PLL won't actually be able to drive bits from
2031          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2032          * need the check.
2033          */
2034         if (!HAS_PCH_SPLIT(dev_priv->dev))
2035                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2036                         assert_dsi_pll_enabled(dev_priv);
2037                 else
2038                         assert_pll_enabled(dev_priv, pipe);
2039         else {
2040                 if (crtc->config->has_pch_encoder) {
2041                         /* if driving the PCH, we need FDI enabled */
2042                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2043                         assert_fdi_tx_pll_enabled(dev_priv,
2044                                                   (enum pipe) cpu_transcoder);
2045                 }
2046                 /* FIXME: assert CPU port conditions for SNB+ */
2047         }
2048
2049         reg = PIPECONF(cpu_transcoder);
2050         val = I915_READ(reg);
2051         if (val & PIPECONF_ENABLE) {
2052                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2053                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2054                 return;
2055         }
2056
2057         I915_WRITE(reg, val | PIPECONF_ENABLE);
2058         POSTING_READ(reg);
2059 }
2060
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        int reg;
        u32 val;

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(dev_priv, pipe);
        assert_cursor_disabled(dev_priv, pipe);
        assert_sprites_disabled(dev_priv, pipe);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        /* Nothing to do if the pipe is already off */
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        /*
         * Double wide has implications for planes
         * so best keep it disabled when not needed.
         */
        if (crtc->config->double_wide)
                val &= ~PIPECONF_DOUBLE_WIDE;

        /* Don't disable pipe or pipe PLLs if needed */
        if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
            !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                val &= ~PIPECONF_ENABLE;

        I915_WRITE(reg, val);
        /* Only wait for shutdown if we actually cleared the enable bit
         * (a quirk above may have forced the pipe to stay on). */
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(crtc);
}
2108
2109 /*
2110  * Plane regs are double buffered, going from enabled->disabled needs a
2111  * trigger in order to latch.  The display address reg provides this.
2112  */
2113 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2114                                enum plane plane)
2115 {
2116         struct drm_device *dev = dev_priv->dev;
2117         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2118
2119         I915_WRITE(reg, I915_READ(reg));
2120         POSTING_READ(reg);
2121 }
2122
2123 /**
2124  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2125  * @plane:  plane to be enabled
2126  * @crtc: crtc for the plane
2127  *
2128  * Enable @plane on @crtc, making sure that the pipe is running first.
2129  */
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
                                          struct drm_crtc *crtc)
{
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        /* If the pipe isn't enabled, we can't pump pixels and may hang */
        assert_pipe_enabled(dev_priv, intel_crtc->pipe);

        /* Already enabled: nothing to do */
        if (intel_crtc->primary_enabled)
                return;

        intel_crtc->primary_enabled = true;

        /* Reprogram the plane; update_primary_plane honours primary_enabled */
        dev_priv->display.update_primary_plane(crtc, plane->fb,
                                               crtc->x, crtc->y);

        /*
         * BDW signals flip done immediately if the plane
         * is disabled, even if the plane enable is already
         * armed to occur at the next vblank :(
         */
        if (IS_BROADWELL(dev))
                intel_wait_for_vblank(dev, intel_crtc->pipe);
}
2156
2157 /**
2158  * intel_disable_primary_hw_plane - disable the primary hardware plane
2159  * @plane: plane to be disabled
2160  * @crtc: crtc for the plane
2161  *
2162  * Disable @plane on @crtc, making sure that the pipe is running first.
2163  */
2164 static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2165                                            struct drm_crtc *crtc)
2166 {
2167         struct drm_device *dev = plane->dev;
2168         struct drm_i915_private *dev_priv = dev->dev_private;
2169         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2170
2171         if (WARN_ON(!intel_crtc->active))
2172                 return;
2173
2174         if (!intel_crtc->primary_enabled)
2175                 return;
2176
2177         intel_crtc->primary_enabled = false;
2178
2179         dev_priv->display.update_primary_plane(crtc, plane->fb,
2180                                                crtc->x, crtc->y);
2181 }
2182
2183 static bool need_vtd_wa(struct drm_device *dev)
2184 {
2185 #ifdef CONFIG_INTEL_IOMMU
2186         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2187                 return true;
2188 #endif
2189         return false;
2190 }
2191
/*
 * Round a framebuffer height up to the tile boundary: 16 rows on gen2,
 * 8 rows otherwise, 1 (no rounding) for linear buffers.
 */
int
intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
{
        int tile_height = 1;

        if (tiling)
                tile_height = IS_GEN2(dev) ? 16 : 8;

        return ALIGN(height, tile_height);
}
2200
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer object for scanout
 * @plane: plane that will scan out from @fb
 * @fb: framebuffer to pin
 * @pipelined: optional ring that may still be rendering to the object
 *
 * Pins @fb's backing object into the display-accessible part of the GTT
 * with the platform/tiling-appropriate alignment and installs a fence
 * for tiled buffers.  Caller must hold struct_mutex.
 *
 * Returns 0 on success or a negative error code; on failure nothing is
 * left pinned.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
                           struct drm_framebuffer *fb,
                           struct intel_engine_cs *pipelined)
{
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 alignment;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* Pick the GTT alignment required for scanout on this platform */
        switch (obj->tiling_mode) {
        case I915_TILING_NONE:
                if (INTEL_INFO(dev)->gen >= 9)
                        alignment = 256 * 1024;
                else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
                else if (INTEL_INFO(dev)->gen >= 4)
                        alignment = 4 * 1024;
                else
                        alignment = 64 * 1024;
                break;
        case I915_TILING_X:
                if (INTEL_INFO(dev)->gen >= 9)
                        alignment = 256 * 1024;
                else {
                        /* pin() will align the object as required by fence */
                        alignment = 0;
                }
                break;
        case I915_TILING_Y:
                WARN(1, "Y tiled bo slipped through, driver bug!\n");
                return -EINVAL;
        default:
                BUG();
        }

        /* Note that the w/a also requires 64 PTE of padding following the
         * bo. We currently fill all unused PTE with the shadow page and so
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
        if (need_vtd_wa(dev) && alignment < 256 * 1024)
                alignment = 256 * 1024;

        /*
         * Global gtt pte registers are special registers which actually forward
         * writes to a chunk of system memory. Which means that there is no risk
         * that the register values disappear as soon as we call
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
        intel_runtime_pm_get(dev_priv);

        dev_priv->mm.interruptible = false;
        ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
        if (ret)
                goto err_interruptible;

        /* Install a fence for tiled scan-out. Pre-i965 always needs a
         * fence, whereas 965+ only requires a fence if using
         * framebuffer compression.  For simplicity, we always install
         * a fence as the cost is not that onerous.
         */
        ret = i915_gem_object_get_fence(obj);
        if (ret)
                goto err_unpin;

        i915_gem_object_pin_fence(obj);

        dev_priv->mm.interruptible = true;
        intel_runtime_pm_put(dev_priv);
        return 0;

err_unpin:
        /* Unwind in reverse order of acquisition */
        i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
        dev_priv->mm.interruptible = true;
        intel_runtime_pm_put(dev_priv);
        return ret;
}
2284
2285 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2286 {
2287         WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2288
2289         i915_gem_object_unpin_fence(obj);
2290         i915_gem_object_unpin_from_display_plane(obj);
2291 }
2292
2293 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2294  * is assumed to be a power-of-two. */
2295 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2296                                              unsigned int tiling_mode,
2297                                              unsigned int cpp,
2298                                              unsigned int pitch)
2299 {
2300         if (tiling_mode != I915_TILING_NONE) {
2301                 unsigned int tile_rows, tiles;
2302
2303                 tile_rows = *y / 8;
2304                 *y %= 8;
2305
2306                 tiles = *x / (512/cpp);
2307                 *x %= 512/cpp;
2308
2309                 return tile_rows * pitch * 8 + tiles * 4096;
2310         } else {
2311                 unsigned int offset;
2312
2313                 offset = *y * pitch + *x * cpp;
2314                 *y = 0;
2315                 *x = (offset & 4095) / cpp;
2316                 return offset & -4096;
2317         }
2318 }
2319
2320 static int i9xx_format_to_fourcc(int format)
2321 {
2322         switch (format) {
2323         case DISPPLANE_8BPP:
2324                 return DRM_FORMAT_C8;
2325         case DISPPLANE_BGRX555:
2326                 return DRM_FORMAT_XRGB1555;
2327         case DISPPLANE_BGRX565:
2328                 return DRM_FORMAT_RGB565;
2329         default:
2330         case DISPPLANE_BGRX888:
2331                 return DRM_FORMAT_XRGB8888;
2332         case DISPPLANE_RGBX888:
2333                 return DRM_FORMAT_XBGR8888;
2334         case DISPPLANE_BGRX101010:
2335                 return DRM_FORMAT_XRGB2101010;
2336         case DISPPLANE_RGBX101010:
2337                 return DRM_FORMAT_XBGR2101010;
2338         }
2339 }
2340
2341 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2342 {
2343         switch (format) {
2344         case PLANE_CTL_FORMAT_RGB_565:
2345                 return DRM_FORMAT_RGB565;
2346         default:
2347         case PLANE_CTL_FORMAT_XRGB_8888:
2348                 if (rgb_order) {
2349                         if (alpha)
2350                                 return DRM_FORMAT_ABGR8888;
2351                         else
2352                                 return DRM_FORMAT_XBGR8888;
2353                 } else {
2354                         if (alpha)
2355                                 return DRM_FORMAT_ARGB8888;
2356                         else
2357                                 return DRM_FORMAT_XRGB8888;
2358                 }
2359         case PLANE_CTL_FORMAT_XRGB_2101010:
2360                 if (rgb_order)
2361                         return DRM_FORMAT_XBGR2101010;
2362                 else
2363                         return DRM_FORMAT_XRGB2101010;
2364         }
2365 }
2366
/*
 * intel_alloc_plane_obj - wrap the BIOS framebuffer in a GEM object
 * @crtc: crtc whose primary plane's preallocated (stolen) memory to claim
 * @plane_config: plane state read back from the hardware
 *
 * Creates a GEM object over the stolen-memory region the firmware left
 * scanning out and initializes an intel_framebuffer around it, so the
 * BIOS framebuffer can be inherited for flicker-free takeover.
 *
 * Returns true on success, false if there was nothing to wrap or the
 * framebuffer init failed.
 */
static bool
intel_alloc_plane_obj(struct intel_crtc *crtc,
                      struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        u32 base = plane_config->base;

        /* Nothing preallocated by the firmware */
        if (plane_config->size == 0)
                return false;

        obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
                                                             plane_config->size);
        if (!obj)
                return false;

        obj->tiling_mode = plane_config->tiling;
        if (obj->tiling_mode == I915_TILING_X)
                obj->stride = crtc->base.primary->fb->pitches[0];

        /* Mirror the fb parameters the hardware readout produced */
        mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
        mode_cmd.width = crtc->base.primary->fb->width;
        mode_cmd.height = crtc->base.primary->fb->height;
        mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];

        mutex_lock(&dev->struct_mutex);

        if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
                                   &mode_cmd, obj)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out_unref_obj;
        }

        obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG_KMS("plane fb obj %p\n", obj);
        return true;

out_unref_obj:
        /* Drop the reference taken by the stolen-object constructor */
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
        return false;
}
2412
/*
 * Attach a framebuffer to the BIOS-enabled primary plane of
 * @intel_crtc.  First try to wrap the BIOS framebuffer memory in a
 * fresh GEM object via intel_alloc_plane_obj(); failing that, look for
 * another active CRTC already scanning out from the same GGTT base
 * address and share its fb.
 */
static void
intel_find_plane_obj(struct intel_crtc *intel_crtc,
		     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;

	if (!intel_crtc->base.primary->fb)
		return;

	if (intel_alloc_plane_obj(intel_crtc, plane_config))
		return;

	/* Allocation failed: drop the pre-allocated fb struct. */
	kfree(intel_crtc->base.primary->fb);
	intel_crtc->base.primary->fb = NULL;

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			/*
			 * Sharing a tiled BIOS fb: presumably the
			 * BIOS-programmed swizzle settings must be kept
			 * alive — flag consumed elsewhere, NOTE(review).
			 */
			if (obj->tiling_mode != I915_TILING_NONE)
				dev_priv->preserve_bios_swizzle = true;

			drm_framebuffer_reference(c->primary->fb);
			intel_crtc->base.primary->fb = c->primary->fb;
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
			break;
		}
	}
}
2460
/*
 * Program the gen2-4 / VLV / CHV primary display plane registers so
 * that @fb scans out with its top-left at (@x, @y), or disable the
 * plane entirely when intel_crtc->primary_enabled is false.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;	/* bytes per pixel of fb's format */

	if (!intel_crtc->primary_enabled) {
		/* Disable: clear the control register and the base address. */
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B has its own size/position/alpha registers. */
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the fb fourcc into the plane's pixel-format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Byte offset of (x, y) from the start of the buffer. */
	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/*
		 * gen4+: split the offset into a page-aligned base for
		 * DSPSURF plus a residual (x, y) within that page.
		 */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* 180° rotation scans backwards from the last pixel. */
		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	I915_WRITE(reg, dspcntr);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2592
/*
 * Program the ILK+ (through HSW/BDW) primary plane registers so that
 * @fb scans out with its top-left at (@x, @y), or disable the plane
 * when intel_crtc->primary_enabled is false.
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;	/* bytes per pixel of fb's format */

	if (!intel_crtc->primary_enabled) {
		/* Disable: clear the control register and the surface base. */
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the fb fourcc into the plane's pixel-format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Byte offset of (x, y), split into page-aligned base + residual. */
	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW take only the rotate bit, no offset rework. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	I915_WRITE(reg, dspcntr);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2697
/*
 * Program the SKL universal plane 0 (primary) registers for @fb at
 * (@x, @y), or disable the plane when intel_crtc->primary_enabled is
 * false.
 *
 * NOTE(review): only the pixel formats listed in the switch below are
 * handled; anything else (e.g. DRM_FORMAT_C8) hits BUG().  Confirm the
 * list covers every format this plane advertises to userspace.
 */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride;

	if (!intel_crtc->primary_enabled) {
		/* Disable: clear plane control and the surface base. */
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	/* Translate the fb fourcc into PLANE_CTL format/order/alpha bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
		break;
	case DRM_FORMAT_XRGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		break;
	case DRM_FORMAT_ARGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_XBGR8888:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		break;
	case DRM_FORMAT_ABGR8888:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_XRGB2101010:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	default:
		BUG();
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		stride = fb->pitches[0] >> 6;
		break;
	case I915_TILING_X:
		plane_ctl |= PLANE_CTL_TILED_X;
		stride = fb->pitches[0] >> 9;
		break;
	default:
		BUG();
	}

	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
		plane_ctl |= PLANE_CTL_ROTATE_180;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);

	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
		      i915_gem_obj_ggtt_offset(obj),
		      x, y, fb->width, fb->height,
		      fb->pitches[0]);

	/* Position, (x, y) offset into the fb, and source size. */
	I915_WRITE(PLANE_POS(pipe, 0), 0);
	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
	I915_WRITE(PLANE_SIZE(pipe, 0),
		   (intel_crtc->config->pipe_src_h - 1) << 16 |
		   (intel_crtc->config->pipe_src_w - 1));
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));

	POSTING_READ(PLANE_SURF(pipe, 0));
}
2792
2793 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2794 static int
2795 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2796                            int x, int y, enum mode_set_atomic state)
2797 {
2798         struct drm_device *dev = crtc->dev;
2799         struct drm_i915_private *dev_priv = dev->dev_private;
2800
2801         if (dev_priv->display.disable_fbc)
2802                 dev_priv->display.disable_fbc(dev);
2803
2804         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2805
2806         return 0;
2807 }
2808
2809 static void intel_complete_page_flips(struct drm_device *dev)
2810 {
2811         struct drm_crtc *crtc;
2812
2813         for_each_crtc(dev, crtc) {
2814                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2815                 enum plane plane = intel_crtc->plane;
2816
2817                 intel_prepare_page_flip(dev, plane);
2818                 intel_finish_page_flip_plane(dev, plane);
2819         }
2820 }
2821
2822 static void intel_update_primary_planes(struct drm_device *dev)
2823 {
2824         struct drm_i915_private *dev_priv = dev->dev_private;
2825         struct drm_crtc *crtc;
2826
2827         for_each_crtc(dev, crtc) {
2828                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2829
2830                 drm_modeset_lock(&crtc->mutex, NULL);
2831                 /*
2832                  * FIXME: Once we have proper support for primary planes (and
2833                  * disabling them without disabling the entire crtc) allow again
2834                  * a NULL crtc->primary->fb.
2835                  */
2836                 if (intel_crtc->active && crtc->primary->fb)
2837                         dev_priv->display.update_primary_plane(crtc,
2838                                                                crtc->primary->fb,
2839                                                                crtc->x,
2840                                                                crtc->y);
2841                 drm_modeset_unlock(&crtc->mutex);
2842         }
2843 }
2844
2845 void intel_prepare_reset(struct drm_device *dev)
2846 {
2847         struct drm_i915_private *dev_priv = to_i915(dev);
2848         struct intel_crtc *crtc;
2849
2850         /* no reset support for gen2 */
2851         if (IS_GEN2(dev))
2852                 return;
2853
2854         /* reset doesn't touch the display */
2855         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
2856                 return;
2857
2858         drm_modeset_lock_all(dev);
2859
2860         /*
2861          * Disabling the crtcs gracefully seems nicer. Also the
2862          * g33 docs say we should at least disable all the planes.
2863          */
2864         for_each_intel_crtc(dev, crtc) {
2865                 if (crtc->active)
2866                         dev_priv->display.crtc_disable(&crtc->base);
2867         }
2868 }
2869
/*
 * Bring the display back up after a GPU reset; counterpart of
 * intel_prepare_reset().  Platforms whose display survives the reset
 * only get their primary planes re-latched; older platforms get a full
 * display re-initialization.
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_modeset_setup_hw_state(dev, true);

	intel_hpd_init(dev_priv);

	/* Drops the modeset locks taken in intel_prepare_reset(). */
	drm_modeset_unlock_all(dev);
}
2917
2918 static int
2919 intel_finish_fb(struct drm_framebuffer *old_fb)
2920 {
2921         struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2922         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2923         bool was_interruptible = dev_priv->mm.interruptible;
2924         int ret;
2925
2926         /* Big Hammer, we also need to ensure that any pending
2927          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2928          * current scanout is retired before unpinning the old
2929          * framebuffer.
2930          *
2931          * This should only fail upon a hung GPU, in which case we
2932          * can safely continue.
2933          */
2934         dev_priv->mm.interruptible = false;
2935         ret = i915_gem_object_finish_gpu(obj);
2936         dev_priv->mm.interruptible = was_interruptible;
2937
2938         return ret;
2939 }
2940
2941 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2942 {
2943         struct drm_device *dev = crtc->dev;
2944         struct drm_i915_private *dev_priv = dev->dev_private;
2945         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2946         bool pending;
2947
2948         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2949             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2950                 return false;
2951
2952         spin_lock_irq(&dev->event_lock);
2953         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2954         spin_unlock_irq(&dev->event_lock);
2955
2956         return pending;
2957 }
2958
/*
 * Fastboot helper: rewrite the pipe source size (and drop the panel
 * fitter when unused on an LVDS/eDP pipe) so that a flip taken instead
 * of a full modeset doesn't scan out a stale-sized surface.  No-op
 * unless the i915.fastboot module parameter is set.
 */
static void intel_update_pipe_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode;

	if (!i915.fastboot)
		return;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */

	adjusted_mode = &crtc->config->base.adjusted_mode;

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
		   (adjusted_mode->crtc_vdisplay - 1));
	if (!crtc->config->pch_pfit.enabled &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		/* Panel fitter unused: clear control, position and size. */
		I915_WRITE(PF_CTL(crtc->pipe), 0);
		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
	}
	/* Keep the software pipe-src state in sync with what was written. */
	crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
	crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
}
2997
/*
 * Switch the FDI TX and RX of @crtc's pipe from a link-training pattern
 * to normal (pixel-sending) mode, then wait one idle-pattern time.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/*
		 * NOTE(review): clearing FDI_LINK_TRAIN_NONE_IVB and then
		 * immediately setting it back looks redundant (same on the
		 * non-IVB branch below); kept as-is.
		 */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT uses its own train-pattern field on the RX side. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3038
3039 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
3040 {
3041         return crtc->base.enabled && crtc->active &&
3042                 crtc->config->has_pch_encoder;
3043 }
3044
/*
 * IVB global modeset hook: when neither pipe B nor pipe C drives an
 * enabled PCH encoder, clear FDI_BC_BIFURCATION_SELECT so that FDI B
 * can later be enabled with all lanes.
 */
static void ivb_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
	struct intel_crtc *pipe_C_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
	uint32_t temp;

	/*
	 * When everything is off disable fdi C so that we could enable fdi B
	 * with all lanes. Note that we don't care about enabled pipes without
	 * an enabled pch encoder.
	 */
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
		/* Neither FDI RX may still be enabled at this point. */
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

		temp = I915_READ(SOUTH_CHICKEN1);
		temp &= ~FDI_BC_BIFURCATION_SELECT;
		DRM_DEBUG_KMS("disabling fdi C rx\n");
		I915_WRITE(SOUTH_CHICKEN1, temp);
	}
}
3070
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link of @crtc's pipe: run training pattern 1
 * until bit lock, then pattern 2 until symbol lock, polling FDI_RX_IIR
 * for the result each time.  Failures are only logged via DRM_ERROR;
 * the sequence continues regardless.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; ack it by writing the bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock; ack it by writing the bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3163
/*
 * FDI TX voltage-swing / pre-emphasis combinations tried in order while
 * training the SNB/IVB FDI link (indexed by training attempt).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3170
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* CPT PCHs select the training pattern with a dedicated field;
	 * older PCHs reuse the TRAIN_NONE/PATTERN bits. */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until the RX reports bit lock,
	 * polling the IIR up to 5 times per voltage level. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write 1 to clear the sticky lock bit */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same retry scheme as train 1, but waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3302
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j counts attempts; each table entry is used twice (j/2) */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read in case the lock bit landed just after
			 * the first read */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* no bit lock at this voltage level; try the next */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3421
/* Bring up the PCH FDI RX PLL and the CPU FDI TX PLL for this crtc's pipe. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* copy the pipe's BPC setting into FDI RX; the << 11 presumably
	 * repositions PIPECONF's BPC field to the FDI_RX_CTL bpc field —
	 * confirm against the register layout */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3458
/* Reverse of ironlake_fdi_pll_enable(): clock source back to Rawclk,
 * then shut off the CPU FDI TX PLL and the PCH FDI RX PLL. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3487
/* Disable the CPU FDI transmitter and PCH FDI receiver for this pipe,
 * leaving both sides parked in training pattern 1. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3539
3540 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3541 {
3542         struct intel_crtc *crtc;
3543
3544         /* Note that we don't need to be called with mode_config.lock here
3545          * as our list of CRTC objects is static for the lifetime of the
3546          * device and so cannot disappear as we iterate. Similarly, we can
3547          * happily treat the predicates as racy, atomic checks as userspace
3548          * cannot claim and pin a new fb without at least acquring the
3549          * struct_mutex and so serialising with us.
3550          */
3551         for_each_intel_crtc(dev, crtc) {
3552                 if (atomic_read(&crtc->unpin_work_count) == 0)
3553                         continue;
3554
3555                 if (crtc->unpin_work)
3556                         intel_wait_for_vblank(dev, crtc->pipe);
3557
3558                 return true;
3559         }
3560
3561         return false;
3562 }
3563
/* Complete a page flip: detach the unpin work from the crtc, send the
 * userspace vblank event (if requested), drop the vblank reference and
 * queue the unpin work for deferred cleanup. Callers in this file hold
 * dev->event_lock around this. */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	/* wake anyone sleeping in intel_crtc_wait_for_pending_flips() */
	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3586
/* Wait (up to 60s) for any pending page flip on @crtc to complete; if it
 * times out, forcibly complete the stuck flip, then wait for rendering to
 * the current front buffer to finish. */
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
				       !intel_crtc_has_pending_flip(crtc),
				       60*HZ) == 0)) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		/* Timed out: complete the flip by hand so we don't wedge. */
		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->primary->fb);
		mutex_unlock(&dev->struct_mutex);
	}
}
3612
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		/* Split the overall divisor into an integer part (divsel)
		 * and a phase-increment fraction out of iclk_pi_range. */
		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	/* NOTE(review): this checks the direction bit against the INCVAL
	 * mask; looks like it was meant to pair DIR with a DIR mask or
	 * INCVAL(phaseinc) with INCVAL_MASK — confirm against the SBI
	 * register layout. */
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}
3701
/* Copy the CPU transcoder's h/v timing registers to the PCH transcoder
 * so both sides of the FDI link agree on the mode. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
3725
/* Set FDI_BC_BIFURCATION_SELECT in SOUTH_CHICKEN1 (no-op if already set).
 * Must only be done while FDI RX on pipes B and C is disabled, hence the
 * WARN_ONs. */
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
3743
3744 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3745 {
3746         struct drm_device *dev = intel_crtc->base.dev;
3747         struct drm_i915_private *dev_priv = dev->dev_private;
3748
3749         switch (intel_crtc->pipe) {
3750         case PIPE_A:
3751                 break;
3752         case PIPE_B:
3753                 if (intel_crtc->config->fdi_lanes > 2)
3754                         WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3755                 else
3756                         cpt_enable_fdi_bc_bifurcation(dev);
3757
3758                 break;
3759         case PIPE_C:
3760                 cpt_enable_fdi_bc_bifurcation(dev);
3761
3762                 break;
3763         default:
3764                 BUG();
3765         }
3766 }
3767
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		/* propagate the mode's sync polarities */
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
3863
/* LPT variant of the PCH enable sequence: program iCLKIP, copy timings
 * to the (single) PCH transcoder A and enable it. */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
3880
3881 void intel_put_shared_dpll(struct intel_crtc *crtc)
3882 {
3883         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3884
3885         if (pll == NULL)
3886                 return;
3887
3888         if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
3889                 WARN(1, "bad %s crtc mask\n", pll->name);
3890                 return;
3891         }
3892
3893         pll->config.crtc_mask &= ~(1 << crtc->pipe);
3894         if (pll->config.crtc_mask == 0) {
3895                 WARN_ON(pll->on);
3896                 WARN_ON(pll->active);
3897         }
3898
3899         crtc->config->shared_dpll = DPLL_ID_PRIVATE;
3900 }
3901
3902 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
3903                                                 struct intel_crtc_state *crtc_state)
3904 {
3905         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3906         struct intel_shared_dpll *pll;
3907         enum intel_dpll_id i;
3908
3909         if (HAS_PCH_IBX(dev_priv->dev)) {
3910                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3911                 i = (enum intel_dpll_id) crtc->pipe;
3912                 pll = &dev_priv->shared_dplls[i];
3913
3914                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3915                               crtc->base.base.id, pll->name);
3916
3917                 WARN_ON(pll->new_config->crtc_mask);
3918
3919                 goto found;
3920         }
3921
3922         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3923                 pll = &dev_priv->shared_dplls[i];
3924
3925                 /* Only want to check enabled timings first */
3926                 if (pll->new_config->crtc_mask == 0)
3927                         continue;
3928
3929                 if (memcmp(&crtc_state->dpll_hw_state,
3930                            &pll->new_config->hw_state,
3931                            sizeof(pll->new_config->hw_state)) == 0) {
3932                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
3933                                       crtc->base.base.id, pll->name,
3934                                       pll->new_config->crtc_mask,
3935                                       pll->active);
3936                         goto found;
3937                 }
3938         }
3939
3940         /* Ok no matching timings, maybe there's a free one? */
3941         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3942                 pll = &dev_priv->shared_dplls[i];
3943                 if (pll->new_config->crtc_mask == 0) {
3944                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3945                                       crtc->base.base.id, pll->name);
3946                         goto found;
3947                 }
3948         }
3949
3950         return NULL;
3951
3952 found:
3953         if (pll->new_config->crtc_mask == 0)
3954                 pll->new_config->hw_state = crtc_state->dpll_hw_state;
3955
3956         crtc_state->shared_dpll = i;
3957         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3958                          pipe_name(crtc->pipe));
3959
3960         pll->new_config->crtc_mask |= 1 << crtc->pipe;
3961
3962         return pll;
3963 }
3964
3965 /**
3966  * intel_shared_dpll_start_config - start a new PLL staged config
3967  * @dev_priv: DRM device
3968  * @clear_pipes: mask of pipes that will have their PLLs freed
3969  *
3970  * Starts a new PLL staged config, copying the current config but
3971  * releasing the references of pipes specified in clear_pipes.
3972  */
3973 static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
3974                                           unsigned clear_pipes)
3975 {
3976         struct intel_shared_dpll *pll;
3977         enum intel_dpll_id i;
3978
3979         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3980                 pll = &dev_priv->shared_dplls[i];
3981
3982                 pll->new_config = kmemdup(&pll->config, sizeof pll->config,
3983                                           GFP_KERNEL);
3984                 if (!pll->new_config)
3985                         goto cleanup;
3986
3987                 pll->new_config->crtc_mask &= ~clear_pipes;
3988         }
3989
3990         return 0;
3991
3992 cleanup:
3993         while (--i >= 0) {
3994                 pll = &dev_priv->shared_dplls[i];
3995                 kfree(pll->new_config);
3996                 pll->new_config = NULL;
3997         }
3998
3999         return -ENOMEM;
4000 }
4001
4002 static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
4003 {
4004         struct intel_shared_dpll *pll;
4005         enum intel_dpll_id i;
4006
4007         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4008                 pll = &dev_priv->shared_dplls[i];
4009
4010                 WARN_ON(pll->new_config == &pll->config);
4011
4012                 pll->config = *pll->new_config;
4013                 kfree(pll->new_config);
4014                 pll->new_config = NULL;
4015         }
4016 }
4017
4018 static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
4019 {
4020         struct intel_shared_dpll *pll;
4021         enum intel_dpll_id i;
4022
4023         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4024                 pll = &dev_priv->shared_dplls[i];
4025
4026                 WARN_ON(pll->new_config == &pll->config);
4027
4028                 kfree(pll->new_config);
4029                 pll->new_config = NULL;
4030         }
4031 }
4032
/*
 * Sanity check after a CPT PCH modeset: sample the pipe's scanline
 * counter (PIPEDSL) and verify it is advancing; log an error if the
 * pipe appears stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Give it one more try before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4046
/*
 * Enable the SKL pipe scaler for this crtc, if the precomputed config
 * says the panel fitter should be on.  Window position and size are
 * taken verbatim from crtc->config->pch_pfit.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		I915_WRITE(PS_CTL(pipe), PS_ENABLE);
		I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4059
/*
 * Enable the ILK-style panel fitter (PF) for this crtc, if the
 * precomputed config says it should be on.  On IVB/HSW the pipe
 * selection must be programmed into PF_CTL as well.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4080
4081 static void intel_enable_sprite_planes(struct drm_crtc *crtc)
4082 {
4083         struct drm_device *dev = crtc->dev;
4084         enum pipe pipe = to_intel_crtc(crtc)->pipe;
4085         struct drm_plane *plane;
4086         struct intel_plane *intel_plane;
4087
4088         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4089                 intel_plane = to_intel_plane(plane);
4090                 if (intel_plane->pipe == pipe)
4091                         intel_plane_restore(&intel_plane->base);
4092         }
4093 }
4094
4095 static void intel_disable_sprite_planes(struct drm_crtc *crtc)
4096 {
4097         struct drm_device *dev = crtc->dev;
4098         enum pipe pipe = to_intel_crtc(crtc)->pipe;
4099         struct drm_plane *plane;
4100         struct intel_plane *intel_plane;
4101
4102         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4103                 intel_plane = to_intel_plane(plane);
4104                 if (intel_plane->pipe == pipe)
4105                         plane->funcs->disable_plane(plane);
4106         }
4107 }
4108
/*
 * Enable IPS on HSW/BDW, if the crtc's config requests it.  The primary
 * plane must already be enabled, and a vblank must pass between plane
 * enable and IPS enable (hence the wait below).  On BDW the enable goes
 * through the pcode mailbox, on HSW it is a direct register write.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4141
/*
 * Disable IPS on HSW/BDW, if the crtc's config has it enabled.  Uses
 * the pcode mailbox on BDW and a direct register write on HSW, then
 * waits a vblank before the caller may disable the plane.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		/* Flush the write before continuing. */
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4166
/**
 * Loads the palette/gamma unit for the CRTC with the prepared values.
 *
 * No-op unless the crtc is enabled and active (the clocks have to be
 * on to load the palette).  On HSW with split-gamma + IPS active, IPS
 * is temporarily disabled around the palette write (hw workaround).
 */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		/* On GMCH platforms verify the relevant PLL is running. */
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* use legacy palette for Ironlake */
	if (!HAS_GMCH_DISPLAY(dev))
		palreg = LGC_PALETTE(pipe);

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 R/G/B entries packed into one register each. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4213
4214 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
4215 {
4216         if (!enable && intel_crtc->overlay) {
4217                 struct drm_device *dev = intel_crtc->base.dev;
4218                 struct drm_i915_private *dev_priv = dev->dev_private;
4219
4220                 mutex_lock(&dev->struct_mutex);
4221                 dev_priv->mm.interruptible = false;
4222                 (void) intel_overlay_switch_off(intel_crtc->overlay);
4223                 dev_priv->mm.interruptible = true;
4224                 mutex_unlock(&dev->struct_mutex);
4225         }
4226
4227         /* Let userspace switch the overlay on again. In most cases userspace
4228          * has to recompute where to put it anyway.
4229          */
4230 }
4231
/*
 * Enable all planes on the crtc (primary, sprites, cursor, overlay),
 * then IPS, FBC, and finally signal a frontbuffer flip.  Ordering
 * matters: IPS can only be enabled once a plane is up (see
 * hsw_enable_ips()).
 */
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_enable_primary_hw_plane(crtc->primary, crtc);
	intel_enable_sprite_planes(crtc);
	intel_crtc_update_cursor(crtc, true);
	intel_crtc_dpms_overlay(intel_crtc, true);

	hsw_enable_ips(intel_crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip from a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
4256
/*
 * Disable all planes on the crtc, in reverse order of
 * intel_crtc_enable_planes(): pending flips drained first, then FBC,
 * IPS, overlay, cursor, sprites and finally the primary plane.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	intel_crtc_wait_for_pending_flips(crtc);

	/* Tear down FBC only if it is tracking this plane. */
	if (dev_priv->fbc.plane == plane)
		intel_fbc_disable(dev);

	hsw_disable_ips(intel_crtc);

	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);
	intel_disable_sprite_planes(crtc);
	intel_disable_primary_hw_plane(crtc->primary, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
4284
/*
 * Full CRTC enable sequence for ILK-class hardware with a PCH.
 * The statement order follows the hardware modeset sequence (PLL prep,
 * timings, pipeconf, encoder pre_enable, FDI PLL, pfit, LUT, pipe,
 * PCH, encoders, planes) -- do not reorder without consulting the
 * modeset sequence documentation.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	/* Already running: nothing to do. */
	if (intel_crtc->active)
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	/* CPT needs an extra sanity check that the pipe started. */
	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	intel_crtc_enable_planes(crtc);
}
4357
4358 /* IPS only exists on ULT machines and is tied to pipe A. */
4359 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4360 {
4361         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4362 }
4363
4364 /*
4365  * This implements the workaround described in the "notes" section of the mode
4366  * set sequence documentation. When going from no pipes or single pipe to
4367  * multiple pipes, and planes are enabled after the pipe, we need to wait at
4368  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4369  */
4370 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4371 {
4372         struct drm_device *dev = crtc->base.dev;
4373         struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4374
4375         /* We want to get the other_active_crtc only if there's only 1 other
4376          * active crtc. */
4377         for_each_intel_crtc(dev, crtc_it) {
4378                 if (!crtc_it->active || crtc_it == crtc)
4379                         continue;
4380
4381                 if (other_active_crtc)
4382                         return;
4383
4384                 other_active_crtc = crtc_it;
4385         }
4386         if (!other_active_crtc)
4387                 return;
4388
4389         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4390         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4391 }
4392
/*
 * Full CRTC enable sequence for HSW/BDW/SKL (DDI) hardware.  Ordering
 * follows the hardware modeset sequence: DPLL, timings, pipeconf/CSC,
 * encoder pre_enable (+ FDI training for the PCH case), pipe clock,
 * pfit, LUT, transcoder, pipe, PCH/MST extras, encoders, planes.
 * Do not reorder without consulting the modeset sequence docs.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	/* Already running: nothing to do. */
	if (intel_crtc->active)
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	/* Pixel multiplier is programmed per-transcoder, except on eDP. */
	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* On LPT/WPT the PCH transcoder is always transcoder A. */
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
		dev_priv->display.fdi_link_train(crtc);
	}

	intel_ddi_enable_pipe_clock(intel_crtc);

	/* SKL uses a different panel-fitter/scaler block than HSW/BDW. */
	if (IS_SKYLAKE(dev))
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	haswell_mode_set_planes_workaround(intel_crtc);
	intel_crtc_enable_planes(crtc);
}
4479
/*
 * Disable the SKL pipe scaler for this crtc, clearing control, window
 * position and window size registers.
 */
static void skylake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (crtc->config->pch_pfit.enabled) {
		I915_WRITE(PS_CTL(pipe), 0);
		I915_WRITE(PS_WIN_POS(pipe), 0);
		I915_WRITE(PS_WIN_SZ(pipe), 0);
	}
}
4494
/*
 * Disable the ILK-style panel fitter for this crtc, clearing control,
 * window position and window size registers.
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
4509
/*
 * Full CRTC disable sequence for ILK-class hardware -- the reverse of
 * ironlake_crtc_enable(): planes, encoders, vblanks, pipe, pfit,
 * encoder post_disable, then FDI/PCH teardown.  Statement order
 * mirrors the hardware modeset sequence; do not reorder casually.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Already off: nothing to do. */
	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* Suppress spurious PCH underrun reports during teardown. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_fdi_disable(crtc);

		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		/* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);
}
4574
/*
 * Full CRTC disable sequence for HSW/BDW/SKL (DDI) hardware -- the
 * reverse of haswell_crtc_enable(): planes, encoders, vblanks, pipe,
 * transcoder, pfit, pipe clock, PCH/FDI teardown, encoder
 * post_disable, then the shared DPLL last.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* Already off: nothing to do. */
	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* Suppress spurious PCH underrun reports during teardown; the
	 * PCH transcoder is always transcoder A here. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);
	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* SKL uses a different panel-fitter/scaler block than HSW/BDW. */
	if (IS_SKYLAKE(dev))
		skylake_pfit_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_disable_shared_dpll(intel_crtc);
}
4632
/* Release this crtc's reference on its shared DPLL. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	intel_put_shared_dpll(to_intel_crtc(crtc));
}
4638
4639
/*
 * Program and enable the GMCH panel fitter from the precomputed
 * gmch_pfit state.  No-op when the config does not request scaling.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
4663
4664 static enum intel_display_power_domain port_to_power_domain(enum port port)
4665 {
4666         switch (port) {
4667         case PORT_A:
4668                 return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4669         case PORT_B:
4670                 return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4671         case PORT_C:
4672                 return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4673         case PORT_D:
4674                 return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4675         default:
4676                 WARN_ON_ONCE(1);
4677                 return POWER_DOMAIN_PORT_OTHER;
4678         }
4679 }
4680
/* Iterate @domain over every power domain whose bit is set in @mask. */
#define for_each_power_domain(domain, mask)                             \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
		if ((1 << (domain)) & (mask))
4684
/*
 * Return the power domain needed by @intel_encoder's port.  Digital
 * encoders map through their port; analog/DSI have fixed domains;
 * anything else falls back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - UNKNOWN is treated as a digital port */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams draw power through their primary port. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
4711
4712 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4713 {
4714         struct drm_device *dev = crtc->dev;
4715         struct intel_encoder *intel_encoder;
4716         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4717         enum pipe pipe = intel_crtc->pipe;
4718         unsigned long mask;
4719         enum transcoder transcoder;
4720
4721         transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4722
4723         mask = BIT(POWER_DOMAIN_PIPE(pipe));
4724         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4725         if (intel_crtc->config->pch_pfit.enabled ||
4726             intel_crtc->config->pch_pfit.force_thru)
4727                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4728
4729         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4730                 mask |= BIT(intel_display_port_power_domain(intel_encoder));
4731
4732         return mask;
4733 }
4734
/*
 * Re-balance display power domain references after a modeset: acquire
 * all domains the new configuration needs before dropping the ones the
 * old configuration held, so power wells never toggle unnecessarily.
 */
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev_priv, domain);
	}

	/* Platform hook for global (non-per-crtc) resource updates. */
	if (dev_priv->display.modeset_global_resources)
		dev_priv->display.modeset_global_resources(dev);

	/* Now drop the references held for the previous configuration. */
	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev_priv, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev_priv, false);
}
4771
4772 /* returns HPLL frequency in kHz */
4773 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4774 {
4775         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4776
4777         /* Obtain SKU information */
4778         mutex_lock(&dev_priv->dpio_lock);
4779         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4780                 CCK_FUSE_HPLL_FREQ_MASK;
4781         mutex_unlock(&dev_priv->dpio_lock);
4782
4783         return vco_freq[hpll_freq] * 1000;
4784 }
4785
4786 static void vlv_update_cdclk(struct drm_device *dev)
4787 {
4788         struct drm_i915_private *dev_priv = dev->dev_private;
4789
4790         dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
4791         DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
4792                          dev_priv->vlv_cdclk_freq);
4793
4794         /*
4795          * Program the gmbus_freq based on the cdclk frequency.
4796          * BSpec erroneously claims we should aim for 4MHz, but
4797          * in fact 1MHz is the correct frequency.
4798          */
4799         I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
4800 }
4801
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	/* Select the Punit voltage level for the requested frequency. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new level from the Punit and wait for its ack. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (cdclk == 400000) {
		u32 divider;

		/* 400MHz is derived from the HPLL; compute the CCK divider. */
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~DISPLAY_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		/* Wait for the divider change to take effect. */
		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	/* Refresh the cached cdclk value and dependent gmbus frequency. */
	vlv_update_cdclk(dev);
}
4866
/* Program the CHV CDclk by requesting a new frequency from the Punit. */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	/* Map the requested frequency to the Punit command encoding. */
	switch (cdclk) {
	case 400000:
		cmd = 3;
		break;
	case 333333:
	case 320000:
		cmd = 2;
		break;
	case 266667:
		cmd = 1;
		break;
	case 200000:
		cmd = 0;
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/* Request the change and wait for the Punit to acknowledge it. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* Refresh the cached cdclk value and dependent gmbus frequency. */
	vlv_update_cdclk(dev);
}
4907
4908 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4909                                  int max_pixclk)
4910 {
4911         int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
4912
4913         /* FIXME: Punit isn't quite ready yet */
4914         if (IS_CHERRYVIEW(dev_priv->dev))
4915                 return 400000;
4916
4917         /*
4918          * Really only a few cases to deal with, as only 4 CDclks are supported:
4919          *   200MHz
4920          *   267MHz
4921          *   320/333MHz (depends on HPLL freq)
4922          *   400MHz
4923          * So we check to see whether we're above 90% of the lower bin and
4924          * adjust if needed.
4925          *
4926          * We seem to get an unstable or solid color picture at 200MHz.
4927          * Not sure what's wrong. For now use 200MHz only when all pipes
4928          * are off.
4929          */
4930         if (max_pixclk > freq_320*9/10)
4931                 return 400000;
4932         else if (max_pixclk > 266667*9/10)
4933                 return freq_320;
4934         else if (max_pixclk > 0)
4935                 return 266667;
4936         else
4937                 return 200000;
4938 }
4939
4940 /* compute the max pixel clock for new configuration */
4941 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4942 {
4943         struct drm_device *dev = dev_priv->dev;
4944         struct intel_crtc *intel_crtc;
4945         int max_pixclk = 0;
4946
4947         for_each_intel_crtc(dev, intel_crtc) {
4948                 if (intel_crtc->new_enabled)
4949                         max_pixclk = max(max_pixclk,
4950                                          intel_crtc->new_config->base.adjusted_mode.crtc_clock);
4951         }
4952
4953         return max_pixclk;
4954 }
4955
4956 static void valleyview_modeset_global_pipes(struct drm_device *dev,
4957                                             unsigned *prepare_pipes)
4958 {
4959         struct drm_i915_private *dev_priv = dev->dev_private;
4960         struct intel_crtc *intel_crtc;
4961         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4962
4963         if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4964             dev_priv->vlv_cdclk_freq)
4965                 return;
4966
4967         /* disable/enable all currently active pipes while we change cdclk */
4968         for_each_intel_crtc(dev, intel_crtc)
4969                 if (intel_crtc->base.enabled)
4970                         *prepare_pipes |= (1 << intel_crtc->pipe);
4971 }
4972
4973 static void valleyview_modeset_global_resources(struct drm_device *dev)
4974 {
4975         struct drm_i915_private *dev_priv = dev->dev_private;
4976         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4977         int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4978
4979         if (req_cdclk != dev_priv->vlv_cdclk_freq) {
4980                 /*
4981                  * FIXME: We can end up here with all power domains off, yet
4982                  * with a CDCLK frequency other than the minimum. To account
4983                  * for this take the PIPE-A power domain, which covers the HW
4984                  * blocks needed for the following programming. This can be
4985                  * removed once it's guaranteed that we get here either with
4986                  * the minimum CDCLK set, or the required power domains
4987                  * enabled.
4988                  */
4989                 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
4990
4991                 if (IS_CHERRYVIEW(dev))
4992                         cherryview_set_cdclk(dev, req_cdclk);
4993                 else
4994                         valleyview_set_cdclk(dev, req_cdclk);
4995
4996                 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
4997         }
4998 }
4999
/*
 * Enable a pipe on VLV/CHV: program the DPLL state, pipe timings and
 * pipeconf, then bring up the PLL, encoders, pipe and planes in order.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	bool is_dsi;

	WARN_ON(!crtc->enabled);

	/* Nothing to do if the pipe is already running. */
	if (intel_crtc->active)
		return;

	/* DSI outputs skip the DPLL prepare/enable steps below. */
	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_prepare_pll(intel_crtc, intel_crtc->config);
		else
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
	}

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	/* CHV pipe B: select legacy blending and a zeroed background canvas. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Encoder hooks that must run before the PLL is enabled. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_enable_pll(intel_crtc, intel_crtc->config);
		else
			vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	/* Encoder hooks that must run before the pipe is enabled. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	/* Update watermarks before turning on the pipe. */
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev_priv);
}
5074
5075 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
5076 {
5077         struct drm_device *dev = crtc->base.dev;
5078         struct drm_i915_private *dev_priv = dev->dev_private;
5079
5080         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
5081         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
5082 }
5083
/*
 * Enable a pipe on i9xx-style hardware: program PLL dividers, timings and
 * pipeconf, then bring up the PLL, encoders, pipe and planes in order.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	/* Nothing to do if the pipe is already running. */
	if (intel_crtc->active)
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* On gen2, underrun reporting is deferred until planes are on (below). */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Encoder hooks that must run before the PLL/pipe are enabled. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	/* Update watermarks before turning on the pipe. */
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev_priv);
}
5145
5146 static void i9xx_pfit_disable(struct intel_crtc *crtc)
5147 {
5148         struct drm_device *dev = crtc->base.dev;
5149         struct drm_i915_private *dev_priv = dev->dev_private;
5150
5151         if (!crtc->config->gmch_pfit.control)
5152                 return;
5153
5154         assert_pipe_disabled(dev_priv, crtc->pipe);
5155
5156         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
5157                          I915_READ(PFIT_CONTROL));
5158         I915_WRITE(PFIT_CONTROL, 0);
5159 }
5160
/*
 * Disable a pipe on gmch (i9xx/VLV/CHV) hardware: tear down planes,
 * encoders, the pipe and finally the PLL, in the order the hardware
 * requires.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* Nothing to do if the pipe is already off. */
	if (!intel_crtc->active)
		return;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	intel_set_memory_cxsr(dev_priv, false);
	intel_crtc_disable_planes(crtc);

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	/* Encoder hooks that must run after the pipe is off. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI has its own PLL; everything else shuts down the DPLL here. */
	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);
}
5234
/*
 * No-op: gmch platforms have no extra state to tear down after
 * crtc_disable; the hook exists because dev_priv->display.off() is
 * called unconditionally (see intel_crtc_disable()).
 */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
5238
5239 /* Master function to enable/disable CRTC and corresponding power wells */
5240 void intel_crtc_control(struct drm_crtc *crtc, bool enable)
5241 {
5242         struct drm_device *dev = crtc->dev;
5243         struct drm_i915_private *dev_priv = dev->dev_private;
5244         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5245         enum intel_display_power_domain domain;
5246         unsigned long domains;
5247
5248         if (enable) {
5249                 if (!intel_crtc->active) {
5250                         domains = get_crtc_power_domains(crtc);
5251                         for_each_power_domain(domain, domains)
5252                                 intel_display_power_get(dev_priv, domain);
5253                         intel_crtc->enabled_power_domains = domains;
5254
5255                         dev_priv->display.crtc_enable(crtc);
5256                 }
5257         } else {
5258                 if (intel_crtc->active) {
5259                         dev_priv->display.crtc_disable(crtc);
5260
5261                         domains = intel_crtc->enabled_power_domains;
5262                         for_each_power_domain(domain, domains)
5263                                 intel_display_power_put(dev_priv, domain);
5264                         intel_crtc->enabled_power_domains = 0;
5265                 }
5266         }
5267 }
5268
5269 /**
5270  * Sets the power management mode of the pipe and plane.
5271  */
5272 void intel_crtc_update_dpms(struct drm_crtc *crtc)
5273 {
5274         struct drm_device *dev = crtc->dev;
5275         struct intel_encoder *intel_encoder;
5276         bool enable = false;
5277
5278         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5279                 enable |= intel_encoder->connectors_active;
5280
5281         intel_crtc_control(crtc, enable);
5282 }
5283
5284 static void intel_crtc_disable(struct drm_crtc *crtc)
5285 {
5286         struct drm_device *dev = crtc->dev;
5287         struct drm_connector *connector;
5288         struct drm_i915_private *dev_priv = dev->dev_private;
5289
5290         /* crtc should still be enabled when we disable it. */
5291         WARN_ON(!crtc->enabled);
5292
5293         dev_priv->display.crtc_disable(crtc);
5294         dev_priv->display.off(crtc);
5295
5296         crtc->primary->funcs->disable_plane(crtc->primary);
5297
5298         /* Update computed state. */
5299         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5300                 if (!connector->encoder || !connector->encoder->crtc)
5301                         continue;
5302
5303                 if (connector->encoder->crtc != crtc)
5304                         continue;
5305
5306                 connector->dpms = DRM_MODE_DPMS_OFF;
5307                 to_intel_encoder(connector->encoder)->connectors_active = false;
5308         }
5309 }
5310
/* Default encoder destroy hook: clean up core state, then free ours. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *enc = to_intel_encoder(encoder);

	/*
	 * Tear down the DRM core bookkeeping before freeing the
	 * driver-private container it is embedded in.
	 */
	drm_encoder_cleanup(encoder);
	kfree(enc);
}
5318
5319 /* Simple dpms helper for encoders with just one connector, no cloning and only
5320  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
5321  * state of the entire output pipe. */
5322 static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
5323 {
5324         if (mode == DRM_MODE_DPMS_ON) {
5325                 encoder->connectors_active = true;
5326
5327                 intel_crtc_update_dpms(encoder->base.crtc);
5328         } else {
5329                 encoder->connectors_active = false;
5330
5331                 intel_crtc_update_dpms(encoder->base.crtc);
5332         }
5333 }
5334
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      connector->base.name);

		/* there is no real hw state for MST connectors */
		if (connector->mst_port)
			return;

		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		/*
		 * NOTE(review): &encoder->base is evaluated before the
		 * "if (encoder)" check below — looks like encoder is assumed
		 * non-NULL whenever get_hw_state() reports the connector as
		 * on; confirm that assumption holds.
		 */
		I915_STATE_WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");

		if (encoder) {
			I915_STATE_WARN(!encoder->connectors_active,
			     "encoder->connectors_active not set\n");

			/* Ask the hw which pipe the encoder is driving. */
			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
			if (I915_STATE_WARN_ON(!encoder->base.crtc))
				return;

			crtc = encoder->base.crtc;

			I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
			     "encoder active on the wrong pipe\n");
		}
	}
}
5376
5377 /* Even simpler default implementation, if there's really no special case to
5378  * consider. */
5379 void intel_connector_dpms(struct drm_connector *connector, int mode)
5380 {
5381         /* All the simple cases only support two dpms states. */
5382         if (mode != DRM_MODE_DPMS_ON)
5383                 mode = DRM_MODE_DPMS_OFF;
5384
5385         if (mode == connector->dpms)
5386                 return;
5387
5388         connector->dpms = mode;
5389
5390         /* Only need to change hw state when actually enabled */
5391         if (connector->encoder)
5392                 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
5393
5394         intel_modeset_check_state(connector->dev);
5395 }
5396
5397 /* Simple connector->get_hw_state implementation for encoders that support only
5398  * one connector and no cloning and hence the encoder state determines the state
5399  * of the connector. */
5400 bool intel_connector_get_hw_state(struct intel_connector *connector)
5401 {
5402         enum pipe pipe = 0;
5403         struct intel_encoder *encoder = connector->encoder;
5404
5405         return encoder->get_hw_state(encoder, &pipe);
5406 }
5407
/*
 * Validate the requested FDI lane count against the per-platform limits
 * and the lane-sharing constraints between pipes. Returns false when the
 * configuration cannot work.
 */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum on any platform. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	/* Two-pipe platforms have no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		/* Pipe B may only use >2 lanes when pipe C is off. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		/* Pipe C shares lanes with pipe B; both are capped at 2. */
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config->fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}
5465
#define RETRY 1
/*
 * Compute the FDI link parameters (lane count and m/n values) for
 * @pipe_config. If the link can't carry the mode at the current bpp the
 * bpp is reduced and the computation redone; returns RETRY in that case
 * so the caller can recompute dependent state, 0 on success, or -EINVAL
 * when no working configuration exists.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	/* Validate the lane count against platform/sharing constraints. */
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	/* On failure, drop 2 bits per component (down to 6bpc) and retry. */
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}
5512
5513 static void hsw_compute_ips_config(struct intel_crtc *crtc,
5514                                    struct intel_crtc_state *pipe_config)
5515 {
5516         pipe_config->ips_enabled = i915.enable_ips &&
5517                                    hsw_crtc_supports_ips(crtc) &&
5518                                    pipe_config->pipe_bpp <= 24;
5519 }
5520
/*
 * Apply platform-independent constraints to @pipe_config: clock limits,
 * pixel doubling, horizontal size parity, bpp clamping, IPS and FDI.
 * Returns 0 on success or a negative errno when the mode can't work.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Even doubled, the clock may still be out of range. */
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	/* Clamp pipe bpp to what the platform generation supports. */
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds. */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/* PCH-attached outputs also need a valid FDI configuration. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
5583
/*
 * Return the current VLV display core clock (cdclk) in kHz, derived from
 * the cached HPLL VCO frequency and the CCK display clock divider.
 */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
        int divider;

        /* FIXME: Punit isn't quite ready yet */
        if (IS_CHERRYVIEW(dev))
                return 400000;

        /* Cache the HPLL VCO frequency on first use. */
        if (dev_priv->hpll_freq == 0)
                dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

        mutex_lock(&dev_priv->dpio_lock);
        val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
        mutex_unlock(&dev_priv->dpio_lock);

        divider = val & DISPLAY_FREQUENCY_VALUES;

        /* The status field should mirror the requested divider. */
        WARN((val & DISPLAY_FREQUENCY_STATUS) !=
             (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
             "cdclk change in progress\n");

        /* cdclk = 2 * hpll / (divider + 1), rounded to nearest kHz. */
        return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
5609
/* i945: display core clock is a fixed 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
        const int cdclk_khz = 400000;

        return cdclk_khz;
}
5614
/* i915: display core clock is a fixed 333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
        const int cdclk_khz = 333000;

        return cdclk_khz;
}
5619
/* Remaining i9xx variants: display core clock is a fixed 200 MHz. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
        const int cdclk_khz = 200000;

        return cdclk_khz;
}
5624
/*
 * Read the Pineview display core clock selection from the GCFGC PCI
 * config register and translate it to kHz.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
        u16 gcfgc = 0;

        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

        switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
        case GC_DISPLAY_CLOCK_267_MHZ_PNV:
                return 267000;
        case GC_DISPLAY_CLOCK_333_MHZ_PNV:
                return 333000;
        case GC_DISPLAY_CLOCK_444_MHZ_PNV:
                return 444000;
        case GC_DISPLAY_CLOCK_200_MHZ_PNV:
                return 200000;
        default:
                DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
                /* fall through - treat unknown selections as 133 MHz */
        case GC_DISPLAY_CLOCK_133_MHZ_PNV:
                return 133000;
        case GC_DISPLAY_CLOCK_167_MHZ_PNV:
                return 167000;
        }
}
5648
5649 static int i915gm_get_display_clock_speed(struct drm_device *dev)
5650 {
5651         u16 gcfgc = 0;
5652
5653         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5654
5655         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5656                 return 133000;
5657         else {
5658                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5659                 case GC_DISPLAY_CLOCK_333_MHZ:
5660                         return 333000;
5661                 default:
5662                 case GC_DISPLAY_CLOCK_190_200_MHZ:
5663                         return 190000;
5664                 }
5665         }
5666 }
5667
/* i865: display core clock is a fixed 266 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
        const int cdclk_khz = 266000;

        return cdclk_khz;
}
5672
static int i855_get_display_clock_speed(struct drm_device *dev)
{
        /*
         * NOTE(review): hpllcc is never read from hardware here, so the
         * switch below always sees 0 and returns whatever case 0 maps
         * to - TODO confirm whether the HPLLCC read was dropped on
         * purpose.
         */
        u16 hpllcc = 0;
        /* Assume that the hardware is in the high speed state.  This
         * should be the default.
         */
        switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
        case GC_CLOCK_133_200:
        case GC_CLOCK_100_200:
                return 200000;
        case GC_CLOCK_166_250:
                return 250000;
        case GC_CLOCK_100_133:
                return 133000;
        }

        /* Shouldn't happen */
        return 0;
}
5692
/* i830: display core clock is a fixed 133 MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
        const int cdclk_khz = 133000;

        return cdclk_khz;
}
5697
5698 static void
5699 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5700 {
5701         while (*num > DATA_LINK_M_N_MASK ||
5702                *den > DATA_LINK_M_N_MASK) {
5703                 *num >>= 1;
5704                 *den >>= 1;
5705         }
5706 }
5707
5708 static void compute_m_n(unsigned int m, unsigned int n,
5709                         uint32_t *ret_m, uint32_t *ret_n)
5710 {
5711         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5712         *ret_m = div_u64((uint64_t) m * *ret_n, n);
5713         intel_reduce_m_n_ratio(ret_m, ret_n);
5714 }
5715
5716 void
5717 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5718                        int pixel_clock, int link_clock,
5719                        struct intel_link_m_n *m_n)
5720 {
5721         m_n->tu = 64;
5722
5723         compute_m_n(bits_per_pixel * pixel_clock,
5724                     link_clock * nlanes * 8,
5725                     &m_n->gmch_m, &m_n->gmch_n);
5726
5727         compute_m_n(pixel_clock, link_clock,
5728                     &m_n->link_m, &m_n->link_n);
5729 }
5730
5731 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5732 {
5733         if (i915.panel_use_ssc >= 0)
5734                 return i915.panel_use_ssc != 0;
5735         return dev_priv->vbt.lvds_use_ssc
5736                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5737 }
5738
5739 static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
5740 {
5741         struct drm_device *dev = crtc->base.dev;
5742         struct drm_i915_private *dev_priv = dev->dev_private;
5743         int refclk;
5744
5745         if (IS_VALLEYVIEW(dev)) {
5746                 refclk = 100000;
5747         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5748             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5749                 refclk = dev_priv->vbt.lvds_ssc_freq;
5750                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5751         } else if (!IS_GEN2(dev)) {
5752                 refclk = 96000;
5753         } else {
5754                 refclk = 48000;
5755         }
5756
5757         return refclk;
5758 }
5759
5760 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
5761 {
5762         return (1 << dpll->n) << 16 | dpll->m2;
5763 }
5764
5765 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5766 {
5767         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
5768 }
5769
5770 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5771                                      struct intel_crtc_state *crtc_state,
5772                                      intel_clock_t *reduced_clock)
5773 {
5774         struct drm_device *dev = crtc->base.dev;
5775         u32 fp, fp2 = 0;
5776
5777         if (IS_PINEVIEW(dev)) {
5778                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
5779                 if (reduced_clock)
5780                         fp2 = pnv_dpll_compute_fp(reduced_clock);
5781         } else {
5782                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
5783                 if (reduced_clock)
5784                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
5785         }
5786
5787         crtc_state->dpll_hw_state.fp0 = fp;
5788
5789         crtc->lowfreq_avail = false;
5790         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5791             reduced_clock && i915.powersave) {
5792                 crtc_state->dpll_hw_state.fp1 = fp2;
5793                 crtc->lowfreq_avail = true;
5794         } else {
5795                 crtc_state->dpll_hw_state.fp1 = fp;
5796         }
5797 }
5798
/*
 * Work around PLLB's opamp auto-calibrating to the max value of 0x3f:
 * force-enable it through DPIO and program a sane value instead.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
                pipe)
{
        u32 reg_val;

        /*
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x8cffffff;
        /*
         * NOTE(review): this plain assignment discards the masked read
         * above (dead store) - TODO confirm whether "|=" was intended.
         */
        reg_val = 0x8c000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
5827
/* Program the PCH transcoder data/link M1/N1 registers for @crtc's pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;

        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
        I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
        I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
5840
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * per-transcoder registers are used (optionally including the M2/N2 set
 * for DRRS); older parts use the per-pipe G4X registers. @m2_n2 may be
 * NULL when no second set is needed.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
        enum transcoder transcoder = crtc->config->cpu_transcoder;

        if (INTEL_INFO(dev)->gen >= 5) {
                I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
                I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
                /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
                 * for gen < 8) and if DRRS is supported (to make sure the
                 * registers are not unnecessarily accessed).
                 */
                if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
                        crtc->config->has_drrs) {
                        I915_WRITE(PIPE_DATA_M2(transcoder),
                                        TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
                        I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
                        I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
                }
        } else {
                I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
                I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
        }
}
5874
5875 void intel_dp_set_m_n(struct intel_crtc *crtc)
5876 {
5877         if (crtc->config->has_pch_encoder)
5878                 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
5879         else
5880                 intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
5881                                                    &crtc->config->dp_m2_n2);
5882 }
5883
/*
 * Compute the VLV DPLL/DPLL_MD register values and stash them in
 * pipe_config->dpll_hw_state. No hardware access happens here; the
 * values are written out later by the prepare/enable path.
 */
static void vlv_update_pll(struct intel_crtc *crtc,
                           struct intel_crtc_state *pipe_config)
{
        u32 dpll, dpll_md;

        /*
         * Enable DPIO clock input. We should never disable the reference
         * clock for pipe B, since VGA hotplug / manual detection depends
         * on it.
         */
        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
                DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
        /* We should never disable this, set it here for state tracking */
        if (crtc->pipe == PIPE_B)
                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
        dpll |= DPLL_VCO_ENABLE;
        pipe_config->dpll_hw_state.dpll = dpll;

        dpll_md = (pipe_config->pixel_multiplier - 1)
                << DPLL_MD_UDI_MULTIPLIER_SHIFT;
        pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
5906
/*
 * Program the VLV PLL dividers and analog tuning values through DPIO,
 * following the sequence from the eDP/HDMI DPIO driver vbios notes.
 * Must run before the PLL is enabled; the statement order is part of
 * the documented programming sequence.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
        u32 coreclk, reg_val;

        mutex_lock(&dev_priv->dpio_lock);

        bestn = pipe_config->dpll.n;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;

        /* See eDP HDMI DPIO driver vbios notes doc */

        /* PLL B needs special handling */
        if (pipe == PIPE_B)
                vlv_pllb_recal_opamp(dev_priv, pipe);

        /* Set up Tx target for periodic Rcomp update */
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

        /* Disable target IRef on PLL */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

        /* Disable fast lock */
        vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
        mdiv |= ((bestn << DPIO_N_SHIFT));
        mdiv |= (1 << DPIO_K_SHIFT);

        /*
         * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
         * but we don't support that).
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Calibration is enabled in a second write of the same register. */
        mdiv |= DPIO_ENABLE_CALIBRATION;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
            intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
            intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);

        if (pipe_config->has_dp_encoder) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }

        /* Core clock tuning; extra bit for DP/eDP outputs. */
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
            intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
        mutex_unlock(&dev_priv->dpio_lock);
}
5997
5998 static void chv_update_pll(struct intel_crtc *crtc,
5999                            struct intel_crtc_state *pipe_config)
6000 {
6001         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
6002                 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
6003                 DPLL_VCO_ENABLE;
6004         if (crtc->pipe != PIPE_A)
6005                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6006
6007         pipe_config->dpll_hw_state.dpll_md =
6008                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6009 }
6010
/*
 * Program the CHV PLL dividers and loop filter through DPIO. The refclk
 * and SSC bits are enabled first (with the VCO still off); the VCO itself
 * is turned on later by the enable path. Statement order follows the
 * hardware programming sequence.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
        int dpll_reg = DPLL(crtc->pipe);
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, intcoeff;
        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
        int refclk;

        bestn = pipe_config->dpll.n;
        /* m2 carries a 22-bit fractional part below the integer bits. */
        bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2 >> 22;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;

        /*
         * Enable Refclk and SSC
         */
        I915_WRITE(dpll_reg,
                   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

        mutex_lock(&dev_priv->dpio_lock);

        /* p1 and p2 divider */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
                        5 << DPIO_CHV_S1_DIV_SHIFT |
                        bestp1 << DPIO_CHV_P1_DIV_SHIFT |
                        bestp2 << DPIO_CHV_P2_DIV_SHIFT |
                        1 << DPIO_CHV_K_DIV_SHIFT);

        /* Feedback post-divider - m2 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

        /* Feedback refclk divider - n and m1 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
                        DPIO_CHV_M1_DIV_BY_2 |
                        1 << DPIO_CHV_N_DIV_SHIFT);

        /* M2 fraction division */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

        /* M2 fraction division enable */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
                       DPIO_CHV_FRAC_DIV_EN |
                       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));

        /* Loop filter */
        refclk = i9xx_get_refclk(crtc, 0);
        loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
                2 << DPIO_CHV_GAIN_CTRL_SHIFT;
        /* Integral coefficient depends on the reference clock in use. */
        if (refclk == 100000)
                intcoeff = 11;
        else if (refclk == 38400)
                intcoeff = 10;
        else
                intcoeff = 9;
        loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

        /* AFC Recal */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
                        vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
                        DPIO_AFC_RECAL);

        mutex_unlock(&dev_priv->dpio_lock);
}
6081
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
                      const struct dpll *dpll)
{
        struct intel_crtc *crtc =
                to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
        /* Minimal state: just enough for the update/prepare/enable path. */
        struct intel_crtc_state pipe_config = {
                .pixel_multiplier = 1,
                .dpll = *dpll,
        };

        if (IS_CHERRYVIEW(dev)) {
                chv_update_pll(crtc, &pipe_config);
                chv_prepare_pll(crtc, &pipe_config);
                chv_enable_pll(crtc, &pipe_config);
        } else {
                vlv_update_pll(crtc, &pipe_config);
                vlv_prepare_pll(crtc, &pipe_config);
                vlv_enable_pll(crtc, &pipe_config);
        }
}
6112
/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where we need
 * the PLL disabled even when @pipe is not going to be enabled.
 */
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
{
        if (IS_CHERRYVIEW(dev))
                chv_disable_pll(to_i915(dev), pipe);
        else
                vlv_disable_pll(to_i915(dev), pipe);
}
6128
/*
 * Assemble the gen3/4 DPLL (and gen4+ DPLL_MD) register values into
 * crtc_state->dpll_hw_state, and program the FP dividers via
 * i9xx_update_pll_dividers(). No hardware access happens here.
 */
static void i9xx_update_pll(struct intel_crtc *crtc,
                            struct intel_crtc_state *crtc_state,
                            intel_clock_t *reduced_clock,
                            int num_connectors)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpll;
        bool is_sdvo;
        struct dpll *clock = &crtc_state->dpll;

        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

        is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
                intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);

        dpll = DPLL_VGA_MODE_DIS;

        if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;

        /* Pixel multiplier for platforms that encode it in DPLL itself. */
        if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                dpll |= (crtc_state->pixel_multiplier - 1)
                        << SDVO_MULTIPLIER_SHIFT_HIRES;
        }

        if (is_sdvo)
                dpll |= DPLL_SDVO_HIGH_SPEED;

        if (crtc_state->has_dp_encoder)
                dpll |= DPLL_SDVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev))
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
        else {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                if (IS_G4X(dev) && reduced_clock)
                        dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
        }
        /* p2 post divider select */
        switch (clock->p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
        case 7:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                break;
        case 10:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                break;
        case 14:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                break;
        }
        if (INTEL_INFO(dev)->gen >= 4)
                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

        /* Reference clock selection. */
        if (crtc_state->sdvo_tv_clock)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
        else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        dpll |= DPLL_VCO_ENABLE;
        crtc_state->dpll_hw_state.dpll = dpll;

        if (INTEL_INFO(dev)->gen >= 4) {
                u32 dpll_md = (crtc_state->pixel_multiplier - 1)
                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                crtc_state->dpll_hw_state.dpll_md = dpll_md;
        }
}
6205
/*
 * Assemble the gen2 DPLL register value into crtc_state->dpll_hw_state
 * and program the FP dividers. No hardware access happens here.
 */
static void i8xx_update_pll(struct intel_crtc *crtc,
                            struct intel_crtc_state *crtc_state,
                            intel_clock_t *reduced_clock,
                            int num_connectors)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpll;
        struct dpll *clock = &crtc_state->dpll;

        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

        dpll = DPLL_VGA_MODE_DIS;

        /* p1/p2 encodings differ between LVDS and the other outputs. */
        if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
                        dpll |= PLL_P1_DIVIDE_BY_TWO;
                else
                        dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                if (clock->p2 == 4)
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }

        if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
                dpll |= DPLL_DVO_2X_MODE;

        /* Reference clock selection. */
        if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        dpll |= DPLL_VCO_ENABLE;
        crtc_state->dpll_hw_state.dpll = dpll;
}
6243
/*
 * Write the pipe/transcoder timing registers (H/V total, blank, sync,
 * vsyncshift and PIPESRC) from the crtc's adjusted mode, with interlace
 * adjustments and the HSW eDP VTOTAL workaround applied.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        struct drm_display_mode *adjusted_mode =
                &intel_crtc->config->base.adjusted_mode;
        uint32_t crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;

        /* We need to be careful not to changed the adjusted mode, for otherwise
         * the hw state checker will get angry at the mismatch. */
        crtc_vtotal = adjusted_mode->crtc_vtotal;
        crtc_vblank_end = adjusted_mode->crtc_vblank_end;

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                /* the chip adds 2 halflines automatically */
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;

                if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
                                adjusted_mode->crtc_htotal / 2;
                if (vsyncshift < 0)
                        vsyncshift += adjusted_mode->crtc_htotal;
        }

        /* VSYNCSHIFT only exists on gen4+. */
        if (INTEL_INFO(dev)->gen > 3)
                I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

        /* Each register packs (start - 1) low and (end - 1) high. */
        I915_WRITE(HTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
        I915_WRITE(HBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_hblank_start - 1) |
                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
        I915_WRITE(HSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_hsync_start - 1) |
                   ((adjusted_mode->crtc_hsync_end - 1) << 16));

        I915_WRITE(VTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_vdisplay - 1) |
                   ((crtc_vtotal - 1) << 16));
        I915_WRITE(VBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_vblank_start - 1) |
                   ((crtc_vblank_end - 1) << 16));
        I915_WRITE(VSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
         * documented on the DDI_FUNC_CTL register description, EDP Input Select
         * bits. */
        if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
            (pipe == PIPE_B || pipe == PIPE_C))
                I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

        /* pipesrc controls the size that is scaled from, which should
         * always be the user's requested size.
         */
        I915_WRITE(PIPESRC(pipe),
                   ((intel_crtc->config->pipe_src_w - 1) << 16) |
                   (intel_crtc->config->pipe_src_h - 1));
}
6312
/*
 * Read the pipe/transcoder timing registers back into the adjusted mode
 * of @pipe_config.  The hardware stores every timing value minus one,
 * hence the "+ 1" on each field.  Low 16 bits of each register hold the
 * display/start value, high 16 bits the total/end value.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	/* Horizontal timings. */
	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Vertical timings, same register layout as the horizontal ones. */
	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* For interlaced modes the hardware holds vtotal and
		 * vblank_end one lower than the mode value; compensate
		 * here so the readout matches what was requested. */
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	/* PIPESRC stores width in the high half, height in the low half. */
	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	/* The user-requested mode size is whatever the pipe scales from. */
	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
6354
6355 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6356                                  struct intel_crtc_state *pipe_config)
6357 {
6358         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
6359         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
6360         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
6361         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
6362
6363         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
6364         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
6365         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
6366         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
6367
6368         mode->flags = pipe_config->base.adjusted_mode.flags;
6369
6370         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
6371         mode->flags |= pipe_config->base.adjusted_mode.flags;
6372 }
6373
/*
 * Build and write the PIPECONF register for a gmch-style (pre-ILK,
 * VLV/CHV) pipe from the staged CRTC configuration.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* On quirked machines the BIOS-enabled pipe must never be turned
	 * off, so preserve the current enable bit instead of clearing it. */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	/* Interlace selection: gen < 4 and SDVO outputs need the
	 * field-indication flavour, everything else uses sync shift. */
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
6436
/*
 * Compute the DPLL divisors for a gmch-style CRTC and dispatch to the
 * platform-specific PLL state update.
 *
 * Returns 0 on success, -EINVAL if no divisor set can produce the
 * requested port clock.  DSI outputs bypass the DPLL entirely.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
                                   struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
        bool ok, has_reduced_clock = false;
        bool is_lvds = false, is_dsi = false;
        struct intel_encoder *encoder;
        const intel_limit_t *limit;

        /* Scan the encoders routed to this CRTC: LVDS enables the
         * downclock path, DSI means the DPLL isn't used at all. */
        for_each_intel_encoder(dev, encoder) {
                if (encoder->new_crtc != crtc)
                        continue;

                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
                case INTEL_OUTPUT_DSI:
                        is_dsi = true;
                        break;
                default:
                        break;
                }

                num_connectors++;
        }

        /* MIPI DSI has its own PLL; nothing to compute here. */
        if (is_dsi)
                return 0;

        if (!crtc_state->clock_set) {
                refclk = i9xx_get_refclk(crtc, num_connectors);

                /*
                 * Returns a set of divisors for the desired target clock with
                 * the given refclk, or FALSE.  The returned values represent
                 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
                 * 2) / p1 / p2.
                 */
                limit = intel_limit(crtc, refclk);
                ok = dev_priv->display.find_dpll(limit, crtc,
                                                 crtc_state->port_clock,
                                                 refclk, NULL, &clock);
                if (!ok) {
                        DRM_ERROR("Couldn't find PLL settings for mode!\n");
                        return -EINVAL;
                }

                if (is_lvds && dev_priv->lvds_downclock_avail) {
                        /*
                         * Ensure we match the reduced clock's P to the target
                         * clock.  If the clocks don't match, we can't switch
                         * the display clock by using the FP0/FP1. In such case
                         * we will disable the LVDS downclock feature.
                         */
                        has_reduced_clock =
                                dev_priv->display.find_dpll(limit, crtc,
                                                            dev_priv->lvds_downclock,
                                                            refclk, &clock,
                                                            &reduced_clock);
                }
                /* Compat-code for transition, will disappear. */
                crtc_state->dpll.n = clock.n;
                crtc_state->dpll.m1 = clock.m1;
                crtc_state->dpll.m2 = clock.m2;
                crtc_state->dpll.p1 = clock.p1;
                crtc_state->dpll.p2 = clock.p2;
        }

        /* Hand the divisors to the platform-specific PLL update. */
        if (IS_GEN2(dev)) {
                i8xx_update_pll(crtc, crtc_state,
                                has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
        } else if (IS_CHERRYVIEW(dev)) {
                chv_update_pll(crtc, crtc_state);
        } else if (IS_VALLEYVIEW(dev)) {
                vlv_update_pll(crtc, crtc_state);
        } else {
                i9xx_update_pll(crtc, crtc_state,
                                has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
        }

        return 0;
}
6525
/*
 * Read back the panel fitter state for a gmch pipe, but only if the
 * fitter is enabled and actually attached to this CRTC's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;

        /* No panel fitter on 830 or on gen2/3 desktop parts. */
        if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
                return;

        tmp = I915_READ(PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;

        /* Check whether the pfit is attached to our pipe. */
        if (INTEL_INFO(dev)->gen < 4) {
                /* Before gen4 the fitter is hardwired to pipe B. */
                if (crtc->pipe != PIPE_B)
                        return;
        } else {
                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                        return;
        }

        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
        /* LVDS border bits live in the LVDS register until gen5. */
        if (INTEL_INFO(dev)->gen < 5)
                pipe_config->gmch_pfit.lvds_border_bits =
                        I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
6555
/*
 * Read the VLV DPLL divisors back over the DPIO sideband and convert
 * them into the port clock for @pipe_config.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = pipe_config->cpu_transcoder;
        intel_clock_t clock;
        u32 mdiv;
        int refclk = 100000;    /* VLV DPLL reference clock, in kHz */

        /* In case of MIPI DPLL will not even be used */
        if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
                return;

        /* Sideband accesses must be serialized under dpio_lock. */
        mutex_lock(&dev_priv->dpio_lock);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        mutex_unlock(&dev_priv->dpio_lock);

        /* Unpack the divisor fields from the DW3 register. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        vlv_clock(refclk, &clock);

        /* clock.dot is the fast clock */
        pipe_config->port_clock = clock.dot / 5;
}
6585
/*
 * Read back the primary plane state programmed by the BIOS/firmware and
 * describe it in @plane_config, allocating a framebuffer struct for the
 * takeover.  On allocation failure the plane config is simply left empty.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, base, offset;
        int pipe = crtc->pipe, plane = crtc->plane;
        int fourcc, pixel_format;
        int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        val = I915_READ(DSPCNTR(plane));

        /* The tiling bit only exists from gen4 onwards. */
        if (INTEL_INFO(dev)->gen >= 4)
                if (val & DISPPLANE_TILED)
                        plane_config->tiling = I915_TILING_X;

        /* Translate the hardware pixel format into a drm fourcc. */
        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->pixel_format = fourcc;
        fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

        /* Gen4+ uses a surface base + tile/linear offset pair; earlier
         * parts have a single plane address register. */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(plane));
                else
                        offset = I915_READ(DSPLINOFF(plane));
                base = I915_READ(DSPSURF(plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(plane));
        }
        plane_config->base = base;

        /* PIPESRC: width in high half, height in low half, minus one. */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(pipe));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);

        plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);

        DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), plane, fb->width, fb->height,
                      fb->bits_per_pixel, base, fb->pitches[0],
                      plane_config->size);

        crtc->base.primary->fb = fb;
}
6648
/*
 * Read the CHV DPLL divisors back over the DPIO sideband and convert
 * them into the port clock for @pipe_config.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        intel_clock_t clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
        int refclk = 100000;    /* CHV DPLL reference clock, in kHz */

        /* Sideband accesses must be serialized under dpio_lock. */
        mutex_lock(&dev_priv->dpio_lock);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        mutex_unlock(&dev_priv->dpio_lock);

        /* Unpack the divisor fields; m2 spans DW0 (integer part, high
         * bits) and DW2 (fractional part, low 22 bits). */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        chv_clock(refclk, &clock);

        /* clock.dot is the fast clock */
        pipe_config->port_clock = clock.dot / 5;
}
6678
/*
 * Read the full hardware state of a gmch pipe into @pipe_config.
 *
 * Returns false when the pipe's power domain is off or the pipe is
 * disabled; true when a valid configuration was read out.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;

        /* Touching the registers of a powered-down pipe would fault. */
        if (!intel_display_power_is_enabled(dev_priv,
                                            POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;

        /* On gmch platforms transcoder and pipe are always 1:1. */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                return false;

        /* Only g4x+/VLV expose bpc selection in PIPECONF. */
        if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        if (INTEL_INFO(dev)->gen < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier location varies by generation. */
        if (INTEL_INFO(dev)->gen >= 4) {
                tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev)) {
                /*
                 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
                 * on 830. Filter it out here so that we don't
                 * report errors due to that.
                 */
                if (IS_I830(dev))
                        pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        /* Derive the port clock from the platform's PLL layout. */
        if (IS_CHERRYVIEW(dev))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        return true;
}
6768
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) for
 * Ironlake-class PCHs.  The desired final register value is computed
 * first; if it differs from the current one, the individual clock
 * sources are then enabled/disabled one at a time with the mandated
 * 200us settling delay between steps, since they must not be switched
 * all at once.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(dev, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        /* Only port-A eDP is driven by the CPU. */
                        if (enc_to_dig_port(&encoder->base)->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* IBX may use an external CK505 clock chip (per VBT); with it,
         * SSC availability follows the CK505.  Later PCHs always have
         * an internal SSC-capable source. */
        if (HAS_PCH_IBX(dev)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
                      has_panel, has_lvds, has_ck505);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else {
                final |= DREF_SSC_SOURCE_DISABLE;
                final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        }

        /* Nothing to do if the hardware already matches. */
        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling SSC entirely\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                /* Turn off the SSC source */
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_DISABLE;

                /* Turn off SSC1 */
                val &= ~DREF_SSC1_ENABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        }

        /* The stepwise sequence must have converged on the precomputed
         * target value; anything else is a driver bug. */
        BUG_ON(val != final);
}
6916
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait (up to 100us) for the status bit to confirm, then de-assert
 * and wait for the status to clear again.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
                               FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");

        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
                                FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
6937
/* WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY tuning registers over the SBI sideband.  The
 * register offsets and values are magic numbers taken from the Bspec
 * workaround; each read-modify-write is applied to a lane-pair register
 * and then to its 0x1xx sibling for the second channel.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
7012
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
                                 bool with_fdi)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg, tmp;

        /* Sanitize impossible parameter combinations: FDI needs
         * downspread, and the LP PCH has no FDI at all. */
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
                 with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        mutex_lock(&dev_priv->dpio_lock);

        /* Ungate the SSC but keep the path-alt (bypass) clock selected
         * until it has settled. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Switch from the bypass clock to the spread clock. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* The buffer-enable override register differs between the LP
         * and the full-featured PCH variants. */
        reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
               SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->dpio_lock);
}
7059
/* Sequence to disable CLKOUT_DP (inverse of lpt_enable_clkout_dp):
 * turn the clock buffer off first, then fall back to the bypass clock
 * before finally gating the SSC. */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg, tmp;

        mutex_lock(&dev_priv->dpio_lock);

        /* Same LP vs full PCH register split as on the enable path. */
        reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
               SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        /* Select the bypass clock and let it settle
                         * before gating the SSC. */
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->dpio_lock);
}
7087
7088 static void lpt_init_pch_refclk(struct drm_device *dev)
7089 {
7090         struct intel_encoder *encoder;
7091         bool has_vga = false;
7092
7093         for_each_intel_encoder(dev, encoder) {
7094                 switch (encoder->type) {
7095                 case INTEL_OUTPUT_ANALOG:
7096                         has_vga = true;
7097                         break;
7098                 default:
7099                         break;
7100                 }
7101         }
7102
7103         if (has_vga)
7104                 lpt_enable_clkout_dp(dev, true, true);
7105         else
7106                 lpt_disable_clkout_dp(dev);
7107 }
7108
7109 /*
7110  * Initialize reference clocks when the driver loads
7111  */
7112 void intel_init_pch_refclk(struct drm_device *dev)
7113 {
7114         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
7115                 ironlake_init_pch_refclk(dev);
7116         else if (HAS_PCH_LPT(dev))
7117                 lpt_init_pch_refclk(dev);
7118 }
7119
7120 static int ironlake_get_refclk(struct drm_crtc *crtc)
7121 {
7122         struct drm_device *dev = crtc->dev;
7123         struct drm_i915_private *dev_priv = dev->dev_private;
7124         struct intel_encoder *encoder;
7125         int num_connectors = 0;
7126         bool is_lvds = false;
7127
7128         for_each_intel_encoder(dev, encoder) {
7129                 if (encoder->new_crtc != to_intel_crtc(crtc))
7130                         continue;
7131
7132                 switch (encoder->type) {
7133                 case INTEL_OUTPUT_LVDS:
7134                         is_lvds = true;
7135                         break;
7136                 default:
7137                         break;
7138                 }
7139                 num_connectors++;
7140         }
7141
7142         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7143                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
7144                               dev_priv->vbt.lvds_ssc_freq);
7145                 return dev_priv->vbt.lvds_ssc_freq;
7146         }
7147
7148         return 120000;
7149 }
7150
/* Program the PIPECONF register for the pipe from the current crtc
 * config: bits-per-channel, dithering, interlace mode and color range. */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        uint32_t val;

        val = 0;

        /* pipe_bpp is total bits per pixel (3 channels); translate it
         * into the PIPECONF bits-per-channel field. */
        switch (intel_crtc->config->pipe_bpp) {
        case 18:
                val |= PIPECONF_6BPC;
                break;
        case 24:
                val |= PIPECONF_8BPC;
                break;
        case 30:
                val |= PIPECONF_10BPC;
                break;
        case 36:
                val |= PIPECONF_12BPC;
                break;
        default:
                /* Case prevented by intel_choose_pipe_bpp_dither. */
                BUG();
        }

        if (intel_crtc->config->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;

        /* Limited (16-235) color range selection. */
        if (intel_crtc->config->limited_color_range)
                val |= PIPECONF_COLOR_RANGE_SELECT;

        I915_WRITE(PIPECONF(pipe), val);
        POSTING_READ(PIPECONF(pipe));
}
7192
7193 /*
7194  * Set up the pipe CSC unit.
7195  *
7196  * Currently only full range RGB to limited range RGB conversion
7197  * is supported, but eventually this should handle various
7198  * RGB<->YCbCr scenarios as well.
7199  */
7200 static void intel_set_pipe_csc(struct drm_crtc *crtc)
7201 {
7202         struct drm_device *dev = crtc->dev;
7203         struct drm_i915_private *dev_priv = dev->dev_private;
7204         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7205         int pipe = intel_crtc->pipe;
7206         uint16_t coeff = 0x7800; /* 1.0 */
7207
7208         /*
7209          * TODO: Check what kind of values actually come out of the pipe
7210          * with these coeff/postoff values and adjust to get the best
7211          * accuracy. Perhaps we even need to take the bpc value into
7212          * consideration.
7213          */
7214
7215         if (intel_crtc->config->limited_color_range)
7216                 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
7217
7218         /*
7219          * GY/GU and RY/RU should be the other way around according
7220          * to BSpec, but reality doesn't agree. Just set them up in
7221          * a way that results in the correct picture.
7222          */
7223         I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
7224         I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
7225
7226         I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
7227         I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
7228
7229         I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
7230         I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
7231
7232         I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
7233         I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
7234         I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
7235
7236         if (INTEL_INFO(dev)->gen > 6) {
7237                 uint16_t postoff = 0;
7238
7239                 if (intel_crtc->config->limited_color_range)
7240                         postoff = (16 * (1 << 12) / 255) & 0x1fff;
7241
7242                 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
7243                 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
7244                 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
7245
7246                 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
7247         } else {
7248                 uint32_t mode = CSC_MODE_YUV_TO_RGB;
7249
7250                 if (intel_crtc->config->limited_color_range)
7251                         mode |= CSC_BLACK_SCREEN_OFFSET;
7252
7253                 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
7254         }
7255 }
7256
/* Program PIPECONF (per CPU transcoder), gamma mode and — on BDW/gen9+ —
 * the PIPEMISC dithering/bpc register for the pipe. */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        uint32_t val;

        val = 0;

        /* Only HSW carries the dither bits in PIPECONF; BDW+ uses
         * PIPEMISC below. */
        if (IS_HASWELL(dev) && intel_crtc->config->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;

        /* PIPECONF is indexed by CPU transcoder here, not by pipe. */
        I915_WRITE(PIPECONF(cpu_transcoder), val);
        POSTING_READ(PIPECONF(cpu_transcoder));

        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
        POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

        if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                val = 0;

                /* Translate total bits per pixel into the PIPEMISC
                 * bits-per-channel field. */
                switch (intel_crtc->config->pipe_bpp) {
                case 18:
                        val |= PIPEMISC_DITHER_6_BPC;
                        break;
                case 24:
                        val |= PIPEMISC_DITHER_8_BPC;
                        break;
                case 30:
                        val |= PIPEMISC_DITHER_10_BPC;
                        break;
                case 36:
                        val |= PIPEMISC_DITHER_12_BPC;
                        break;
                default:
                        /* Case prevented by pipe_config_set_bpp. */
                        BUG();
                }

                if (intel_crtc->config->dither)
                        val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

                I915_WRITE(PIPEMISC(pipe), val);
        }
}
7309
/*
 * Compute DPLL divisors for crtc_state->port_clock.
 *
 * On success *clock holds the divisors.  If the output is LVDS and a
 * downclock is available, also tries to find matching reduced-clock
 * divisors, storing the outcome in *has_reduced_clock/*reduced_clock.
 * Returns false only when no divisors exist for the target clock.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
                                    struct intel_crtc_state *crtc_state,
                                    intel_clock_t *clock,
                                    bool *has_reduced_clock,
                                    intel_clock_t *reduced_clock)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int refclk;
        const intel_limit_t *limit;
        bool ret, is_lvds = false;

        is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);

        refclk = ironlake_get_refclk(crtc);

        /*
         * Returns a set of divisors for the desired target clock with the given
         * refclk, or FALSE.  The returned values represent the clock equation:
         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
        limit = intel_limit(intel_crtc, refclk);
        ret = dev_priv->display.find_dpll(limit, intel_crtc,
                                          crtc_state->port_clock,
                                          refclk, NULL, clock);
        if (!ret)
                return false;

        if (is_lvds && dev_priv->lvds_downclock_avail) {
                /*
                 * Ensure we match the reduced clock's P to the target clock.
                 * If the clocks don't match, we can't switch the display clock
                 * by using the FP0/FP1. In such case we will disable the LVDS
                 * downclock feature.
                */
                *has_reduced_clock =
                        dev_priv->display.find_dpll(limit, intel_crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk, clock,
                                                    reduced_clock);
        }

        return true;
}
7355
7356 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
7357 {
7358         /*
7359          * Account for spread spectrum to avoid
7360          * oversubscribing the link. Max center spread
7361          * is 2.5%; use 5% for safety's sake.
7362          */
7363         u32 bps = target_clock * bpp * 21 / 20;
7364         return DIV_ROUND_UP(bps, link_bw * 8);
7365 }
7366
7367 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
7368 {
7369         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
7370 }
7371
/*
 * Assemble the DPLL control register value for intel_crtc from
 * crtc_state and the encoders attached to it.
 *
 * *fp (and *fp2, when non-NULL, i.e. when a reduced clock exists) may
 * get FP_CB_TUNE or'ed in.  Returns the DPLL value with VCO enable set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                                      struct intel_crtc_state *crtc_state,
                                      u32 *fp,
                                      intel_clock_t *reduced_clock, u32 *fp2)
{
        struct drm_crtc *crtc = &intel_crtc->base;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        uint32_t dpll;
        int factor, num_connectors = 0;
        bool is_lvds = false, is_sdvo = false;

        /* Classify the encoders bound to this CRTC and count them. */
        for_each_intel_encoder(dev, intel_encoder) {
                if (intel_encoder->new_crtc != to_intel_crtc(crtc))
                        continue;

                switch (intel_encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
                case INTEL_OUTPUT_SDVO:
                case INTEL_OUTPUT_HDMI:
                        is_sdvo = true;
                        break;
                default:
                        break;
                }

                num_connectors++;
        }

        /* Enable autotuning of the PLL clock (if permissible) */
        factor = 21;
        if (is_lvds) {
                if ((intel_panel_use_ssc(dev_priv) &&
                     dev_priv->vbt.lvds_ssc_freq == 100000) ||
                    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
                        factor = 25;
        } else if (crtc_state->sdvo_tv_clock)
                factor = 20;

        if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
                *fp |= FP_CB_TUNE;

        /* Same tuning criterion applied to the reduced clock, if any. */
        if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
                *fp2 |= FP_CB_TUNE;

        dpll = 0;

        if (is_lvds)
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;

        dpll |= (crtc_state->pixel_multiplier - 1)
                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

        /* High-speed bit is needed for both SDVO/HDMI and DP. */
        if (is_sdvo)
                dpll |= DPLL_SDVO_HIGH_SPEED;
        if (crtc_state->has_dp_encoder)
                dpll |= DPLL_SDVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        /* also FPA1 */
        dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

        switch (crtc_state->dpll.p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
        case 7:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                break;
        case 10:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                break;
        case 14:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                break;
        }

        /* Single LVDS output with SSC uses the spread-spectrum
         * reference input; everything else uses DREFCLK. */
        if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        return dpll | DPLL_VCO_ENABLE;
}
7462
/*
 * Compute clock state for an IBX/CPT pipe: find DPLL divisors, fill
 * crtc_state->dpll_hw_state and acquire a shared PCH PLL when a PCH
 * encoder is present.  Returns 0 on success or -EINVAL when no PLL
 * settings (or no free shared PLL) can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
                                       struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        intel_clock_t clock, reduced_clock;
        u32 dpll = 0, fp = 0, fp2 = 0;
        bool ok, has_reduced_clock = false;
        bool is_lvds = false;
        struct intel_shared_dpll *pll;

        is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);

        WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
             "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

        ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
                                     &has_reduced_clock, &reduced_clock);
        if (!ok && !crtc_state->clock_set) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }
        /* Compat-code for transition, will disappear. */
        if (!crtc_state->clock_set) {
                crtc_state->dpll.n = clock.n;
                crtc_state->dpll.m1 = clock.m1;
                crtc_state->dpll.m2 = clock.m2;
                crtc_state->dpll.p1 = clock.p1;
                crtc_state->dpll.p2 = clock.p2;
        }

        /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
        if (crtc_state->has_pch_encoder) {
                fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
                if (has_reduced_clock)
                        fp2 = i9xx_dpll_compute_fp(&reduced_clock);

                dpll = ironlake_compute_dpll(crtc, crtc_state,
                                             &fp, &reduced_clock,
                                             has_reduced_clock ? &fp2 : NULL);

                /* FP1 falls back to the normal divisors when no reduced
                 * clock was found. */
                crtc_state->dpll_hw_state.dpll = dpll;
                crtc_state->dpll_hw_state.fp0 = fp;
                if (has_reduced_clock)
                        crtc_state->dpll_hw_state.fp1 = fp2;
                else
                        crtc_state->dpll_hw_state.fp1 = fp;

                pll = intel_get_shared_dpll(crtc, crtc_state);
                if (pll == NULL) {
                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
                                         pipe_name(crtc->pipe));
                        return -EINVAL;
                }
        }

        /* Downclocking via FP0/FP1 is only used when powersave is on. */
        if (is_lvds && has_reduced_clock && i915.powersave)
                crtc->lowfreq_avail = true;
        else
                crtc->lowfreq_avail = false;

        return 0;
}
7525
/* Read the link M/N values and TU size back from the PCH transcoder
 * registers of the crtc's pipe into *m_n. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = crtc->pipe;

        m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
        m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
        /* DATA_M1 packs the TU size in its high bits; split it out. */
        m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
                & ~TU_SIZE_MASK;
        m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
        m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
                    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
7541
/* Read the link M/N values and TU size back from the CPU transcoder
 * (gen5+) or the G4X per-pipe registers (older gens) into *m_n.
 * m2_n2, when non-NULL, receives the second M/N set used for DRRS. */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
                                         enum transcoder transcoder,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = crtc->pipe;

        if (INTEL_INFO(dev)->gen >= 5) {
                m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
                m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
                /* DATA_M1 packs the TU size in its high bits. */
                m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
                        & ~TU_SIZE_MASK;
                m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
                m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
                /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
                 * gen < 8) and if DRRS is supported (to make sure the
                 * registers are not unnecessarily read).
                 */
                if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
                        crtc->config->has_drrs) {
                        m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
                        m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
                        m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
                                        & ~TU_SIZE_MASK;
                        m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
                        m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
                                        & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
                }
        } else {
                /* Pre-gen5: M/N registers are per pipe, not per transcoder. */
                m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
                m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
                m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
                        & ~TU_SIZE_MASK;
                m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
                m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
        }
}
7583
7584 void intel_dp_get_m_n(struct intel_crtc *crtc,
7585                       struct intel_crtc_state *pipe_config)
7586 {
7587         if (pipe_config->has_pch_encoder)
7588                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7589         else
7590                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7591                                              &pipe_config->dp_m_n,
7592                                              &pipe_config->dp_m2_n2);
7593 }
7594
/* Read back the FDI link M/N state from the CPU transcoder (the second
 * M/N set does not apply to FDI, hence NULL). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
                                        struct intel_crtc_state *pipe_config)
{
        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
                                     &pipe_config->fdi_m_n, NULL);
}
7601
/* Read back the SKL pipe scaler (PS) state: if it is enabled, record
 * that plus its window position/size in pipe_config->pch_pfit. */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;

        tmp = I915_READ(PS_CTL(crtc->pipe));

        if (tmp & PS_ENABLE) {
                pipe_config->pch_pfit.enabled = true;
                pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
                pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
        }
}
7617
7618 static void
7619 skylake_get_initial_plane_config(struct intel_crtc *crtc,
7620                                  struct intel_initial_plane_config *plane_config)
7621 {
7622         struct drm_device *dev = crtc->base.dev;
7623         struct drm_i915_private *dev_priv = dev->dev_private;
7624         u32 val, base, offset, stride_mult;
7625         int pipe = crtc->pipe;
7626         int fourcc, pixel_format;
7627         int aligned_height;
7628         struct drm_framebuffer *fb;
7629         struct intel_framebuffer *intel_fb;
7630
7631         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7632         if (!intel_fb) {
7633                 DRM_DEBUG_KMS("failed to alloc fb\n");
7634                 return;
7635         }
7636
7637         fb = &intel_fb->base;
7638
7639         val = I915_READ(PLANE_CTL(pipe, 0));
7640         if (val & PLANE_CTL_TILED_MASK)
7641                 plane_config->tiling = I915_TILING_X;
7642
7643         pixel_format = val & PLANE_CTL_FORMAT_MASK;
7644         fourcc = skl_format_to_fourcc(pixel_format,
7645                                       val & PLANE_CTL_ORDER_RGBX,
7646                                       val & PLANE_CTL_ALPHA_MASK);
7647         fb->pixel_format = fourcc;
7648         fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
7649
7650         base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
7651         plane_config->base = base;
7652
7653         offset = I915_READ(PLANE_OFFSET(pipe, 0));
7654
7655         val = I915_READ(PLANE_SIZE(pipe, 0));
7656         fb->height = ((val >> 16) & 0xfff) + 1;
7657         fb->width = ((val >> 0) & 0x1fff) + 1;
7658
7659         val = I915_READ(PLANE_STRIDE(pipe, 0));
7660         switch (plane_config->tiling) {
7661         case I915_TILING_NONE:
7662                 stride_mult = 64;
7663                 break;
7664         case I915_TILING_X:
7665                 stride_mult = 512;
7666                 break;
7667         default:
7668                 MISSING_CASE(plane_config->tiling);
7669                 goto error;
7670         }
7671         fb->pitches[0] = (val & 0x3ff) * stride_mult;
7672
7673         aligned_height = intel_fb_align_height(dev, fb->height,
7674                                                plane_config->tiling);
7675
7676         plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
7677
7678         DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7679                       pipe_name(pipe), fb->width, fb->height,
7680                       fb->bits_per_pixel, base, fb->pitches[0],
7681                       plane_config->size);
7682
7683         crtc->base.primary->fb = fb;
7684         return;
7685
7686 error:
7687         kfree(fb);
7688 }
7689
/* Read back the ilk+ panel fitter (PF) state: if enabled, record that
 * plus its window position/size in pipe_config->pch_pfit. */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;

        tmp = I915_READ(PF_CTL(crtc->pipe));

        if (tmp & PF_ENABLE) {
                pipe_config->pch_pfit.enabled = true;
                pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
                pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

                /* We currently do not free assignements of panel fitters on
                 * ivb/hsw (since we don't use the higher upscaling modes which
                 * differentiates them) so just WARN about this case for now. */
                if (IS_GEN7(dev)) {
                        WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
                                PF_PIPE_SEL_IVB(crtc->pipe));
                }
        }
}
7713
/*
 * Read back the ilk+ primary plane's current hardware state and fill
 * *plane_config (and crtc->base.primary->fb) with a framebuffer
 * describing it.  Silently returns on allocation failure.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
                                  struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, base, offset;
        int pipe = crtc->pipe;
        int fourcc, pixel_format;
        int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        val = I915_READ(DSPCNTR(pipe));

        if (INTEL_INFO(dev)->gen >= 4)
                if (val & DISPPLANE_TILED)
                        plane_config->tiling = I915_TILING_X;

        /* Translate the hw pixel format field into a drm fourcc. */
        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->pixel_format = fourcc;
        fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

        /* The offset register differs by platform and tiling mode. */
        base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                offset = I915_READ(DSPOFFSET(pipe));
        } else {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(pipe));
                else
                        offset = I915_READ(DSPLINOFF(pipe));
        }
        plane_config->base = base;

        /* PIPESRC holds (width-1 << 16) | (height-1). */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(pipe));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);

        plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);

        DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), fb->width, fb->height,
                      fb->bits_per_pixel, base, fb->pitches[0],
                      plane_config->size);

        crtc->base.primary->fb = fb;
}
7776
/*
 * Read back the full pipe configuration from the hardware into
 * *pipe_config: bpc, color range, timings, pfit, and — when the PCH
 * transcoder is enabled — FDI/PLL state.  Returns false if the pipe's
 * power domain is off or the pipe itself is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;

        if (!intel_display_power_is_enabled(dev_priv,
                                            POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;

        /* On these platforms the CPU transcoder maps 1:1 to the pipe. */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                return false;

        /* Decode the bits-per-channel field into total bits per pixel. */
        switch (tmp & PIPECONF_BPC_MASK) {
        case PIPECONF_6BPC:
                pipe_config->pipe_bpp = 18;
                break;
        case PIPECONF_8BPC:
                pipe_config->pipe_bpp = 24;
                break;
        case PIPECONF_10BPC:
                pipe_config->pipe_bpp = 30;
                break;
        case PIPECONF_12BPC:
                pipe_config->pipe_bpp = 36;
                break;
        default:
                break;
        }

        if (tmp & PIPECONF_COLOR_RANGE_SELECT)
                pipe_config->limited_color_range = true;

        if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
                struct intel_shared_dpll *pll;

                pipe_config->has_pch_encoder = true;

                tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ironlake_get_fdi_m_n_config(crtc, pipe_config);

                /* IBX has a fixed pipe->PLL mapping; CPT records the
                 * selection in PCH_DPLL_SEL. */
                if (HAS_PCH_IBX(dev_priv->dev)) {
                        pipe_config->shared_dpll =
                                (enum intel_dpll_id) crtc->pipe;
                } else {
                        tmp = I915_READ(PCH_DPLL_SEL);
                        if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
                                pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
                        else
                                pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
                }

                pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

                WARN_ON(!pll->get_hw_state(dev_priv, pll,
                                           &pipe_config->dpll_hw_state));

                /* Recover the pixel multiplier from the DPLL value. */
                tmp = pipe_config->dpll_hw_state.dpll;
                pipe_config->pixel_multiplier =
                        ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
                         >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

                ironlake_pch_clock_get(crtc, pipe_config);
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        intel_get_pipe_timings(crtc, pipe_config);

        ironlake_get_pfit_config(crtc, pipe_config);

        return true;
}
7858
/*
 * Verify the display hardware is quiesced enough for LCPLL to be disabled:
 * all CRTCs inactive, power well off, SPLL/WRPLLs disabled, panel power and
 * backlight PWMs off, utility pin and PCH GTC disabled, IRQs disabled.
 * Each violation is reported through I915_STATE_WARN.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* The second CPU backlight PWM is only checked on Haswell. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
7892
7893 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7894 {
7895         struct drm_device *dev = dev_priv->dev;
7896
7897         if (IS_HASWELL(dev))
7898                 return I915_READ(D_COMP_HSW);
7899         else
7900                 return I915_READ(D_COMP_BDW);
7901 }
7902
7903 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7904 {
7905         struct drm_device *dev = dev_priv->dev;
7906
7907         if (IS_HASWELL(dev)) {
7908                 mutex_lock(&dev_priv->rps.hw_lock);
7909                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7910                                             val))
7911                         DRM_ERROR("Failed to write to D_COMP\n");
7912                 mutex_unlock(&dev_priv->rps.hw_lock);
7913         } else {
7914                 I915_WRITE(D_COMP_BDW, val);
7915                 POSTING_READ(D_COMP_BDW);
7916         }
7917 }
7918
7919 /*
7920  * This function implements pieces of two sequences from BSpec:
7921  * - Sequence for display software to disable LCPLL
7922  * - Sequence for display software to allow package C8+
7923  * The steps implemented here are just the steps that actually touch the LCPLL
7924  * register. Callers should take care of disabling all the display engine
7925  * functions, doing the mode unset, fixing interrupts, etc.
7926  */
7927 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7928                               bool switch_to_fclk, bool allow_power_down)
7929 {
7930         uint32_t val;
7931
7932         assert_can_disable_lcpll(dev_priv);
7933
7934         val = I915_READ(LCPLL_CTL);
7935
7936         if (switch_to_fclk) {
7937                 val |= LCPLL_CD_SOURCE_FCLK;
7938                 I915_WRITE(LCPLL_CTL, val);
7939
7940                 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7941                                        LCPLL_CD_SOURCE_FCLK_DONE, 1))
7942                         DRM_ERROR("Switching to FCLK failed\n");
7943
7944                 val = I915_READ(LCPLL_CTL);
7945         }
7946
7947         val |= LCPLL_PLL_DISABLE;
7948         I915_WRITE(LCPLL_CTL, val);
7949         POSTING_READ(LCPLL_CTL);
7950
7951         if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7952                 DRM_ERROR("LCPLL still locked\n");
7953
7954         val = hsw_read_dcomp(dev_priv);
7955         val |= D_COMP_COMP_DISABLE;
7956         hsw_write_dcomp(dev_priv, val);
7957         ndelay(100);
7958
7959         if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
7960                      1))
7961                 DRM_ERROR("D_COMP RCOMP still in progress\n");
7962
7963         if (allow_power_down) {
7964                 val = I915_READ(LCPLL_CTL);
7965                 val |= LCPLL_POWER_DOWN_ALLOW;
7966                 I915_WRITE(LCPLL_CTL, val);
7967                 POSTING_READ(LCPLL_CTL);
7968         }
7969 }
7970
7971 /*
7972  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7973  * source.
7974  */
7975 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7976 {
7977         uint32_t val;
7978
7979         val = I915_READ(LCPLL_CTL);
7980
7981         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7982                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7983                 return;
7984
7985         /*
7986          * Make sure we're not on PC8 state before disabling PC8, otherwise
7987          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7988          */
7989         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
7990
7991         if (val & LCPLL_POWER_DOWN_ALLOW) {
7992                 val &= ~LCPLL_POWER_DOWN_ALLOW;
7993                 I915_WRITE(LCPLL_CTL, val);
7994                 POSTING_READ(LCPLL_CTL);
7995         }
7996
7997         val = hsw_read_dcomp(dev_priv);
7998         val |= D_COMP_COMP_FORCE;
7999         val &= ~D_COMP_COMP_DISABLE;
8000         hsw_write_dcomp(dev_priv, val);
8001
8002         val = I915_READ(LCPLL_CTL);
8003         val &= ~LCPLL_PLL_DISABLE;
8004         I915_WRITE(LCPLL_CTL, val);
8005
8006         if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
8007                 DRM_ERROR("LCPLL not locked yet\n");
8008
8009         if (val & LCPLL_CD_SOURCE_FCLK) {
8010                 val = I915_READ(LCPLL_CTL);
8011                 val &= ~LCPLL_CD_SOURCE_FCLK;
8012                 I915_WRITE(LCPLL_CTL, val);
8013
8014                 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
8015                                         LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
8016                         DRM_ERROR("Switching back to LCPLL failed\n");
8017         }
8018
8019         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
8020 }
8021
8022 /*
8023  * Package states C8 and deeper are really deep PC states that can only be
8024  * reached when all the devices on the system allow it, so even if the graphics
8025  * device allows PC8+, it doesn't mean the system will actually get to these
8026  * states. Our driver only allows PC8+ when going into runtime PM.
8027  *
8028  * The requirements for PC8+ are that all the outputs are disabled, the power
8029  * well is disabled and most interrupts are disabled, and these are also
8030  * requirements for runtime PM. When these conditions are met, we manually do
8031  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
8032  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
8033  * hang the machine.
8034  *
8035  * When we really reach PC8 or deeper states (not just when we allow it) we lose
8036  * the state of some registers, so when we come back from PC8+ we need to
8037  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
8038  * need to take care of the registers kept by RC6. Notice that this happens even
8039  * if we don't put the device in PCI D3 state (which is what currently happens
8040  * because of the runtime PM support).
8041  *
8042  * For more, read "Display Sequences for Package C8" on the hardware
8043  * documentation.
8044  */
8045 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
8046 {
8047         struct drm_device *dev = dev_priv->dev;
8048         uint32_t val;
8049
8050         DRM_DEBUG_KMS("Enabling package C8+\n");
8051
8052         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
8053                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
8054                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
8055                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8056         }
8057
8058         lpt_disable_clkout_dp(dev);
8059         hsw_disable_lcpll(dev_priv, true, true);
8060 }
8061
/*
 * Undo hsw_enable_pc8(): restore LCPLL, re-init the PCH refclk, disallow
 * PCH partition power-down on LPT-LP and re-prepare the DDI buffers.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	/* On LPT-LP, disallow PCH partition power-down again. */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}
8080
/*
 * Compute the clock state for a HSW+ CRTC; the actual PLL selection is
 * delegated to intel_ddi_pll_select().
 *
 * Returns 0 on success, -EINVAL if no suitable DDI PLL could be selected.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_ddi_pll_select(crtc, crtc_state))
		return -EINVAL;

	crtc->lowfreq_avail = false;

	return 0;
}
8091
/*
 * Read out from DPLL_CTRL2 which DPLL drives @port on Skylake and record
 * it in @pipe_config. DPLL0 is special-cased: its ctrl1 state is read out
 * directly since it is not managed by the shared DPLL framework.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	/* The per-port clock select field sits at bit (port * 3 + 1). */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}
8122
/*
 * Read out which clock drives @port on HSW/BDW. Only the WRPLL selections
 * map to shared DPLL ids; any other PORT_CLK_SEL value leaves shared_dpll
 * as set by the caller.
 *
 * NOTE(review): SPLL and the fixed LCPLL selections are silently ignored
 * here — confirm that is intentional for all port/encoder combinations.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}
}
8138
/*
 * Read out the DDI port, its PLL selection and any FDI/PCH encoder state
 * for @crtc into @pipe_config.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* If a shared DPLL was selected, read out its hardware state too. */
	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
8180
/*
 * Read out the full hardware pipe configuration for a HSW+ @crtc into
 * @pipe_config. Returns false if the pipe (or a power domain it needs)
 * is off, true if the state was read out successfully.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* Check whether the eDP transcoder is assigned to this pipe. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through: treat an unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_is_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	/* Panel fitter state is only readable if its power domain is up. */
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
		if (IS_SKYLAKE(dev))
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	return true;
}
8251
/*
 * Program the 845g/865g cursor registers. A zero @base disables the
 * cursor. These chips only take an ARGB cursor with a power-of-two
 * stride, and base/size/stride may only change while the cursor is off.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (base) {
		unsigned int width = intel_crtc->cursor_width;
		unsigned int height = intel_crtc->cursor_height;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(_CURACNTR, 0);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = 0;
	}

	/* Only touch registers whose cached value actually changed. */
	if (intel_crtc->cursor_base != base) {
		I915_WRITE(_CURABASE, base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(_CURACNTR, cntl);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = cntl;
	}
}
8313
/*
 * Program the cursor control and base registers for i9xx and later.
 * A zero @base disables the cursor; only cursor widths 64/128/256 are
 * supported. The CURBASE write commits the update on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->cursor_width) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				MISSING_CASE(intel_crtc->cursor_width);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
	}

	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
		cntl |= CURSOR_ROTATE_180;

	/* Only rewrite the control register when the value changed. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
8360
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Update the cursor position/visibility for @crtc. @on enables the
 * cursor at intel_crtc->cursor_addr; the cursor is force-disabled
 * (base = 0) when it lies entirely outside the pipe source area.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = crtc->cursor_x;
	int y = crtc->cursor_y;
	u32 base = 0, pos = 0;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Disable the cursor if it is fully off the right/bottom edge. */
	if (x >= intel_crtc->config->pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config->pipe_src_h)
		base = 0;

	/* Negative coordinates are encoded as sign bit + magnitude. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Nothing to do if the cursor stays disabled. */
	if (base == 0 && intel_crtc->cursor_base == 0)
		return;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		/* Point base at the last pixel (4 bytes each) for 180° scan-out. */
		base += (intel_crtc->cursor_height *
			intel_crtc->cursor_width - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);
}
8417
/*
 * Check whether a @width x @height cursor is supported by the hardware
 * generation of @dev. Returns false for zero-sized or unsupported sizes.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width | height == width only when the cursor is square */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fall through */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
8454
8455 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8456                                  u16 *blue, uint32_t start, uint32_t size)
8457 {
8458         int end = (start + size > 256) ? 256 : start + size, i;
8459         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8460
8461         for (i = start; i < end; i++) {
8462                 intel_crtc->lut_r[i] = red[i] >> 8;
8463                 intel_crtc->lut_g[i] = green[i] >> 8;
8464                 intel_crtc->lut_b[i] = blue[i] >> 8;
8465         }
8466
8467         intel_crtc_load_lut(crtc);
8468 }
8469
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
8475
8476 struct drm_framebuffer *
8477 __intel_framebuffer_create(struct drm_device *dev,
8478                            struct drm_mode_fb_cmd2 *mode_cmd,
8479                            struct drm_i915_gem_object *obj)
8480 {
8481         struct intel_framebuffer *intel_fb;
8482         int ret;
8483
8484         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8485         if (!intel_fb) {
8486                 drm_gem_object_unreference(&obj->base);
8487                 return ERR_PTR(-ENOMEM);
8488         }
8489
8490         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
8491         if (ret)
8492                 goto err;
8493
8494         return &intel_fb->base;
8495 err:
8496         drm_gem_object_unreference(&obj->base);
8497         kfree(intel_fb);
8498
8499         return ERR_PTR(ret);
8500 }
8501
/*
 * Locked wrapper around __intel_framebuffer_create(): takes struct_mutex
 * interruptibly for the duration of the framebuffer creation.
 *
 * Returns the framebuffer, or an ERR_PTR on failure (including the error
 * from an interrupted i915_mutex_lock_interruptible()).
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}
8518
8519 static u32
8520 intel_framebuffer_pitch_for_width(int width, int bpp)
8521 {
8522         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8523         return ALIGN(pitch, 64);
8524 }
8525
8526 static u32
8527 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8528 {
8529         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
8530         return PAGE_ALIGN(pitch * mode->vdisplay);
8531 }
8532
/*
 * Allocate a GEM object big enough for @mode at @depth/@bpp and wrap it
 * in a framebuffer. Returns the framebuffer or an ERR_PTR on failure.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
8554
/*
 * Return the fbdev framebuffer if it is large enough (pitch and total
 * size) to scan out @mode, otherwise NULL. Always NULL when fbdev
 * support is compiled out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	/* The fbdev pitch must cover a full scanline of @mode. */
	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* ...and the backing object must cover all scanlines. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}
8586
8587 bool intel_get_load_detect_pipe(struct drm_connector *connector,
8588                                 struct drm_display_mode *mode,
8589                                 struct intel_load_detect_pipe *old,
8590                                 struct drm_modeset_acquire_ctx *ctx)
8591 {
8592         struct intel_crtc *intel_crtc;
8593         struct intel_encoder *intel_encoder =
8594                 intel_attached_encoder(connector);
8595         struct drm_crtc *possible_crtc;
8596         struct drm_encoder *encoder = &intel_encoder->base;
8597         struct drm_crtc *crtc = NULL;
8598         struct drm_device *dev = encoder->dev;
8599         struct drm_framebuffer *fb;
8600         struct drm_mode_config *config = &dev->mode_config;
8601         int ret, i = -1;
8602
8603         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8604                       connector->base.id, connector->name,
8605                       encoder->base.id, encoder->name);
8606
8607 retry:
8608         ret = drm_modeset_lock(&config->connection_mutex, ctx);
8609         if (ret)
8610                 goto fail_unlock;
8611
8612         /*
8613          * Algorithm gets a little messy:
8614          *
8615          *   - if the connector already has an assigned crtc, use it (but make
8616          *     sure it's on first)
8617          *
8618          *   - try to find the first unused crtc that can drive this connector,
8619          *     and use that if we find one
8620          */
8621
8622         /* See if we already have a CRTC for this connector */
8623         if (encoder->crtc) {
8624                 crtc = encoder->crtc;
8625
8626                 ret = drm_modeset_lock(&crtc->mutex, ctx);
8627                 if (ret)
8628                         goto fail_unlock;
8629                 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8630                 if (ret)
8631                         goto fail_unlock;
8632
8633                 old->dpms_mode = connector->dpms;
8634                 old->load_detect_temp = false;
8635
8636                 /* Make sure the crtc and connector are running */
8637                 if (connector->dpms != DRM_MODE_DPMS_ON)
8638                         connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8639
8640                 return true;
8641         }
8642
8643         /* Find an unused one (if possible) */
8644         for_each_crtc(dev, possible_crtc) {
8645                 i++;
8646                 if (!(encoder->possible_crtcs & (1 << i)))
8647                         continue;
8648                 if (possible_crtc->enabled)
8649                         continue;
8650                 /* This can occur when applying the pipe A quirk on resume. */
8651                 if (to_intel_crtc(possible_crtc)->new_enabled)
8652                         continue;
8653
8654                 crtc = possible_crtc;
8655                 break;
8656         }
8657
8658         /*
8659          * If we didn't find an unused CRTC, don't use any.
8660          */
8661         if (!crtc) {
8662                 DRM_DEBUG_KMS("no pipe available for load-detect\n");
8663                 goto fail_unlock;
8664         }
8665
8666         ret = drm_modeset_lock(&crtc->mutex, ctx);
8667         if (ret)
8668                 goto fail_unlock;
8669         ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8670         if (ret)
8671                 goto fail_unlock;
8672         intel_encoder->new_crtc = to_intel_crtc(crtc);
8673         to_intel_connector(connector)->new_encoder = intel_encoder;
8674
8675         intel_crtc = to_intel_crtc(crtc);
8676         intel_crtc->new_enabled = true;
8677         intel_crtc->new_config = intel_crtc->config;
8678         old->dpms_mode = connector->dpms;
8679         old->load_detect_temp = true;
8680         old->release_fb = NULL;
8681
8682         if (!mode)
8683                 mode = &load_detect_mode;
8684
8685         /* We need a framebuffer large enough to accommodate all accesses
8686          * that the plane may generate whilst we perform load detection.
8687          * We can not rely on the fbcon either being present (we get called
8688          * during its initialisation to detect all boot displays, or it may
8689          * not even exist) or that it is large enough to satisfy the
8690          * requested mode.
8691          */
8692         fb = mode_fits_in_fbdev(dev, mode);
8693         if (fb == NULL) {
8694                 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
8695                 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8696                 old->release_fb = fb;
8697         } else
8698                 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
8699         if (IS_ERR(fb)) {
8700                 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
8701                 goto fail;
8702         }
8703
8704         if (intel_set_mode(crtc, mode, 0, 0, fb)) {
8705                 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8706                 if (old->release_fb)
8707                         old->release_fb->funcs->destroy(old->release_fb);
8708                 goto fail;
8709         }
8710
8711         /* let the connector get through one full cycle before testing */
8712         intel_wait_for_vblank(dev, intel_crtc->pipe);
8713         return true;
8714
8715  fail:
8716         intel_crtc->new_enabled = crtc->enabled;
8717         if (intel_crtc->new_enabled)
8718                 intel_crtc->new_config = intel_crtc->config;
8719         else
8720                 intel_crtc->new_config = NULL;
8721 fail_unlock:
8722         if (ret == -EDEADLK) {
8723                 drm_modeset_backoff(ctx);
8724                 goto retry;
8725         }
8726
8727         return false;
8728 }
8729
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @connector: connector that was borrowed for load detection
 * @old: state saved by intel_get_load_detect_pipe()
 *
 * Tears down the temporary pipe/framebuffer that was enabled purely for
 * load detection, or, when an already-running pipe was reused, restores
 * the connector's previous DPMS state.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old)
{
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);

        if (old->load_detect_temp) {
                /* A fresh pipe was lit just for detection: clear the staged
                 * connector/encoder/crtc links and shut the pipe down. */
                to_intel_connector(connector)->new_encoder = NULL;
                intel_encoder->new_crtc = NULL;
                intel_crtc->new_enabled = false;
                intel_crtc->new_config = NULL;
                intel_set_mode(crtc, NULL, 0, 0, NULL);

                /* Drop the throwaway framebuffer allocated for detection. */
                if (old->release_fb) {
                        drm_framebuffer_unregister_private(old->release_fb);
                        drm_framebuffer_unreference(old->release_fb);
                }

                return;
        }

        /* Switch crtc and encoder back off if necessary */
        if (old->dpms_mode != DRM_MODE_DPMS_ON)
                connector->funcs->dpms(connector, old->dpms_mode);
}
8762
8763 static int i9xx_pll_refclk(struct drm_device *dev,
8764                            const struct intel_crtc_state *pipe_config)
8765 {
8766         struct drm_i915_private *dev_priv = dev->dev_private;
8767         u32 dpll = pipe_config->dpll_hw_state.dpll;
8768
8769         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8770                 return dev_priv->vbt.lvds_ssc_freq;
8771         else if (HAS_PCH_SPLIT(dev))
8772                 return 120000;
8773         else if (!IS_GEN2(dev))
8774                 return 96000;
8775         else
8776                 return 48000;
8777 }
8778
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        intel_clock_t clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* The rate-select bit picks which FP divisor set is active. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev)) {
                /* Pineview stores N as a one-hot bitfield; ffs()-1 recovers
                 * the divisor value. Its M2 field layout also differs. */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN2(dev)) {
                /* P1 is one-hot encoded on gen3+; ffs() decodes it. */
                if (IS_PINEVIEW(dev))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on which output type the PLL was set up for. */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev))
                        pineview_clock(refclk, &clock);
                else
                        i9xx_clock(refclk, &clock);
        } else {
                /* I830 has no LVDS port, so skip the register read there. */
                u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                i9xx_clock(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = clock.dot;
}
8867
/*
 * intel_dotclock_calculate - derive the dot clock from link M/N values
 * @link_freq: link clock frequency
 * @m_n: M/N divider values read back from the hardware
 *
 * Returns (M * link_freq) / N, or 0 when N is unprogrammed so a
 * disabled pipe does not cause a divide by zero.
 */
int intel_dotclock_calculate(int link_freq,
                             const struct intel_link_m_n *m_n)
{
        /*
         * The calculation for the data clock is:
         * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
         * But we want to avoid losing precison if possible, so:
         * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
         *
         * and the link clock is simpler:
         * link_clock = (m * link_clock) / n
         */

        if (!m_n->link_n)
                return 0;

        /* 64-bit intermediate so m * link_freq cannot overflow. */
        return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}
8886
/*
 * Read back the pixel clock of a PCH-driven pipe: the DPLL state gives
 * port_clock, and the FDI M/N values combined with the FDI link
 * frequency give the dotclock.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;

        /* read out port_clock from the DPLL */
        i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * This value does not include pixel_multiplier.
         * We will check that port_clock and adjusted_mode.crtc_clock
         * agree once we know their relationship in the encoder's
         * get_config() function.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
                                         &pipe_config->fdi_m_n);
}
8905
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                             struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        struct drm_display_mode *mode;
        struct intel_crtc_state pipe_config;
        int htot = I915_READ(HTOTAL(cpu_transcoder));
        int hsync = I915_READ(HSYNC(cpu_transcoder));
        int vtot = I915_READ(VTOTAL(cpu_transcoder));
        int vsync = I915_READ(VSYNC(cpu_transcoder));
        enum pipe pipe = intel_crtc->pipe;

        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
        if (!mode)
                return NULL;

        /*
         * Construct a pipe_config sufficient for getting the clock info
         * back out of crtc_clock_get.
         *
         * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
         * to use a real value here instead.
         */
        pipe_config.cpu_transcoder = (enum transcoder) pipe;
        pipe_config.pixel_multiplier = 1;
        pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
        pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
        pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
        i9xx_crtc_clock_get(intel_crtc, &pipe_config);

        /* The timing registers store values minus one: the low word holds
         * the active/start count, the high word the total/end count. */
        mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
        mode->hdisplay = (htot & 0xffff) + 1;
        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
        mode->hsync_start = (hsync & 0xffff) + 1;
        mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
        mode->vdisplay = (vtot & 0xffff) + 1;
        mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
        mode->vsync_start = (vsync & 0xffff) + 1;
        mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

        drm_mode_set_name(mode);

        return mode;
}
8953
/*
 * Switch the LVDS PLL onto its lower (FPA1) dot clock while the display
 * is idle.  No-op unless this is a GMCH display with a validated
 * downclocked mode available.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        if (!HAS_GMCH_DISPLAY(dev))
                return;

        if (!dev_priv->lvds_downclock_avail)
                return;

        /*
         * Since this is called by a timer, we should never get here in
         * the manual case.
         */
        if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
                int pipe = intel_crtc->pipe;
                int dpll_reg = DPLL(pipe);
                int dpll;

                DRM_DEBUG_DRIVER("downclocking LVDS\n");

                assert_panel_unlocked(dev_priv, pipe);

                /* Select the FPA1 divisors, wait one vblank for the change
                 * to latch, then read back to verify it took effect. */
                dpll = I915_READ(dpll_reg);
                dpll |= DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
                intel_wait_for_vblank(dev, pipe);
                dpll = I915_READ(dpll_reg);
                if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
                        DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
        }

}
8989
8990 void intel_mark_busy(struct drm_device *dev)
8991 {
8992         struct drm_i915_private *dev_priv = dev->dev_private;
8993
8994         if (dev_priv->mm.busy)
8995                 return;
8996
8997         intel_runtime_pm_get(dev_priv);
8998         i915_update_gfx_val(dev_priv);
8999         dev_priv->mm.busy = true;
9000 }
9001
9002 void intel_mark_idle(struct drm_device *dev)
9003 {
9004         struct drm_i915_private *dev_priv = dev->dev_private;
9005         struct drm_crtc *crtc;
9006
9007         if (!dev_priv->mm.busy)
9008                 return;
9009
9010         dev_priv->mm.busy = false;
9011
9012         if (!i915.powersave)
9013                 goto out;
9014
9015         for_each_crtc(dev, crtc) {
9016                 if (!crtc->primary->fb)
9017                         continue;
9018
9019                 intel_decrease_pllclock(crtc);
9020         }
9021
9022         if (INTEL_INFO(dev)->gen >= 6)
9023                 gen6_rps_idle(dev->dev_private);
9024
9025 out:
9026         intel_runtime_pm_put(dev_priv);
9027 }
9028
/*
 * Replace the crtc's current sw state with @crtc_state, freeing the old
 * one.  Called with NULL on final teardown to release the state.
 */
static void intel_crtc_set_state(struct intel_crtc *crtc,
                                 struct intel_crtc_state *crtc_state)
{
        kfree(crtc->config);
        crtc->config = crtc_state;
        /* NOTE(review): when crtc_state is NULL this relies on 'base'
         * being the first member so &crtc_state->base stays NULL --
         * confirm against struct intel_crtc_state's layout. */
        crtc->base.state = &crtc_state->base;
}
9036
/*
 * Final teardown of a crtc: cancel any pending flip work, free the sw
 * state and release the drm core object.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct intel_unpin_work *work;

        /* Detach pending flip work under the event lock so the irq path
         * can no longer reach it, then cancel it outside the lock. */
        spin_lock_irq(&dev->event_lock);
        work = intel_crtc->unpin_work;
        intel_crtc->unpin_work = NULL;
        spin_unlock_irq(&dev->event_lock);

        if (work) {
                cancel_work_sync(&work->work);
                kfree(work);
        }

        /* Free the crtc state before tearing down the drm core object. */
        intel_crtc_set_state(intel_crtc, NULL);
        drm_crtc_cleanup(crtc);

        kfree(intel_crtc);
}
9058
/*
 * Deferred completion of a page flip: unpin the old framebuffer, drop
 * the gem object and request references, and update FBC/frontbuffer
 * tracking.  Runs from a workqueue because it needs struct_mutex.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
        struct intel_unpin_work *work =
                container_of(__work, struct intel_unpin_work, work);
        struct drm_device *dev = work->crtc->dev;
        enum pipe pipe = to_intel_crtc(work->crtc)->pipe;

        mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(work->old_fb_obj);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
        drm_gem_object_unreference(&work->old_fb_obj->base);

        intel_fbc_update(dev);

        /* Drop the reference to the request that carried the flip. */
        if (work->flip_queued_req)
                i915_gem_request_assign(&work->flip_queued_req, NULL);
        mutex_unlock(&dev->struct_mutex);

        intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

        /* The count was incremented when the flip was queued; going
         * negative would mean a completion without a matching queue. */
        BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
        atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

        kfree(work);
}
9084
/*
 * Complete the flip pending on @crtc, if any: checks under the event
 * lock that a flip is queued and has reached the COMPLETE stage before
 * finishing it.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
                                      struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags;

        /* Ignore early vblank irqs */
        if (intel_crtc == NULL)
                return;

        /*
         * This is called both by irq handlers and the reset code (to complete
         * lost pageflips) so needs the full irqsave spinlocks.
         */
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;

        /* Ensure we don't miss a work->pending update ... */
        smp_rmb();

        if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }

        page_flip_completed(intel_crtc);

        spin_unlock_irqrestore(&dev->event_lock, flags);
}
9115
9116 void intel_finish_page_flip(struct drm_device *dev, int pipe)
9117 {
9118         struct drm_i915_private *dev_priv = dev->dev_private;
9119         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9120
9121         do_intel_finish_page_flip(dev, crtc);
9122 }
9123
9124 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9125 {
9126         struct drm_i915_private *dev_priv = dev->dev_private;
9127         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
9128
9129         do_intel_finish_page_flip(dev, crtc);
9130 }
9131
9132 /* Is 'a' after or equal to 'b'? */
9133 static bool g4x_flip_count_after_eq(u32 a, u32 b)
9134 {
9135         return !((a - b) & 0x80000000);
9136 }
9137
/*
 * Has the CS flip for @crtc actually executed?  Combines a surface
 * address check with a flip counter check; always reports true after a
 * GPU reset (the flip is considered lost) and on pre-ctg hardware where
 * the registers don't exist.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* A reset throws away pending flips, so treat them as finished. */
        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
            crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
                return true;

        /*
         * The relevant registers doen't exist on pre-ctg.
         * As the flip done interrupt doesn't trigger for mmio
         * flips on gmch platforms, a flip count check isn't
         * really needed there. But since ctg has the registers,
         * include it in the check anyway.
         */
        if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
                return true;

        /*
         * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
         * used the same base address. In that case the mmio flip might
         * have completed, but the CS hasn't even executed the flip yet.
         *
         * A flip count check isn't enough as the CS might have updated
         * the base address just after start of vblank, but before we
         * managed to process the interrupt. This means we'd complete the
         * CS flip too soon.
         *
         * Combining both checks should get us a good enough result. It may
         * still happen that the CS flip has been executed, but has not
         * yet actually completed. But in case the base address is the same
         * anyway, we don't really care.
         */
        return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
                crtc->unpin_work->gtt_offset &&
                g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
                                    crtc->unpin_work->flip_count);
}
9177
/*
 * Flip-done irq entry point: advance the pending flip on @plane to the
 * COMPLETE stage once page_flip_finished() confirms the hardware has
 * really latched the new surface.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;


        /*
         * This is called both by irq handlers and the reset code (to complete
         * lost pageflips) so needs the full irqsave spinlocks.
         *
         * NB: An MMIO update of the plane base pointer will also
         * generate a page-flip completion irq, i.e. every modeset
         * is also accompanied by a spurious intel_prepare_page_flip().
         */
        spin_lock_irqsave(&dev->event_lock, flags);
        /* inc_not_zero: only advance a flip that is already PENDING. */
        if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
                atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
        spin_unlock_irqrestore(&dev->event_lock, flags);
}
9199
/*
 * Publish the queued flip to the irq handler by moving its state to
 * INTEL_FLIP_PENDING, with barriers on both sides of the store.
 */
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
        /* Ensure that the work item is consistent when activating it ... */
        smp_wmb();
        atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
        /* and that it is marked active as soon as the irq could fire. */
        smp_wmb();
}
9208
/*
 * Queue a CS page flip on gen2: wait for any previous flip on this
 * plane to finish, then emit MI_DISPLAY_FLIP with the new surface
 * address.  Returns 0 or the intel_ring_begin() error.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        /* Can't queue multiple flips, so wait for the previous
         * one to finish before executing the next.
         */
        if (intel_crtc->plane)
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
        intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */

        /* Mark pending before advancing so the irq can't race us. */
        intel_mark_page_flip_active(intel_crtc);
        __intel_ring_advance(ring);
        return 0;
}
9243
/*
 * Queue a CS page flip on gen3: same shape as gen2, but uses the
 * MI_DISPLAY_FLIP_I915 opcode and pads with MI_NOOP instead of an aux
 * base address dword.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        /* Serialize against any previous flip still pending on this plane. */
        if (intel_crtc->plane)
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
        intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
        intel_ring_emit(ring, MI_NOOP);

        /* Mark pending before advancing so the irq can't race us. */
        intel_mark_page_flip_active(intel_crtc);
        __intel_ring_advance(ring);
        return 0;
}
9275
/*
 * Queue a CS page flip on gen4/5: no explicit wait is needed, only the
 * base address (plus tiling mode) must be reprogrammed.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        /* i965+ uses the linear or tiled offsets from the
         * Display Registers (which do not change across a page-flip)
         * so we need only reprogram the base address.
         */
        intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
                        obj->tiling_mode);

        /* XXX Enabling the panel-fitter across page-flip is so far
         * untested on non-native modes, so ignore it for now.
         * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);

        /* Mark pending before advancing so the irq can't race us. */
        intel_mark_page_flip_active(intel_crtc);
        __intel_ring_advance(ring);
        return 0;
}
9314
/*
 * Queue a CS page flip on gen6: like gen4, but the tiling mode is
 * carried in the pitch dword rather than the base address dword.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
         * flipping with a non-native mode, and worse causes a normal
         * modeset to fail.
         * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);

        /* Mark pending before advancing so the irq can't race us. */
        intel_mark_page_flip_active(intel_crtc);
        __intel_ring_advance(ring);
        return 0;
}
9350
/*
 * Queue a CS page flip on gen7/8 (IVB+ MI_DISPLAY_FLIP layout).  On the
 * render ring this also emits an LRI/SRM pair to unmask and restore
 * DERRMR so the flip-done message is delivered.  The whole packet must
 * fit within one cacheline per the IVB bspec.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t plane_bit = 0;
        int len, ret;

        /* IVB+ encodes the target plane in the flip opcode itself. */
        switch (intel_crtc->plane) {
        case PLANE_A:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
                break;
        case PLANE_B:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
                break;
        case PLANE_C:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
                break;
        default:
                WARN_ONCE(1, "unknown plane in flip command\n");
                return -ENODEV;
        }

        /* 4 dwords for the flip packet, plus the DERRMR LRI+SRM on RCS. */
        len = 4;
        if (ring->id == RCS) {
                len += 6;
                /*
                 * On Gen 8, SRM is now taking an extra dword to accommodate
                 * 48bits addresses, and we need a NOOP for the batch size to
                 * stay even.
                 */
                if (IS_GEN8(dev))
                        len += 2;
        }

        /*
         * BSpec MI_DISPLAY_FLIP for IVB:
         * "The full packet must be contained within the same cache line."
         *
         * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
         * cacheline, if we ever start emitting more commands before
         * the MI_DISPLAY_FLIP we may need to first emit everything else,
         * then do the cacheline alignment, and finally emit the
         * MI_DISPLAY_FLIP.
         */
        ret = intel_ring_cacheline_align(ring);
        if (ret)
                return ret;

        ret = intel_ring_begin(ring, len);
        if (ret)
                return ret;

        /* Unmask the flip-done completion message. Note that the bspec says that
         * we should do this for both the BCS and RCS, and that we must not unmask
         * more than one flip event at any time (or ensure that one flip message
         * can be sent by waiting for flip-done prior to queueing new flips).
         * Experimentation says that BCS works despite DERRMR masking all
         * flip-done completion events and that unmasking all planes at once
         * for the RCS also doesn't appear to drop events. Setting the DERRMR
         * to zero does lead to lockups within MI_DISPLAY_FLIP.
         */
        if (ring->id == RCS) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, DERRMR);
                intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                        DERRMR_PIPEB_PRI_FLIP_DONE |
                                        DERRMR_PIPEC_PRI_FLIP_DONE));
                /* Save the current DERRMR into the scratch page so it can
                 * be restored after the flip. */
                if (IS_GEN8(dev))
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
                                              MI_SRM_LRM_GLOBAL_GTT);
                else
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
                                              MI_SRM_LRM_GLOBAL_GTT);
                intel_ring_emit(ring, DERRMR);
                intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
                if (IS_GEN8(dev)) {
                        intel_ring_emit(ring, 0);
                        intel_ring_emit(ring, MI_NOOP);
                }
        }

        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
        intel_ring_emit(ring, (MI_NOOP));

        /* Mark pending before advancing so the irq can't race us. */
        intel_mark_page_flip_active(intel_crtc);
        __intel_ring_advance(ring);
        return 0;
}
9445
9446 static bool use_mmio_flip(struct intel_engine_cs *ring,
9447                           struct drm_i915_gem_object *obj)
9448 {
9449         /*
9450          * This is not being used for older platforms, because
9451          * non-availability of flip done interrupt forces us to use
9452          * CS flips. Older platforms derive flip done using some clever
9453          * tricks involving the flip_pending status bits and vblank irqs.
9454          * So using MMIO flips there would disrupt this mechanism.
9455          */
9456
9457         if (ring == NULL)
9458                 return true;
9459
9460         if (INTEL_INFO(ring->dev)->gen < 5)
9461                 return false;
9462
9463         if (i915.use_mmio_flip < 0)
9464                 return false;
9465         else if (i915.use_mmio_flip > 0)
9466                 return true;
9467         else if (i915.enable_execlists)
9468                 return true;
9469         else
9470                 return ring != i915_gem_request_get_ring(obj->last_read_req);
9471 }
9472
/*
 * Perform an MMIO page flip on a SKL+ universal plane: update
 * PLANE_CTL/PLANE_STRIDE for the new fb's tiling, then latch everything
 * atomically with a PLANE_SURF write.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        const enum pipe pipe = intel_crtc->pipe;
        u32 ctl, stride;

        /* Keep the current plane control bits, retagging only the tiling
         * mode to match the new framebuffer's backing object. */
        ctl = I915_READ(PLANE_CTL(pipe, 0));
        ctl &= ~PLANE_CTL_TILED_MASK;
        if (obj->tiling_mode == I915_TILING_X)
                ctl |= PLANE_CTL_TILED_X;

        /*
         * The stride is either expressed as a multiple of 64 bytes chunks for
         * linear buffers or in number of tiles for tiled buffers.
         */
        stride = fb->pitches[0] >> 6;
        if (obj->tiling_mode == I915_TILING_X)
                stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */

        /*
         * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
         * PLANE_SURF updates, the update is then guaranteed to be atomic.
         */
        I915_WRITE(PLANE_CTL(pipe, 0), ctl);
        I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

        /* The surface write arms the flip; the posting read flushes it. */
        I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
        POSTING_READ(PLANE_SURF(pipe, 0));
}
9506
/*
 * Perform an MMIO page flip on an ilk-style primary plane: retag the
 * tiling bit in DSPCNTR, then write the new surface address to DSPSURF.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(intel_crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        u32 dspcntr;
        u32 reg;

        reg = DSPCNTR(intel_crtc->plane);
        dspcntr = I915_READ(reg);

        /* Update only the tiling bit; preserve all other control bits. */
        if (obj->tiling_mode != I915_TILING_NONE)
                dspcntr |= DISPPLANE_TILED;
        else
                dspcntr &= ~DISPPLANE_TILED;

        I915_WRITE(reg, dspcntr);

        /* NOTE(review): presumably the DSPSURF write latches the flip, as
         * PLANE_SURF does in skl_do_mmio_flip() — confirm against the PRM.
         * The posting read flushes the write to the hardware. */
        I915_WRITE(DSPSURF(intel_crtc->plane),
                   intel_crtc->unpin_work->gtt_offset);
        POSTING_READ(DSPSURF(intel_crtc->plane));

}
9532
/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
        struct drm_device *dev = intel_crtc->base.dev;
        bool atomic_update;
        u32 start_vbl_count;

        /* Mark the flip pending before touching the plane registers. */
        intel_mark_page_flip_active(intel_crtc);

        /* Bracket the register writes with the vblank-evasion helpers so
         * the update lands within one frame when possible. */
        atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

        if (INTEL_INFO(dev)->gen >= 9)
                skl_do_mmio_flip(intel_crtc);
        else
                /* use_mmio_flip() restricts MMIO flips to ilk+ */
                ilk_do_mmio_flip(intel_crtc);

        if (atomic_update)
                intel_pipe_update_end(intel_crtc, start_vbl_count);
}
9556
/*
 * Worker that performs a queued MMIO flip: waits for the last GPU write
 * to the new framebuffer, writes the flip, then drops the request.
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
        struct intel_crtc *crtc =
                container_of(work, struct intel_crtc, mmio_flip.work);
        struct intel_mmio_flip *mmio_flip;

        mmio_flip = &crtc->mmio_flip;
        /* Wait for pending rendering to the new fb; reset_counter lets a
         * GPU reset unblock the wait. */
        if (mmio_flip->req)
                WARN_ON(__i915_wait_request(mmio_flip->req,
                                            crtc->reset_counter,
                                            false, NULL, NULL) != 0);

        intel_do_mmio_flip(crtc);
        /* Release the request reference under struct_mutex now that the
         * flip has been written to the hardware. */
        if (mmio_flip->req) {
                mutex_lock(&crtc->base.dev->struct_mutex);
                i915_gem_request_assign(&mmio_flip->req, NULL);
                mutex_unlock(&crtc->base.dev->struct_mutex);
        }
}
9576
9577 static int intel_queue_mmio_flip(struct drm_device *dev,
9578                                  struct drm_crtc *crtc,
9579                                  struct drm_framebuffer *fb,
9580                                  struct drm_i915_gem_object *obj,
9581                                  struct intel_engine_cs *ring,
9582                                  uint32_t flags)
9583 {
9584         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9585
9586         i915_gem_request_assign(&intel_crtc->mmio_flip.req,
9587                                 obj->last_write_req);
9588
9589         schedule_work(&intel_crtc->mmio_flip.work);
9590
9591         return 0;
9592 }
9593
/*
 * Queue a gen9 CS-based page flip: emit a DERRMR unmask (LRI), a DERRMR
 * save (SRM), and the MI_DISPLAY_FLIP itself onto @ring.
 * Returns 0 on success or a negative error code.
 */
static int intel_gen9_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t plane = 0, stride;
        int ret;

        /* Translate the pipe into the MI_DISPLAY_FLIP plane select bits. */
        switch(intel_crtc->pipe) {
        case PIPE_A:
                plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
                break;
        case PIPE_B:
                plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
                break;
        case PIPE_C:
                plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
                break;
        default:
                WARN_ONCE(1, "unknown plane in flip command\n");
                return -ENODEV;
        }

        /* Stride is in 64-byte chunks for linear fbs, in tiles (512 bytes
         * wide) for X-tiled fbs; other tilings are not flippable here. */
        switch (obj->tiling_mode) {
        case I915_TILING_NONE:
                stride = fb->pitches[0] >> 6;
                break;
        case I915_TILING_X:
                stride = fb->pitches[0] >> 9;
                break;
        default:
                WARN_ONCE(1, "unknown tiling in flip command\n");
                return -ENODEV;
        }

        ret = intel_ring_begin(ring, 10);
        if (ret)
                return ret;

        /* Unmask primary-plane flip-done events in DERRMR, then save the
         * register to the ring's scratch page with a gen8-style SRM. */
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, DERRMR);
        intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                DERRMR_PIPEB_PRI_FLIP_DONE |
                                DERRMR_PIPEC_PRI_FLIP_DONE));
        intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
                              MI_SRM_LRM_GLOBAL_GTT);
        intel_ring_emit(ring, DERRMR);
        intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
        intel_ring_emit(ring, 0);

        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
        intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

        intel_mark_page_flip_active(intel_crtc);
        __intel_ring_advance(ring);

        return 0;
}
9656
/*
 * Fallback queue_flip hook for platforms without a CS flip
 * implementation: always fails with -ENODEV.
 */
static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj,
                                    struct intel_engine_cs *ring,
                                    uint32_t flags)
{
        return -ENODEV;
}
9666
/*
 * Heuristically decide whether the pending page flip on @crtc has
 * completed but its flip-done interrupt was missed.  Callers hold
 * dev->event_lock (which protects unpin_work).
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
                                         struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work = intel_crtc->unpin_work;
        u32 addr;

        /* Flip already signalled complete: definitely done. */
        if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
                return true;

        if (!work->enable_stall_check)
                return false;

        /* Record the first vblank at which the flip could have landed,
         * i.e. once any queued rendering request has completed. */
        if (work->flip_ready_vblank == 0) {
                if (work->flip_queued_req &&
                    !i915_gem_request_completed(work->flip_queued_req, true))
                        return false;

                work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
        }

        /* Allow three vblanks before suspecting a missed interrupt. */
        if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
                return false;

        /* Potential stall - if we see that the flip has happened,
         * assume a missed interrupt. */
        if (INTEL_INFO(dev)->gen >= 4)
                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
        else
                addr = I915_READ(DSPADDR(intel_crtc->plane));

        /* There is a potential issue here with a false positive after a flip
         * to the same address. We could address this by checking for a
         * non-incrementing frame counter.
         */
        return addr == work->gtt_offset;
}
9705
/*
 * Interrupt-context check (see the in_irq() assertion) that kicks a page
 * flip which appears stuck, so a lost flip-done interrupt cannot wedge
 * the flip queue permanently.
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        /* NOTE(review): to_intel_crtc() (container_of) is evaluated before
         * the NULL check below; it does not dereference crtc, but moving
         * it after the check would be tidier. */
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        WARN_ON(!in_irq());

        if (crtc == NULL)
                return;

        /* event_lock protects unpin_work; plain spin_lock is fine as we
         * are already in interrupt context (asserted above). */
        spin_lock(&dev->event_lock);
        if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
                         intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
                page_flip_completed(intel_crtc);
        }
        spin_unlock(&dev->event_lock);
}
9725
/*
 * drm_crtc_funcs.page_flip implementation: validate the new fb, allocate
 * and queue an intel_unpin_work, pin the new fb, and queue the flip via
 * either the MMIO path or a CS (ring) flip.  Returns 0 on success or a
 * negative error code; on -EIO the primary plane is restored and the
 * event completed so userspace does not hang.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event,
                                uint32_t page_flip_flags)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *primary = crtc->primary;
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
        int ret;

        /*
         * drm_mode_page_flip_ioctl() should already catch this, but double
         * check to be safe.  In the future we may enable pageflipping from
         * a disabled primary plane.
         */
        if (WARN_ON(intel_fb_obj(old_fb) == NULL))
                return -EBUSY;

        /* Can't change pixel format via MI display flips. */
        if (fb->pixel_format != crtc->primary->fb->pixel_format)
                return -EINVAL;

        /*
         * TILEOFF/LINOFF registers can't be changed via MI display flips.
         * Note that pitch changes could also affect these register.
         */
        if (INTEL_INFO(dev)->gen > 3 &&
            (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
             fb->pitches[0] != crtc->primary->fb->pitches[0]))
                return -EINVAL;

        /* GPU is dead beyond recovery: just restore the plane and signal
         * the event (see out_hang below). */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                goto out_hang;

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        work->event = event;
        work->crtc = crtc;
        work->old_fb_obj = intel_fb_obj(old_fb);
        INIT_WORK(&work->work, intel_unpin_work_fn);

        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                goto free_work;

        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irq(&dev->event_lock);
        if (intel_crtc->unpin_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
                 */
                if (__intel_pageflip_stall_check(dev, crtc)) {
                        DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
                        page_flip_completed(intel_crtc);
                } else {
                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                        spin_unlock_irq(&dev->event_lock);

                        drm_crtc_vblank_put(crtc);
                        kfree(work);
                        return -EBUSY;
                }
        }
        intel_crtc->unpin_work = work;
        spin_unlock_irq(&dev->event_lock);

        /* Throttle: don't let unpin work pile up in the workqueue. */
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto cleanup;

        /* Reference the objects for the scheduled work. */
        drm_gem_object_reference(&work->old_fb_obj->base);
        drm_gem_object_reference(&obj->base);

        crtc->primary->fb = fb;

        work->pending_flip_obj = obj;

        atomic_inc(&intel_crtc->unpin_work_count);
        intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;

        /* Select the engine used to queue a CS flip (NULL forces MMIO). */
        if (IS_VALLEYVIEW(dev)) {
                ring = &dev_priv->ring[BCS];
                if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
                        /* vlv: DISPLAY_FLIP fails to change tiling */
                        ring = NULL;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                ring = &dev_priv->ring[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
                ring = i915_gem_request_get_ring(obj->last_read_req);
                if (ring == NULL || ring->id != RCS)
                        ring = &dev_priv->ring[BCS];
        } else {
                ring = &dev_priv->ring[RCS];
        }

        ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
        if (ret)
                goto cleanup_pending;

        work->gtt_offset =
                i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;

        /* Queue the flip: MMIO path (deferred to a worker) or CS flip. */
        if (use_mmio_flip(ring, obj)) {
                ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
                                            page_flip_flags);
                if (ret)
                        goto cleanup_unpin;

                i915_gem_request_assign(&work->flip_queued_req,
                                        obj->last_write_req);
        } else {
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
                                                   page_flip_flags);
                if (ret)
                        goto cleanup_unpin;

                i915_gem_request_assign(&work->flip_queued_req,
                                        intel_ring_get_request(ring));
        }

        work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
        work->enable_stall_check = true;

        i915_gem_track_fb(work->old_fb_obj, obj,
                          INTEL_FRONTBUFFER_PRIMARY(pipe));

        intel_fbc_disable(dev);
        intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
        mutex_unlock(&dev->struct_mutex);

        trace_i915_flip_request(intel_crtc->plane, obj);

        return 0;

/* Error unwinding: each label releases what was acquired after the one
 * below it, in reverse acquisition order. */
cleanup_unpin:
        intel_unpin_fb_obj(obj);
cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
        crtc->primary->fb = old_fb;
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

cleanup:
        spin_lock_irq(&dev->event_lock);
        intel_crtc->unpin_work = NULL;
        spin_unlock_irq(&dev->event_lock);

        drm_crtc_vblank_put(crtc);
free_work:
        kfree(work);

        /* On a GPU hang, restore the plane and deliver the event so
         * userspace waiting on the flip does not block forever. */
        if (ret == -EIO) {
out_hang:
                ret = intel_plane_restore(primary);
                if (ret == 0 && event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
                        spin_unlock_irq(&dev->event_lock);
                }
        }
        return ret;
}
9904
/* CRTC helper vtable: legacy set-base plus the atomic begin/flush hooks
 * used around plane updates. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
        .mode_set_base_atomic = intel_pipe_set_base_atomic,
        .load_lut = intel_crtc_load_lut,
        .atomic_begin = intel_begin_crtc_commit,
        .atomic_flush = intel_finish_crtc_commit,
};
9911
9912 /**
9913  * intel_modeset_update_staged_output_state
9914  *
9915  * Updates the staged output configuration state, e.g. after we've read out the
9916  * current hw state.
9917  */
9918 static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9919 {
9920         struct intel_crtc *crtc;
9921         struct intel_encoder *encoder;
9922         struct intel_connector *connector;
9923
9924         list_for_each_entry(connector, &dev->mode_config.connector_list,
9925                             base.head) {
9926                 connector->new_encoder =
9927                         to_intel_encoder(connector->base.encoder);
9928         }
9929
9930         for_each_intel_encoder(dev, encoder) {
9931                 encoder->new_crtc =
9932                         to_intel_crtc(encoder->base.crtc);
9933         }
9934
9935         for_each_intel_crtc(dev, crtc) {
9936                 crtc->new_enabled = crtc->base.enabled;
9937
9938                 if (crtc->new_enabled)
9939                         crtc->new_config = crtc->config;
9940                 else
9941                         crtc->new_config = NULL;
9942         }
9943 }
9944
9945 /**
9946  * intel_modeset_commit_output_state
9947  *
9948  * This function copies the stage display pipe configuration to the real one.
9949  */
9950 static void intel_modeset_commit_output_state(struct drm_device *dev)
9951 {
9952         struct intel_crtc *crtc;
9953         struct intel_encoder *encoder;
9954         struct intel_connector *connector;
9955
9956         list_for_each_entry(connector, &dev->mode_config.connector_list,
9957                             base.head) {
9958                 connector->base.encoder = &connector->new_encoder->base;
9959         }
9960
9961         for_each_intel_encoder(dev, encoder) {
9962                 encoder->base.crtc = &encoder->new_crtc->base;
9963         }
9964
9965         for_each_intel_crtc(dev, crtc) {
9966                 crtc->base.enabled = crtc->new_enabled;
9967         }
9968 }
9969
9970 static void
9971 connected_sink_compute_bpp(struct intel_connector *connector,
9972                            struct intel_crtc_state *pipe_config)
9973 {
9974         int bpp = pipe_config->pipe_bpp;
9975
9976         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9977                 connector->base.base.id,
9978                 connector->base.name);
9979
9980         /* Don't use an invalid EDID bpc value */
9981         if (connector->base.display_info.bpc &&
9982             connector->base.display_info.bpc * 3 < bpp) {
9983                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9984                               bpp, connector->base.display_info.bpc*3);
9985                 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9986         }
9987
9988         /* Clamp bpp to 8 on screens without EDID 1.4 */
9989         if (connector->base.display_info.bpc == 0 && bpp > 24) {
9990                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9991                               bpp);
9992                 pipe_config->pipe_bpp = 24;
9993         }
9994 }
9995
/*
 * Derive the baseline pipe bpp from the framebuffer's pixel format and
 * clamp it against the bpp constraints of every sink staged for @crtc.
 * Returns the (unclamped) plane bpp, or -EINVAL for unsupported formats.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct intel_connector *connector;
        int bpp;

        switch (fb->pixel_format) {
        case DRM_FORMAT_C8:
                bpp = 8*3; /* since we go through a colormap */
                break;
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_ARGB1555:
                /* checked in intel_framebuffer_init already */
                if (WARN_ON(INTEL_INFO(dev)->gen > 3))
                        return -EINVAL;
                /* fallthrough */
        case DRM_FORMAT_RGB565:
                bpp = 6*3; /* min is 18bpp */
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                /* checked in intel_framebuffer_init already */
                if (WARN_ON(INTEL_INFO(dev)->gen < 4))
                        return -EINVAL;
                /* fallthrough */
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                bpp = 8*3;
                break;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
                /* checked in intel_framebuffer_init already */
                if (WARN_ON(INTEL_INFO(dev)->gen < 4))
                        return -EINVAL;
                bpp = 10*3;
                break;
        /* TODO: gen4+ supports 16 bpc floating point, too. */
        default:
                DRM_DEBUG_KMS("unsupported depth\n");
                return -EINVAL;
        }

        pipe_config->pipe_bpp = bpp;

        /* Clamp display bpp to EDID value */
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                if (!connector->new_encoder ||
                    connector->new_encoder->new_crtc != crtc)
                        continue;

                connected_sink_compute_bpp(connector, pipe_config);
        }

        return bpp;
}
10055
/* Dump the hardware (crtc_*) timing fields of @mode to the debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
        DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
                        "type: 0x%x flags: 0x%x\n",
                mode->crtc_clock,
                mode->crtc_hdisplay, mode->crtc_hsync_start,
                mode->crtc_hsync_end, mode->crtc_htotal,
                mode->crtc_vdisplay, mode->crtc_vsync_start,
                mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
10066
/*
 * Dump the full contents of @pipe_config to the KMS debug log, tagged
 * with @context (e.g. the caller's phase) for debugging modesets.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config,
                                   const char *context)
{
        DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
                      context, pipe_name(crtc->pipe));

        DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
        DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
                      pipe_config->pipe_bpp, pipe_config->dither);
        DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                      pipe_config->has_pch_encoder,
                      pipe_config->fdi_lanes,
                      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
                      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
                      pipe_config->fdi_m_n.tu);
        DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                      pipe_config->has_dp_encoder,
                      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
                      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
                      pipe_config->dp_m_n.tu);

        /* Second set of link m/n values (dp_m2_n2). */
        DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
                      pipe_config->has_dp_encoder,
                      pipe_config->dp_m2_n2.gmch_m,
                      pipe_config->dp_m2_n2.gmch_n,
                      pipe_config->dp_m2_n2.link_m,
                      pipe_config->dp_m2_n2.link_n,
                      pipe_config->dp_m2_n2.tu);

        DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
                      pipe_config->has_audio,
                      pipe_config->has_infoframe);

        DRM_DEBUG_KMS("requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->base.mode);
        DRM_DEBUG_KMS("adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
        intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
        DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
        DRM_DEBUG_KMS("pipe src size: %dx%d\n",
                      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
        DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                      pipe_config->gmch_pfit.control,
                      pipe_config->gmch_pfit.pgm_ratios,
                      pipe_config->gmch_pfit.lvds_border_bits);
        DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
                      pipe_config->pch_pfit.pos,
                      pipe_config->pch_pfit.size,
                      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
        DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
10120
10121 static bool encoders_cloneable(const struct intel_encoder *a,
10122                                const struct intel_encoder *b)
10123 {
10124         /* masks could be asymmetric, so check both ways */
10125         return a == b || (a->cloneable & (1 << b->type) &&
10126                           b->cloneable & (1 << a->type));
10127 }
10128
10129 static bool check_single_encoder_cloning(struct intel_crtc *crtc,
10130                                          struct intel_encoder *encoder)
10131 {
10132         struct drm_device *dev = crtc->base.dev;
10133         struct intel_encoder *source_encoder;
10134
10135         for_each_intel_encoder(dev, source_encoder) {
10136                 if (source_encoder->new_crtc != crtc)
10137                         continue;
10138
10139                 if (!encoders_cloneable(encoder, source_encoder))
10140                         return false;
10141         }
10142
10143         return true;
10144 }
10145
10146 static bool check_encoder_cloning(struct intel_crtc *crtc)
10147 {
10148         struct drm_device *dev = crtc->base.dev;
10149         struct intel_encoder *encoder;
10150
10151         for_each_intel_encoder(dev, encoder) {
10152                 if (encoder->new_crtc != crtc)
10153                         continue;
10154
10155                 if (!check_single_encoder_cloning(crtc, encoder))
10156                         return false;
10157         }
10158
10159         return true;
10160 }
10161
/*
 * Check that no digital port is staged for use by more than one
 * connector.  Returns false on a conflict.
 */
static bool check_digital_port_conflicts(struct drm_device *dev)
{
        struct intel_connector *connector;
        unsigned int used_ports = 0;

        /*
         * Walk the connector list instead of the encoder
         * list to detect the problem on ddi platforms
         * where there's just one encoder per digital port.
         */
        list_for_each_entry(connector,
                            &dev->mode_config.connector_list, base.head) {
                struct intel_encoder *encoder = connector->new_encoder;

                if (!encoder)
                        continue;

                WARN_ON(!encoder->new_crtc);

                switch (encoder->type) {
                        unsigned int port_mask;
                case INTEL_OUTPUT_UNKNOWN:
                        if (WARN_ON(!HAS_DDI(dev)))
                                break;
                        /* fallthrough: on DDI, UNKNOWN is a digital port */
                case INTEL_OUTPUT_DISPLAYPORT:
                case INTEL_OUTPUT_HDMI:
                case INTEL_OUTPUT_EDP:
                        port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

                        /* the same port mustn't appear more than once */
                        if (used_ports & port_mask)
                                return false;

                        used_ports |= port_mask;
                        /* fallthrough */
                default:
                        break;
                }
        }

        return true;
}
10203
/*
 * Compute a pipe configuration for @crtc from the requested @mode and
 * primary plane @fb, giving the staged encoders and the crtc a chance
 * to adjust (or reject) it.
 *
 * Returns a freshly kzalloc'd intel_crtc_state owned by the caller on
 * success, or an ERR_PTR() on failure (nothing to free in that case).
 */
static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct drm_display_mode *mode)
{
        struct drm_device *dev = crtc->dev;
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config;
        int plane_bpp, ret = -EINVAL;
        bool retry = true;      /* allow exactly one bandwidth-constrained retry */

        /* Reject configurations with encoders that can't be cloned together. */
        if (!check_encoder_cloning(to_intel_crtc(crtc))) {
                DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
                return ERR_PTR(-EINVAL);
        }

        /* Reject configurations that drive one digital port twice. */
        if (!check_digital_port_conflicts(dev)) {
                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
                return ERR_PTR(-EINVAL);
        }

        pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
        if (!pipe_config)
                return ERR_PTR(-ENOMEM);

        drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
        drm_mode_copy(&pipe_config->base.mode, mode);

        /* Default to the transcoder matching the pipe; encoders may
         * overwrite this in their compute_config hooks. */
        pipe_config->cpu_transcoder =
                (enum transcoder) to_intel_crtc(crtc)->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;

        /*
         * Sanitize sync polarity flags based on requested ones. If neither
         * positive or negative polarity is requested, treat this as meaning
         * negative polarity.
         */
        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

        /* Compute a starting value for pipe_config->pipe_bpp taking the source
         * plane pixel format and any sink constraints into account. Returns the
         * source plane bpp so that dithering can be selected on mismatches
         * after encoders and crtc also have had their say. */
        plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
                                              fb, pipe_config);
        if (plane_bpp < 0)
                goto fail;

        /*
         * Determine the real pipe dimensions. Note that stereo modes can
         * increase the actual pipe size due to the frame doubling and
         * insertion of additional space for blanks between the frame. This
         * is stored in the crtc timings. We use the requested mode to do this
         * computation to clearly distinguish it from the adjusted mode, which
         * can be changed by the connectors in the below retry loop.
         */
        drm_crtc_get_hv_timing(&pipe_config->base.mode,
                               &pipe_config->pipe_src_w,
                               &pipe_config->pipe_src_h);

encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
        pipe_config->pixel_multiplier = 1;

        /* Fill in default crtc timings, allow encoders to overwrite them. */
        drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
                              CRTC_STEREO_DOUBLE);

        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
         */
        for_each_intel_encoder(dev, encoder) {

                /* Only consult encoders staged for this crtc. */
                if (&encoder->new_crtc->base != crtc)
                        continue;

                if (!(encoder->compute_config(encoder, pipe_config))) {
                        DRM_DEBUG_KMS("Encoder config failure\n");
                        goto fail;
                }
        }

        /* Set default port clock if not overwritten by the encoder. Needs to be
         * done afterwards in case the encoder adjusts the mode. */
        if (!pipe_config->port_clock)
                pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
                        * pipe_config->pixel_multiplier;

        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
        if (ret < 0) {
                DRM_DEBUG_KMS("CRTC fixup failed\n");
                goto fail;
        }

        /* The crtc may ask for another pass (e.g. with a reduced bpp);
         * permit that only once to guarantee termination. */
        if (ret == RETRY) {
                if (WARN(!retry, "loop in pipe configuration computation\n")) {
                        ret = -EINVAL;
                        goto fail;
                }

                DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
                retry = false;
                goto encoder_retry;
        }

        /* Enable dithering when the pipe bpp ended up below the plane bpp. */
        pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
                      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);

        return pipe_config;
fail:
        kfree(pipe_config);
        return ERR_PTR(ret);
}
10326
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain).
 *
 * On return:
 *  *disable_pipes - pipes that are being switched off entirely
 *  *prepare_pipes - pipes whose output routing or enabled state changes
 *  *modeset_pipes - subset of prepare_pipes that need a full mode set
 */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
                             unsigned *prepare_pipes, unsigned *disable_pipes)
{
        struct intel_crtc *intel_crtc;
        struct drm_device *dev = crtc->dev;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        struct drm_crtc *tmp_crtc;

        *disable_pipes = *modeset_pipes = *prepare_pipes = 0;

        /* Check which crtcs have changed outputs connected to them, these need
         * to be part of the prepare_pipes mask. We don't (yet) support global
         * modeset across multiple crtcs, so modeset_pipes will only have one
         * bit set at most. */
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                /* Connector routing unchanged - nothing to prepare. */
                if (connector->base.encoder == &connector->new_encoder->base)
                        continue;

                /* Mark both the old crtc (if any) ... */
                if (connector->base.encoder) {
                        tmp_crtc = connector->base.encoder->crtc;

                        *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
                }

                /* ... and the new crtc (if any) as needing preparation. */
                if (connector->new_encoder)
                        *prepare_pipes |=
                                1 << connector->new_encoder->new_crtc->pipe;
        }

        /* Same as above, but for encoder->crtc routing changes. */
        for_each_intel_encoder(dev, encoder) {
                if (encoder->base.crtc == &encoder->new_crtc->base)
                        continue;

                if (encoder->base.crtc) {
                        tmp_crtc = encoder->base.crtc;

                        *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
                }

                if (encoder->new_crtc)
                        *prepare_pipes |= 1 << encoder->new_crtc->pipe;
        }

        /* Check for pipes that will be enabled/disabled ... */
        for_each_intel_crtc(dev, intel_crtc) {
                if (intel_crtc->base.enabled == intel_crtc->new_enabled)
                        continue;

                if (!intel_crtc->new_enabled)
                        *disable_pipes |= 1 << intel_crtc->pipe;
                else
                        *prepare_pipes |= 1 << intel_crtc->pipe;
        }


        /* set_mode is also used to update properties on live display pipes. */
        intel_crtc = to_intel_crtc(crtc);
        if (intel_crtc->new_enabled)
                *prepare_pipes |= 1 << intel_crtc->pipe;

        /*
         * For simplicity do a full modeset on any pipe where the output routing
         * changed. We could be more clever, but that would require us to be
         * more careful with calling the relevant encoder->mode_set functions.
         */
        if (*prepare_pipes)
                *modeset_pipes = *prepare_pipes;

        /* ... and mask these out. */
        *modeset_pipes &= ~(*disable_pipes);
        *prepare_pipes &= ~(*disable_pipes);

        /*
         * HACK: We don't (yet) fully support global modesets. intel_set_config
         * obeys this rule, but the modeset restore mode of
         * intel_modeset_setup_hw_state does not.
         */
        *modeset_pipes &= 1 << intel_crtc->pipe;
        *prepare_pipes &= 1 << intel_crtc->pipe;

        DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
                      *modeset_pipes, *prepare_pipes, *disable_pipes);
}
10415
10416 static bool intel_crtc_in_use(struct drm_crtc *crtc)
10417 {
10418         struct drm_encoder *encoder;
10419         struct drm_device *dev = crtc->dev;
10420
10421         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10422                 if (encoder->crtc == crtc)
10423                         return true;
10424
10425         return false;
10426 }
10427
/*
 * Commit the staged output routing to the sw tracking state and sanity
 * check the result.  Connectors on pipes in @prepare_pipes are forced to
 * DPMS on, with the dpms property updated to match.
 */
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        struct intel_crtc *intel_crtc;
        struct drm_connector *connector;

        intel_shared_dpll_commit(dev_priv);

        /* Clear connectors_active on encoders feeding updated pipes; it is
         * re-set below for connectors that end up on. */
        for_each_intel_encoder(dev, intel_encoder) {
                if (!intel_encoder->base.crtc)
                        continue;

                intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

                if (prepare_pipes & (1 << intel_crtc->pipe))
                        intel_encoder->connectors_active = false;
        }

        /* Copy new_* routing into the current sw state. */
        intel_modeset_commit_output_state(dev);

        /* Double check state. */
        for_each_intel_crtc(dev, intel_crtc) {
                WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
                WARN_ON(intel_crtc->new_config &&
                        intel_crtc->new_config != intel_crtc->config);
                WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
        }

        /* Force DPMS on for connectors driving the updated pipes. */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (!connector->encoder || !connector->encoder->crtc)
                        continue;

                intel_crtc = to_intel_crtc(connector->encoder->crtc);

                if (prepare_pipes & (1 << intel_crtc->pipe)) {
                        struct drm_property *dpms_property =
                                dev->mode_config.dpms_property;

                        connector->dpms = DRM_MODE_DPMS_ON;
                        drm_object_property_set_value(&connector->base,
                                                         dpms_property,
                                                         DRM_MODE_DPMS_ON);

                        intel_encoder = to_intel_encoder(connector->encoder);
                        intel_encoder->connectors_active = true;
                }
        }

}
10479
/*
 * Compare two clocks with a small tolerance, since clock readback isn't
 * exact.  Two non-zero clocks match when their difference stays below
 * roughly 5% of their sum; a zero clock only matches another zero.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int delta, sum;

        if (clock1 == clock2)
                return true;

        if (clock1 == 0 || clock2 == 0)
                return false;

        delta = abs(clock1 - clock2);
        sum = clock1 + clock2;

        /* Equivalent to delta/sum < 5%, computed in integer math. */
        return ((delta + sum) * 100) / sum < 105;
}
10497
/*
 * Iterate over all intel crtcs on @dev whose pipe bit is set in @mask.
 * Fix: parenthesize the 'mask' argument in the expansion - previously a
 * compound expression such as 'a | b' would bind incorrectly against the
 * higher-precedence '&'.
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
        list_for_each_entry((intel_crtc), \
                            &(dev)->mode_config.crtc_list, \
                            base.head) \
                if ((mask) & (1 << (intel_crtc)->pipe))
10503
/*
 * Compare the sw-computed pipe config against the config read back from
 * hardware.  Logs the first mismatching field via DRM_ERROR and returns
 * false on any disagreement, true when the configs match.
 *
 * NB: the PIPE_CONF_CHECK_* macros below expand to 'return false' out of
 * this function, so they may only be used at statement level here.
 */
static bool
intel_pipe_config_compare(struct drm_device *dev,
                          struct intel_crtc_state *current_config,
                          struct intel_crtc_state *pipe_config)
{
/* Compare one field, printing hex on mismatch. */
#define PIPE_CONF_CHECK_X(name) \
        if (current_config->name != pipe_config->name) { \
                DRM_ERROR("mismatch in " #name " " \
                          "(expected 0x%08x, found 0x%08x)\n", \
                          current_config->name, \
                          pipe_config->name); \
                return false; \
        }

/* Compare one field, printing decimal on mismatch. */
#define PIPE_CONF_CHECK_I(name) \
        if (current_config->name != pipe_config->name) { \
                DRM_ERROR("mismatch in " #name " " \
                          "(expected %i, found %i)\n", \
                          current_config->name, \
                          pipe_config->name); \
                return false; \
        }

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
        if ((current_config->name != pipe_config->name) && \
                (current_config->alt_name != pipe_config->name)) { \
                        DRM_ERROR("mismatch in " #name " " \
                                  "(expected %i or %i, found %i)\n", \
                                  current_config->name, \
                                  current_config->alt_name, \
                                  pipe_config->name); \
                        return false; \
        }

/* Compare only the bits selected by @mask of a flags field. */
#define PIPE_CONF_CHECK_FLAGS(name, mask)       \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
                DRM_ERROR("mismatch in " #name "(" #mask ") "      \
                          "(expected %i, found %i)\n", \
                          current_config->name & (mask), \
                          pipe_config->name & (mask)); \
                return false; \
        }

/* Clocks can't always be read back exactly; allow a small relative error. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
                DRM_ERROR("mismatch in " #name " " \
                          "(expected %i, found %i)\n", \
                          current_config->name, \
                          pipe_config->name); \
                return false; \
        }

/* True when either config carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk)  \
        ((current_config->quirks | pipe_config->quirks) & (quirk))

        PIPE_CONF_CHECK_I(cpu_transcoder);

        /* FDI/PCH link state. */
        PIPE_CONF_CHECK_I(has_pch_encoder);
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
        PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
        PIPE_CONF_CHECK_I(fdi_m_n.link_m);
        PIPE_CONF_CHECK_I(fdi_m_n.link_n);
        PIPE_CONF_CHECK_I(fdi_m_n.tu);

        PIPE_CONF_CHECK_I(has_dp_encoder);

        /* DP link M/N values; on BDW+ one register set serves both the
         * high and low refresh rate, hence the _ALT comparison. */
        if (INTEL_INFO(dev)->gen < 8) {
                PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
                PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
                PIPE_CONF_CHECK_I(dp_m_n.link_m);
                PIPE_CONF_CHECK_I(dp_m_n.link_n);
                PIPE_CONF_CHECK_I(dp_m_n.tu);

                if (current_config->has_drrs) {
                        PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
                        PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
                        PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
                        PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
                        PIPE_CONF_CHECK_I(dp_m2_n2.tu);
                }
        } else {
                PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
                PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
                PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
                PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
                PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
        }

        /* Adjusted mode timings. */
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

        PIPE_CONF_CHECK_I(pixel_multiplier);
        PIPE_CONF_CHECK_I(has_hdmi_sink);
        if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
            IS_VALLEYVIEW(dev))
                PIPE_CONF_CHECK_I(limited_color_range);
        PIPE_CONF_CHECK_I(has_infoframe);

        PIPE_CONF_CHECK_I(has_audio);

        PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                              DRM_MODE_FLAG_INTERLACE);

        /* Sync polarity can't be trusted on quirky configs. */
        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PHSYNC);
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NHSYNC);
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PVSYNC);
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NVSYNC);
        }

        PIPE_CONF_CHECK_I(pipe_src_w);
        PIPE_CONF_CHECK_I(pipe_src_h);

        /*
         * FIXME: BIOS likes to set up a cloned config with lvds+external
         * screen. Since we don't yet re-compute the pipe config when moving
         * just the lvds port away to another pipe the sw tracking won't match.
         *
         * Proper atomic modesets with recomputed global state will fix this.
         * Until then just don't check gmch state for inherited modes.
         */
        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
                PIPE_CONF_CHECK_I(gmch_pfit.control);
                /* pfit ratios are autocomputed by the hw on gen4+ */
                if (INTEL_INFO(dev)->gen < 4)
                        PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
                PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
        }

        PIPE_CONF_CHECK_I(pch_pfit.enabled);
        if (current_config->pch_pfit.enabled) {
                PIPE_CONF_CHECK_I(pch_pfit.pos);
                PIPE_CONF_CHECK_I(pch_pfit.size);
        }

        /* BDW+ don't expose a synchronous way to read the state */
        if (IS_HASWELL(dev))
                PIPE_CONF_CHECK_I(ips_enabled);

        PIPE_CONF_CHECK_I(double_wide);

        PIPE_CONF_CHECK_X(ddi_pll_sel);

        /* Shared DPLL selection and programmed hw state. */
        PIPE_CONF_CHECK_I(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);

        PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
        PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

        return true;
}
10693
/*
 * Gen9+ only: verify that the DDB (display data buffer) allocation read
 * back from hardware matches the sw-tracked allocation for every plane
 * and the cursor on every active crtc.  Mismatches are only logged.
 */
static void check_wm_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct skl_ddb_allocation hw_ddb, *sw_ddb;
        struct intel_crtc *intel_crtc;
        int plane;

        /* DDB allocation tracking only exists on SKL+. */
        if (INTEL_INFO(dev)->gen < 9)
                return;

        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        for_each_intel_crtc(dev, intel_crtc) {
                struct skl_ddb_entry *hw_entry, *sw_entry;
                const enum pipe pipe = intel_crtc->pipe;

                /* Inactive pipes have no meaningful DDB allocation. */
                if (!intel_crtc->active)
                        continue;

                /* planes */
                for_each_plane(pipe, plane) {
                        hw_entry = &hw_ddb.plane[pipe][plane];
                        sw_entry = &sw_ddb->plane[pipe][plane];

                        if (skl_ddb_entry_equal(hw_entry, sw_entry))
                                continue;

                        DRM_ERROR("mismatch in DDB state pipe %c plane %d "
                                  "(expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_entry->start, sw_entry->end,
                                  hw_entry->start, hw_entry->end);
                }

                /* cursor */
                hw_entry = &hw_ddb.cursor[pipe];
                sw_entry = &sw_ddb->cursor[pipe];

                if (skl_ddb_entry_equal(hw_entry, sw_entry))
                        continue;

                DRM_ERROR("mismatch in DDB state pipe %c cursor "
                          "(expected (%u,%u), found (%u,%u))\n",
                          pipe_name(pipe),
                          sw_entry->start, sw_entry->end,
                          hw_entry->start, hw_entry->end);
        }
}
10743
/*
 * Cross check each connector's sw state against the hw, and verify that
 * the staged (new_encoder) routing has been committed.
 */
static void
check_connector_state(struct drm_device *dev)
{
        struct intel_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                /* This also checks the encoder/connector hw state with the
                 * ->get_hw_state callbacks. */
                intel_connector_check_state(connector);

                /* After committing, staged and current encoder must agree. */
                I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
                     "connector's staged encoder doesn't match current encoder\n");
        }
}
10759
/*
 * Cross check every encoder's sw tracking (crtc assignment and
 * connectors_active) against the connector list and the hw state
 * reported by ->get_hw_state.  Inconsistencies are only warned about.
 */
static void
check_encoder_state(struct drm_device *dev)
{
        struct intel_encoder *encoder;
        struct intel_connector *connector;

        for_each_intel_encoder(dev, encoder) {
                bool enabled = false;   /* any connector routed to this encoder */
                bool active = false;    /* any such connector with dpms != off */
                enum pipe pipe, tracked_pipe;

                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);

                I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
                     "encoder's stage crtc doesn't match current crtc\n");
                I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
                     "encoder's active_connectors set, but no crtc\n");

                /* Derive enabled/active from the current connector routing. */
                list_for_each_entry(connector, &dev->mode_config.connector_list,
                                    base.head) {
                        if (connector->base.encoder != &encoder->base)
                                continue;
                        enabled = true;
                        if (connector->base.dpms != DRM_MODE_DPMS_OFF)
                                active = true;
                }
                /*
                 * for MST connectors if we unplug the connector is gone
                 * away but the encoder is still connected to a crtc
                 * until a modeset happens in response to the hotplug.
                 */
                if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);
                I915_STATE_WARN(active && !encoder->base.crtc,
                     "active encoder with no crtc\n");

                I915_STATE_WARN(encoder->connectors_active != active,
                     "encoder's computed active state doesn't match tracked active state "
                     "(expected %i, found %i)\n", active, encoder->connectors_active);

                /* Now compare against what the hardware reports. */
                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != encoder->connectors_active,
                     "encoder's hw state doesn't match sw tracking "
                     "(expected %i, found %i)\n",
                     encoder->connectors_active, active);

                if (!encoder->base.crtc)
                        continue;

                /* The hw-reported pipe must match the sw-tracked crtc. */
                tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
                I915_STATE_WARN(active && pipe != tracked_pipe,
                     "active encoder's pipe doesn't match"
                     "(expected %i, found %i)\n",
                     tracked_pipe, pipe);

        }
}
10824
/*
 * Cross check every crtc's sw tracking (active/enabled) against the
 * encoders routed to it and against the pipe config read back from
 * hardware.  Inconsistencies are warned about and the full configs are
 * dumped on a pipe state mismatch.
 *
 * NOTE(review): pipe_config lives on the stack and intel_crtc_state is a
 * sizable struct - presumably acceptable in this (process context) path.
 */
static void
check_crtc_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_crtc_state pipe_config;

        for_each_intel_crtc(dev, crtc) {
                bool enabled = false;   /* any encoder routed to this crtc */
                bool active = false;    /* any such encoder with active connectors */

                memset(&pipe_config, 0, sizeof(pipe_config));

                DRM_DEBUG_KMS("[CRTC:%d]\n",
                              crtc->base.base.id);

                I915_STATE_WARN(crtc->active && !crtc->base.enabled,
                     "active crtc, but not enabled in sw tracking\n");

                /* Derive enabled/active from the current encoder routing. */
                for_each_intel_encoder(dev, encoder) {
                        if (encoder->base.crtc != &crtc->base)
                                continue;
                        enabled = true;
                        if (encoder->connectors_active)
                                active = true;
                }

                I915_STATE_WARN(active != crtc->active,
                     "crtc's computed active state doesn't match tracked active state "
                     "(expected %i, found %i)\n", active, crtc->active);
                I915_STATE_WARN(enabled != crtc->base.enabled,
                     "crtc's computed enabled state doesn't match tracked enabled state "
                     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

                /* Read back the actual pipe config from the hardware. */
                active = dev_priv->display.get_pipe_config(crtc,
                                                           &pipe_config);

                /* hw state is inconsistent with the pipe quirk */
                if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
                    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                        active = crtc->active;

                /* Let active encoders fill in their part of the hw config. */
                for_each_intel_encoder(dev, encoder) {
                        enum pipe pipe;
                        if (encoder->base.crtc != &crtc->base)
                                continue;
                        if (encoder->get_hw_state(encoder, &pipe))
                                encoder->get_config(encoder, &pipe_config);
                }

                I915_STATE_WARN(crtc->active != active,
                     "crtc active state doesn't match with hw state "
                     "(expected %i, found %i)\n", crtc->active, active);

                if (active &&
                    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
                        I915_STATE_WARN(1, "pipe state doesn't match!\n");
                        intel_dump_pipe_config(crtc, &pipe_config,
                                               "[hw state]");
                        intel_dump_pipe_config(crtc, crtc->config,
                                               "[sw state]");
                }
        }
}
10890
10891 static void
10892 check_shared_dpll_state(struct drm_device *dev)
10893 {
10894         struct drm_i915_private *dev_priv = dev->dev_private;
10895         struct intel_crtc *crtc;
10896         struct intel_dpll_hw_state dpll_hw_state;
10897         int i;
10898
10899         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10900                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10901                 int enabled_crtcs = 0, active_crtcs = 0;
10902                 bool active;
10903
10904                 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10905
10906                 DRM_DEBUG_KMS("%s\n", pll->name);
10907
10908                 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10909
10910                 I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
10911                      "more active pll users than references: %i vs %i\n",
10912                      pll->active, hweight32(pll->config.crtc_mask));
10913                 I915_STATE_WARN(pll->active && !pll->on,
10914                      "pll in active use but not on in sw tracking\n");
10915                 I915_STATE_WARN(pll->on && !pll->active,
10916                      "pll in on but not on in use in sw tracking\n");
10917                 I915_STATE_WARN(pll->on != active,
10918                      "pll on state mismatch (expected %i, found %i)\n",
10919                      pll->on, active);
10920
10921                 for_each_intel_crtc(dev, crtc) {
10922                         if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10923                                 enabled_crtcs++;
10924                         if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10925                                 active_crtcs++;
10926                 }
10927                 I915_STATE_WARN(pll->active != active_crtcs,
10928                      "pll active crtcs mismatch (expected %i, found %i)\n",
10929                      pll->active, active_crtcs);
10930                 I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
10931                      "pll enabled crtcs mismatch (expected %i, found %i)\n",
10932                      hweight32(pll->config.crtc_mask), enabled_crtcs);
10933
10934                 I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
10935                                        sizeof(dpll_hw_state)),
10936                      "pll hw state mismatch\n");
10937         }
10938 }
10939
/*
 * Run the full set of modeset state cross-checks: watermark, connector,
 * encoder, crtc and shared-DPLL software state are each verified against
 * the hardware (each check_* helper warns on any inconsistency).
 */
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_wm_state(dev);
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}
10949
10950 void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
10951                                      int dotclock)
10952 {
10953         /*
10954          * FDI already provided one idea for the dotclock.
10955          * Yell if the encoder disagrees.
10956          */
10957         WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
10958              "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10959              pipe_config->base.adjusted_mode.crtc_clock, dotclock);
10960 }
10961
10962 static void update_scanline_offset(struct intel_crtc *crtc)
10963 {
10964         struct drm_device *dev = crtc->base.dev;
10965
10966         /*
10967          * The scanline counter increments at the leading edge of hsync.
10968          *
10969          * On most platforms it starts counting from vtotal-1 on the
10970          * first active line. That means the scanline counter value is
10971          * always one less than what we would expect. Ie. just after
10972          * start of vblank, which also occurs at start of hsync (on the
10973          * last active line), the scanline counter will read vblank_start-1.
10974          *
10975          * On gen2 the scanline counter starts counting from 1 instead
10976          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10977          * to keep the value positive), instead of adding one.
10978          *
10979          * On HSW+ the behaviour of the scanline counter depends on the output
10980          * type. For DP ports it behaves like most other platforms, but on HDMI
10981          * there's an extra 1 line difference. So we need to add two instead of
10982          * one to the value.
10983          */
10984         if (IS_GEN2(dev)) {
10985                 const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
10986                 int vtotal;
10987
10988                 vtotal = mode->crtc_vtotal;
10989                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10990                         vtotal /= 2;
10991
10992                 crtc->scanline_offset = vtotal - 1;
10993         } else if (HAS_DDI(dev) &&
10994                    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
10995                 crtc->scanline_offset = 2;
10996         } else
10997                 crtc->scanline_offset = 1;
10998 }
10999
11000 static struct intel_crtc_state *
11001 intel_modeset_compute_config(struct drm_crtc *crtc,
11002                              struct drm_display_mode *mode,
11003                              struct drm_framebuffer *fb,
11004                              unsigned *modeset_pipes,
11005                              unsigned *prepare_pipes,
11006                              unsigned *disable_pipes)
11007 {
11008         struct intel_crtc_state *pipe_config = NULL;
11009
11010         intel_modeset_affected_pipes(crtc, modeset_pipes,
11011                                      prepare_pipes, disable_pipes);
11012
11013         if ((*modeset_pipes) == 0)
11014                 goto out;
11015
11016         /*
11017          * Note this needs changes when we start tracking multiple modes
11018          * and crtcs.  At that point we'll need to compute the whole config
11019          * (i.e. one pipe_config for each crtc) rather than just the one
11020          * for this crtc.
11021          */
11022         pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
11023         if (IS_ERR(pipe_config)) {
11024                 goto out;
11025         }
11026         intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
11027                                "[modeset]");
11028
11029 out:
11030         return pipe_config;
11031 }
11032
11033 static int __intel_set_mode_setup_plls(struct drm_device *dev,
11034                                        unsigned modeset_pipes,
11035                                        unsigned disable_pipes)
11036 {
11037         struct drm_i915_private *dev_priv = to_i915(dev);
11038         unsigned clear_pipes = modeset_pipes | disable_pipes;
11039         struct intel_crtc *intel_crtc;
11040         int ret = 0;
11041
11042         if (!dev_priv->display.crtc_compute_clock)
11043                 return 0;
11044
11045         ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
11046         if (ret)
11047                 goto done;
11048
11049         for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
11050                 struct intel_crtc_state *state = intel_crtc->new_config;
11051                 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11052                                                            state);
11053                 if (ret) {
11054                         intel_shared_dpll_abort_config(dev_priv);
11055                         goto done;
11056                 }
11057         }
11058
11059 done:
11060         return ret;
11061 }
11062
/*
 * Core legacy mode-set implementation: disables the pipes in
 * @disable_pipes and @prepare_pipes, installs @pipe_config on @crtc
 * (when @modeset_pipes is non-zero), updates the plane and finally
 * re-enables the prepared pipes.  The ordering of these steps is
 * load-bearing; do not reorder.
 *
 * Returns 0 on success or a negative error code; on failure the
 * previous crtc->mode is restored for a still-enabled crtc.
 */
static int __intel_set_mode(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    int x, int y, struct drm_framebuffer *fb,
			    struct intel_crtc_state *pipe_config,
			    unsigned modeset_pipes,
			    unsigned prepare_pipes,
			    unsigned disable_pipes)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *saved_mode;
	struct intel_crtc *intel_crtc;
	int ret = 0;

	/* Keep a copy of the old mode so we can restore it on failure. */
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
	if (!saved_mode)
		return -ENOMEM;

	*saved_mode = crtc->mode;

	if (modeset_pipes)
		to_intel_crtc(crtc)->new_config = pipe_config;

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (IS_VALLEYVIEW(dev)) {
		valleyview_modeset_global_pipes(dev, &prepare_pipes);

		/* may have added more to prepare_pipes than we should */
		prepare_pipes &= ~disable_pipes;
	}

	ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
	if (ret)
		goto done;

	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	/* Pipes that stay owned but change config are only turned off here. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 *
	 * Note we'll need to fix this up when we start tracking multiple
	 * pipes; here we assume a single modeset_pipe and only track the
	 * single crtc and mode.
	 */
	if (modeset_pipes) {
		crtc->mode = *mode;
		/* mode_set/enable/disable functions rely on a correct pipe
		 * config. */
		intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);

		/*
		 * Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc,
						&pipe_config->base.adjusted_mode);
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	modeset_update_crtc_power_domains(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		struct drm_plane *primary = intel_crtc->base.primary;
		int vdisplay, hdisplay;

		/* Source coordinates are in 16.16 fixed point, hence << 16. */
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
		ret = primary->funcs->update_plane(primary, &intel_crtc->base,
						   fb, 0, 0,
						   hdisplay, vdisplay,
						   x << 16, y << 16,
						   hdisplay << 16, vdisplay << 16);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		update_scanline_offset(intel_crtc);

		dev_priv->display.crtc_enable(&intel_crtc->base);
	}

	/* FIXME: add subpixel order */
done:
	if (ret && crtc->enabled)
		crtc->mode = *saved_mode;

	kfree(saved_mode);
	return ret;
}
11170
11171 static int intel_set_mode_pipes(struct drm_crtc *crtc,
11172                                 struct drm_display_mode *mode,
11173                                 int x, int y, struct drm_framebuffer *fb,
11174                                 struct intel_crtc_state *pipe_config,
11175                                 unsigned modeset_pipes,
11176                                 unsigned prepare_pipes,
11177                                 unsigned disable_pipes)
11178 {
11179         int ret;
11180
11181         ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
11182                                prepare_pipes, disable_pipes);
11183
11184         if (ret == 0)
11185                 intel_modeset_check_state(crtc->dev);
11186
11187         return ret;
11188 }
11189
/*
 * Compute the pipe config and the affected pipe masks for @mode on
 * @crtc, then perform the actual mode set.
 */
static int intel_set_mode(struct drm_crtc *crtc,
			  struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *fb)
{
	unsigned modeset_pipes, prepare_pipes, disable_pipes;
	struct intel_crtc_state *pipe_config;

	pipe_config = intel_modeset_compute_config(crtc, mode, fb,
						   &modeset_pipes,
						   &prepare_pipes,
						   &disable_pipes);
	if (IS_ERR(pipe_config))
		return PTR_ERR(pipe_config);

	return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
				    modeset_pipes, prepare_pipes,
				    disable_pipes);
}
11209
11210 void intel_crtc_restore_mode(struct drm_crtc *crtc)
11211 {
11212         intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
11213 }
11214
11215 #undef for_each_intel_crtc_masked
11216
11217 static void intel_set_config_free(struct intel_set_config *config)
11218 {
11219         if (!config)
11220                 return;
11221
11222         kfree(config->save_connector_encoders);
11223         kfree(config->save_encoder_crtcs);
11224         kfree(config->save_crtc_enabled);
11225         kfree(config);
11226 }
11227
11228 static int intel_set_config_save_state(struct drm_device *dev,
11229                                        struct intel_set_config *config)
11230 {
11231         struct drm_crtc *crtc;
11232         struct drm_encoder *encoder;
11233         struct drm_connector *connector;
11234         int count;
11235
11236         config->save_crtc_enabled =
11237                 kcalloc(dev->mode_config.num_crtc,
11238                         sizeof(bool), GFP_KERNEL);
11239         if (!config->save_crtc_enabled)
11240                 return -ENOMEM;
11241
11242         config->save_encoder_crtcs =
11243                 kcalloc(dev->mode_config.num_encoder,
11244                         sizeof(struct drm_crtc *), GFP_KERNEL);
11245         if (!config->save_encoder_crtcs)
11246                 return -ENOMEM;
11247
11248         config->save_connector_encoders =
11249                 kcalloc(dev->mode_config.num_connector,
11250                         sizeof(struct drm_encoder *), GFP_KERNEL);
11251         if (!config->save_connector_encoders)
11252                 return -ENOMEM;
11253
11254         /* Copy data. Note that driver private data is not affected.
11255          * Should anything bad happen only the expected state is
11256          * restored, not the drivers personal bookkeeping.
11257          */
11258         count = 0;
11259         for_each_crtc(dev, crtc) {
11260                 config->save_crtc_enabled[count++] = crtc->enabled;
11261         }
11262
11263         count = 0;
11264         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
11265                 config->save_encoder_crtcs[count++] = encoder->crtc;
11266         }
11267
11268         count = 0;
11269         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11270                 config->save_connector_encoders[count++] = connector->encoder;
11271         }
11272
11273         return 0;
11274 }
11275
11276 static void intel_set_config_restore_state(struct drm_device *dev,
11277                                            struct intel_set_config *config)
11278 {
11279         struct intel_crtc *crtc;
11280         struct intel_encoder *encoder;
11281         struct intel_connector *connector;
11282         int count;
11283
11284         count = 0;
11285         for_each_intel_crtc(dev, crtc) {
11286                 crtc->new_enabled = config->save_crtc_enabled[count++];
11287
11288                 if (crtc->new_enabled)
11289                         crtc->new_config = crtc->config;
11290                 else
11291                         crtc->new_config = NULL;
11292         }
11293
11294         count = 0;
11295         for_each_intel_encoder(dev, encoder) {
11296                 encoder->new_crtc =
11297                         to_intel_crtc(config->save_encoder_crtcs[count++]);
11298         }
11299
11300         count = 0;
11301         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11302                 connector->new_encoder =
11303                         to_intel_encoder(config->save_connector_encoders[count++]);
11304         }
11305 }
11306
11307 static bool
11308 is_crtc_connector_off(struct drm_mode_set *set)
11309 {
11310         int i;
11311
11312         if (set->num_connectors == 0)
11313                 return false;
11314
11315         if (WARN_ON(set->connectors == NULL))
11316                 return false;
11317
11318         for (i = 0; i < set->num_connectors; i++)
11319                 if (set->connectors[i]->encoder &&
11320                     set->connectors[i]->encoder->crtc == set->crtc &&
11321                     set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
11322                         return true;
11323
11324         return false;
11325 }
11326
11327 static void
11328 intel_set_config_compute_mode_changes(struct drm_mode_set *set,
11329                                       struct intel_set_config *config)
11330 {
11331
11332         /* We should be able to check here if the fb has the same properties
11333          * and then just flip_or_move it */
11334         if (is_crtc_connector_off(set)) {
11335                 config->mode_changed = true;
11336         } else if (set->crtc->primary->fb != set->fb) {
11337                 /*
11338                  * If we have no fb, we can only flip as long as the crtc is
11339                  * active, otherwise we need a full mode set.  The crtc may
11340                  * be active if we've only disabled the primary plane, or
11341                  * in fastboot situations.
11342                  */
11343                 if (set->crtc->primary->fb == NULL) {
11344                         struct intel_crtc *intel_crtc =
11345                                 to_intel_crtc(set->crtc);
11346
11347                         if (intel_crtc->active) {
11348                                 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
11349                                 config->fb_changed = true;
11350                         } else {
11351                                 DRM_DEBUG_KMS("inactive crtc, full mode set\n");
11352                                 config->mode_changed = true;
11353                         }
11354                 } else if (set->fb == NULL) {
11355                         config->mode_changed = true;
11356                 } else if (set->fb->pixel_format !=
11357                            set->crtc->primary->fb->pixel_format) {
11358                         config->mode_changed = true;
11359                 } else {
11360                         config->fb_changed = true;
11361                 }
11362         }
11363
11364         if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
11365                 config->fb_changed = true;
11366
11367         if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
11368                 DRM_DEBUG_KMS("modes are different, full mode set\n");
11369                 drm_mode_debug_printmodeline(&set->crtc->mode);
11370                 drm_mode_debug_printmodeline(set->mode);
11371                 config->mode_changed = true;
11372         }
11373
11374         DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
11375                         set->crtc->base.id, config->mode_changed, config->fb_changed);
11376 }
11377
/*
 * Stage the output routing requested by @set into the new_encoder /
 * new_crtc / new_enabled fields of connectors, encoders and crtcs,
 * setting config->mode_changed whenever the staged routing differs from
 * the current one.
 *
 * Returns 0 on success, or -EINVAL when the requested routing is
 * impossible (an encoder claimed by more than one connector, or an
 * encoder/crtc pairing rejected by drm_encoder_crtc_ok()).
 */
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	int ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it.
		 * NOTE: relies on ro == num_connectors iff the loop above did
		 * not break, i.e. this connector was not in the passed list. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				connector->base.base.id,
				connector->base.name);
		}


		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		struct drm_crtc *new_crtc;

		if (!connector->new_encoder)
			continue;

		new_crtc = connector->new_encoder->base.crtc;

		/* Connectors named in @set move to the requested crtc. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
					 new_crtc)) {
			return -EINVAL;
		}
		connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			connector->base.base.id,
			connector->base.name,
			new_crtc->base.id);
	}

	/* Check for any encoders that needs to be disabled. */
	for_each_intel_encoder(dev, encoder) {
		int num_connectors = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);
				num_connectors++;
			}
		}

		if (num_connectors == 0)
			encoder->new_crtc = NULL;
		else if (num_connectors > 1)
			return -EINVAL;

		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->new_encoder)
			if (connector->new_encoder != connector->encoder)
				connector->encoder = connector->new_encoder;
	}
	/* A crtc is enabled iff some encoder was staged onto it. */
	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = false;

		for_each_intel_encoder(dev, encoder) {
			if (encoder->new_crtc == crtc) {
				crtc->new_enabled = true;
				break;
			}
		}

		if (crtc->new_enabled != crtc->base.enabled) {
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
				      crtc->new_enabled ? "en" : "dis");
			config->mode_changed = true;
		}

		if (crtc->new_enabled)
			crtc->new_config = crtc->config;
		else
			crtc->new_config = NULL;
	}

	return 0;
}
11508
11509 static void disable_crtc_nofb(struct intel_crtc *crtc)
11510 {
11511         struct drm_device *dev = crtc->base.dev;
11512         struct intel_encoder *encoder;
11513         struct intel_connector *connector;
11514
11515         DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
11516                       pipe_name(crtc->pipe));
11517
11518         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11519                 if (connector->new_encoder &&
11520                     connector->new_encoder->new_crtc == crtc)
11521                         connector->new_encoder = NULL;
11522         }
11523
11524         for_each_intel_encoder(dev, encoder) {
11525                 if (encoder->new_crtc == crtc)
11526                         encoder->new_crtc = NULL;
11527         }
11528
11529         crtc->new_enabled = false;
11530         crtc->new_config = NULL;
11531 }
11532
11533 static int intel_crtc_set_config(struct drm_mode_set *set)
11534 {
11535         struct drm_device *dev;
11536         struct drm_mode_set save_set;
11537         struct intel_set_config *config;
11538         struct intel_crtc_state *pipe_config;
11539         unsigned modeset_pipes, prepare_pipes, disable_pipes;
11540         int ret;
11541
11542         BUG_ON(!set);
11543         BUG_ON(!set->crtc);
11544         BUG_ON(!set->crtc->helper_private);
11545
11546         /* Enforce sane interface api - has been abused by the fb helper. */
11547         BUG_ON(!set->mode && set->fb);
11548         BUG_ON(set->fb && set->num_connectors == 0);
11549
11550         if (set->fb) {
11551                 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
11552                                 set->crtc->base.id, set->fb->base.id,
11553                                 (int)set->num_connectors, set->x, set->y);
11554         } else {
11555                 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
11556         }
11557
11558         dev = set->crtc->dev;
11559
11560         ret = -ENOMEM;
11561         config = kzalloc(sizeof(*config), GFP_KERNEL);
11562         if (!config)
11563                 goto out_config;
11564
11565         ret = intel_set_config_save_state(dev, config);
11566         if (ret)
11567                 goto out_config;
11568
11569         save_set.crtc = set->crtc;
11570         save_set.mode = &set->crtc->mode;
11571         save_set.x = set->crtc->x;
11572         save_set.y = set->crtc->y;
11573         save_set.fb = set->crtc->primary->fb;
11574
11575         /* Compute whether we need a full modeset, only an fb base update or no
11576          * change at all. In the future we might also check whether only the
11577          * mode changed, e.g. for LVDS where we only change the panel fitter in
11578          * such cases. */
11579         intel_set_config_compute_mode_changes(set, config);
11580
11581         ret = intel_modeset_stage_output_state(dev, set, config);
11582         if (ret)
11583                 goto fail;
11584
11585         pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
11586                                                    set->fb,
11587                                                    &modeset_pipes,
11588                                                    &prepare_pipes,
11589                                                    &disable_pipes);
11590         if (IS_ERR(pipe_config)) {
11591                 ret = PTR_ERR(pipe_config);
11592                 goto fail;
11593         } else if (pipe_config) {
11594                 if (pipe_config->has_audio !=
11595                     to_intel_crtc(set->crtc)->config->has_audio)
11596                         config->mode_changed = true;
11597
11598                 /*
11599                  * Note we have an issue here with infoframes: current code
11600                  * only updates them on the full mode set path per hw
11601                  * requirements.  So here we should be checking for any
11602                  * required changes and forcing a mode set.
11603                  */
11604         }
11605
11606         /* set_mode will free it in the mode_changed case */
11607         if (!config->mode_changed)
11608                 kfree(pipe_config);
11609
11610         intel_update_pipe_size(to_intel_crtc(set->crtc));
11611
11612         if (config->mode_changed) {
11613                 ret = intel_set_mode_pipes(set->crtc, set->mode,
11614                                            set->x, set->y, set->fb, pipe_config,
11615                                            modeset_pipes, prepare_pipes,
11616                                            disable_pipes);
11617         } else if (config->fb_changed) {
11618                 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
11619                 struct drm_plane *primary = set->crtc->primary;
11620                 int vdisplay, hdisplay;
11621
11622                 drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
11623                 ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
11624                                                    0, 0, hdisplay, vdisplay,
11625                                                    set->x << 16, set->y << 16,
11626                                                    hdisplay << 16, vdisplay << 16);
11627
11628                 /*
11629                  * We need to make sure the primary plane is re-enabled if it
11630                  * has previously been turned off.
11631                  */
11632                 if (!intel_crtc->primary_enabled && ret == 0) {
11633                         WARN_ON(!intel_crtc->active);
11634                         intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
11635                 }
11636
11637                 /*
11638                  * In the fastboot case this may be our only check of the
11639                  * state after boot.  It would be better to only do it on
11640                  * the first update, but we don't have a nice way of doing that
11641                  * (and really, set_config isn't used much for high freq page
11642                  * flipping, so increasing its cost here shouldn't be a big
11643                  * deal).
11644                  */
11645                 if (i915.fastboot && ret == 0)
11646                         intel_modeset_check_state(set->crtc->dev);
11647         }
11648
11649         if (ret) {
11650                 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
11651                               set->crtc->base.id, ret);
11652 fail:
11653                 intel_set_config_restore_state(dev, config);
11654
11655                 /*
11656                  * HACK: if the pipe was on, but we didn't have a framebuffer,
11657                  * force the pipe off to avoid oopsing in the modeset code
11658                  * due to fb==NULL. This should only happen during boot since
11659                  * we don't yet reconstruct the FB from the hardware state.
11660                  */
11661                 if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
11662                         disable_crtc_nofb(to_intel_crtc(save_set.crtc));
11663
11664                 /* Try to restore the config */
11665                 if (config->mode_changed &&
11666                     intel_set_mode(save_set.crtc, save_set.mode,
11667                                    save_set.x, save_set.y, save_set.fb))
11668                         DRM_ERROR("failed to restore config after modeset failure\n");
11669         }
11670
11671 out_config:
11672         intel_set_config_free(config);
11673         return ret;
11674 }
11675
/* Legacy (non-atomic) CRTC entry points.  The atomic state
 * duplicate/destroy hooks are supplied for the transitional atomic
 * helpers used by the plane code. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
11684
/*
 * Read back the current hardware state of a PCH (IBX/CPT) shared DPLL.
 *
 * Fills @hw_state with the DPLL control register and the FP0/FP1 divider
 * registers.  Returns true iff the PLL's VCO is currently enabled.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	/* The registers can't be read while the PLL power domain is off. */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	return val & DPLL_VCO_ENABLE;
}
11701
/* Program the cached FP0/FP1 divider values for a PCH DPLL.  Called
 * before the DPLL itself is enabled. */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
11708
/* Enable a PCH DPLL.  The control register is written twice: the pixel
 * multiplier field only latches once the PLL is already running, per the
 * comment below.  The udelay() values come from the required settle
 * times for the PLL clocks. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
11730
/* Disable a PCH DPLL, first asserting (in debug builds) that no PCH
 * transcoder still has this PLL routed to it. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
11747
/* Human-readable names for the two IBX/CPT PCH DPLLs, indexed by pll->id.
 * NOTE(review): could likely be "static const char * const" — confirm the
 * shared_dpll .name field's type before constifying. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
11752
11753 static void ibx_pch_dpll_init(struct drm_device *dev)
11754 {
11755         struct drm_i915_private *dev_priv = dev->dev_private;
11756         int i;
11757
11758         dev_priv->num_shared_dpll = 2;
11759
11760         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11761                 dev_priv->shared_dplls[i].id = i;
11762                 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
11763                 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
11764                 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
11765                 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
11766                 dev_priv->shared_dplls[i].get_hw_state =
11767                         ibx_pch_dpll_get_hw_state;
11768         }
11769 }
11770
11771 static void intel_shared_dpll_init(struct drm_device *dev)
11772 {
11773         struct drm_i915_private *dev_priv = dev->dev_private;
11774
11775         if (HAS_DDI(dev))
11776                 intel_ddi_pll_init(dev);
11777         else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
11778                 ibx_pch_dpll_init(dev);
11779         else
11780                 dev_priv->num_shared_dpll = 0;
11781
11782         BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
11783 }
11784
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @fb: framebuffer to prepare for presentation
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_framebuffer *fb)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	enum pipe pipe = intel_plane->pipe;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	unsigned frontbuffer_bits = 0;
	int ret = 0;

	/* Disabling the plane (fb == NULL) needs no preparation. */
	if (!obj)
		return 0;

	/* Select the frontbuffer tracking bit matching this plane type. */
	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
		break;
	case DRM_PLANE_TYPE_CURSOR:
		frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
		break;
	}

	mutex_lock(&dev->struct_mutex);

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* Old platforms scan the cursor out of a physically
		 * contiguous buffer rather than through the GTT. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
	}

	/* Only transfer the frontbuffer tracking bits from the old object
	 * to the new one once the new object is successfully pinned. */
	if (ret == 0)
		i915_gem_track_fb(old_obj, obj, frontbuffer_bits);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
11843
11844 /**
11845  * intel_cleanup_plane_fb - Cleans up an fb after plane use
11846  * @plane: drm plane to clean up for
11847  * @fb: old framebuffer that was on plane
11848  *
11849  * Cleans up a framebuffer that has just been removed from a plane.
11850  */
11851 void
11852 intel_cleanup_plane_fb(struct drm_plane *plane,
11853                        struct drm_framebuffer *fb)
11854 {
11855         struct drm_device *dev = plane->dev;
11856         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11857
11858         if (WARN_ON(!obj))
11859                 return;
11860
11861         if (plane->type != DRM_PLANE_TYPE_CURSOR ||
11862             !INTEL_INFO(dev)->cursor_needs_physical) {
11863                 mutex_lock(&dev->struct_mutex);
11864                 intel_unpin_fb_obj(obj);
11865                 mutex_unlock(&dev->struct_mutex);
11866         }
11867 }
11868
/*
 * Validate a primary plane update and record in intel_crtc->atomic which
 * follow-up work (flip waits, FBC disable/update, vblank waits,
 * frontbuffer flush) the commit phase must perform.
 *
 * Returns 0 on success or a negative error code from the clipping helper.
 */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = state->base.crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	int ret;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	/* Clip src/dest to the pipe; primary planes cannot scale. */
	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    false, true, &state->visible);
	if (ret)
		return ret;

	if (intel_crtc->active) {
		intel_crtc->atomic.wait_for_flips = true;

		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */
		if (intel_crtc->primary_enabled &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.plane == intel_crtc->plane &&
		    state->base.rotation != BIT(DRM_ROTATE_0)) {
			intel_crtc->atomic.disable_fbc = true;
		}

		if (state->visible) {
			/*
			 * BDW signals flip done immediately if the plane
			 * is disabled, even if the plane enable is already
			 * armed to occur at the next vblank :(
			 */
			if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
				intel_crtc->atomic.wait_vblank = true;
		}

		/* Queue the primary frontbuffer bit for flushing at commit. */
		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);

		intel_crtc->atomic.update_fbc = true;
	}

	return 0;
}
11932
/*
 * Apply a validated primary plane update: latch the new fb and scanout
 * offsets into the legacy crtc/plane pointers and, if the pipe is
 * running, program the hardware (or disable the plane entirely when
 * clipping left nothing visible).
 */
static void
intel_commit_primary_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_rect *src = &state->src;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	plane->fb = fb;
	/* src coordinates are 16.16 fixed point; keep the integer part. */
	crtc->x = src->x1 >> 16;
	crtc->y = src->y1 >> 16;

	intel_plane->obj = obj;

	if (intel_crtc->active) {
		if (state->visible) {
			/* FIXME: kill this fastboot hack */
			intel_update_pipe_size(intel_crtc);

			intel_crtc->primary_enabled = true;

			dev_priv->display.update_primary_plane(crtc, plane->fb,
					crtc->x, crtc->y);
		} else {
			/*
			 * If clipping results in a non-visible primary plane,
			 * we'll disable the primary plane.  Note that this is
			 * a bit different than what happens if userspace
			 * explicitly disables the plane by passing fb=0
			 * because plane->fb still gets set and pinned.
			 */
			intel_disable_primary_hw_plane(plane, crtc);
		}
	}
}
11976
/*
 * Pre-commit CRTC work: drop frontbuffer tracking for planes about to be
 * disabled, run the deferred work queued by the plane check hooks (flip
 * waits, FBC disable, primary pre-disable, watermarks), take a runtime
 * PM reference and start vblank evasion for the register writes that
 * follow.  Paired with intel_finish_crtc_commit().
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct drm_plane *p;
	unsigned fb_bits = 0;

	/* Track fb's for any planes being disabled */
	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(p);

		if (intel_crtc->atomic.disabled_planes &
		    (1 << drm_plane_index(p))) {
			switch (p->type) {
			case DRM_PLANE_TYPE_PRIMARY:
				fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
				break;
			case DRM_PLANE_TYPE_CURSOR:
				fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
				break;
			}

			/* Plane is going away: clear its frontbuffer bits. */
			mutex_lock(&dev->struct_mutex);
			i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	if (intel_crtc->atomic.wait_for_flips)
		intel_crtc_wait_for_pending_flips(crtc);

	if (intel_crtc->atomic.disable_fbc)
		intel_fbc_disable(dev);

	if (intel_crtc->atomic.pre_disable_primary)
		intel_pre_disable_primary(crtc);

	if (intel_crtc->atomic.update_wm)
		intel_update_watermarks(crtc);

	/* Released by intel_finish_crtc_commit(). */
	intel_runtime_pm_get(dev_priv);

	/* Perform vblank evasion around commit operation */
	if (intel_crtc->active)
		intel_crtc->atomic.evade =
			intel_pipe_update_start(intel_crtc,
						&intel_crtc->atomic.start_vbl_count);
}
12030
/*
 * Post-commit CRTC work, mirroring intel_begin_crtc_commit(): end vblank
 * evasion, release the runtime PM reference, flush the queued
 * frontbuffer bits and run the remaining deferred updates (FBC, primary
 * re-enable, sprite watermarks), then reset the per-commit scratch
 * state for the next commit.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;

	if (intel_crtc->atomic.evade)
		intel_pipe_update_end(intel_crtc,
				      intel_crtc->atomic.start_vbl_count);

	intel_runtime_pm_put(dev_priv);

	if (intel_crtc->atomic.wait_vblank)
		intel_wait_for_vblank(dev, intel_crtc->pipe);

	intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);

	if (intel_crtc->atomic.update_fbc) {
		mutex_lock(&dev->struct_mutex);
		intel_fbc_update(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	if (intel_crtc->atomic.post_enable_primary)
		intel_post_enable_primary(crtc);

	/* NOTE(review): this tests the raw plane index against the mask,
	 * while the begin hook above uses (1 << drm_plane_index(p)) for
	 * disabled_planes — confirm against where this mask is set. */
	drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
		if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
			intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
						       false, false);

	/* All queued work consumed; start the next commit from scratch. */
	memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
}
12065
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	/* Unregister from the DRM core, then free the wrapper struct. */
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
12079
12080 const struct drm_plane_funcs intel_plane_funcs = {
12081         .update_plane = drm_plane_helper_update,
12082         .disable_plane = drm_plane_helper_disable,
12083         .destroy = intel_plane_destroy,
12084         .set_property = drm_atomic_helper_plane_set_property,
12085         .atomic_get_property = intel_plane_atomic_get_property,
12086         .atomic_set_property = intel_plane_atomic_set_property,
12087         .atomic_duplicate_state = intel_plane_duplicate_state,
12088         .atomic_destroy_state = intel_plane_destroy_state,
12089
12090 };
12091
12092 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
12093                                                     int pipe)
12094 {
12095         struct intel_plane *primary;
12096         struct intel_plane_state *state;
12097         const uint32_t *intel_primary_formats;
12098         int num_formats;
12099
12100         primary = kzalloc(sizeof(*primary), GFP_KERNEL);
12101         if (primary == NULL)
12102                 return NULL;
12103
12104         state = intel_create_plane_state(&primary->base);
12105         if (!state) {
12106                 kfree(primary);
12107                 return NULL;
12108         }
12109         primary->base.state = &state->base;
12110
12111         primary->can_scale = false;
12112         primary->max_downscale = 1;
12113         primary->pipe = pipe;
12114         primary->plane = pipe;
12115         primary->check_plane = intel_check_primary_plane;
12116         primary->commit_plane = intel_commit_primary_plane;
12117         if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
12118                 primary->plane = !pipe;
12119
12120         if (INTEL_INFO(dev)->gen <= 3) {
12121                 intel_primary_formats = intel_primary_formats_gen2;
12122                 num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
12123         } else {
12124                 intel_primary_formats = intel_primary_formats_gen4;
12125                 num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
12126         }
12127
12128         drm_universal_plane_init(dev, &primary->base, 0,
12129                                  &intel_plane_funcs,
12130                                  intel_primary_formats, num_formats,
12131                                  DRM_PLANE_TYPE_PRIMARY);
12132
12133         if (INTEL_INFO(dev)->gen >= 4) {
12134                 if (!dev->mode_config.rotation_property)
12135                         dev->mode_config.rotation_property =
12136                                 drm_mode_create_rotation_property(dev,
12137                                                         BIT(DRM_ROTATE_0) |
12138                                                         BIT(DRM_ROTATE_180));
12139                 if (dev->mode_config.rotation_property)
12140                         drm_object_attach_property(&primary->base.base,
12141                                 dev->mode_config.rotation_property,
12142                                 state->base.rotation);
12143         }
12144
12145         drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
12146
12147         return &primary->base;
12148 }
12149
/*
 * Validate a cursor plane update: clip against the pipe, enforce the
 * supported cursor dimensions and buffer layout, and queue watermark /
 * frontbuffer bookkeeping for the commit phase.
 *
 * Returns 0 on success, -EINVAL for unsupported dimensions or a tiled
 * buffer, -ENOMEM when the backing object is too small.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc;
	unsigned stride;
	int ret;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	/* Cursors cannot scale but may be clipped partially off-screen. */
	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;


	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		goto finish;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Cursor layout: 4 bytes/pixel at power-of-two padded width. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	/* NOTE(review): an unchanged fb returns here and skips the
	 * watermark/frontbuffer bookkeeping under "finish" — confirm that
	 * is intentional for pure position updates. */
	if (fb == crtc->cursor->fb)
		return 0;

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		ret = -EINVAL;
	}
	mutex_unlock(&dev->struct_mutex);

finish:
	if (intel_crtc->active) {
		/* A width change affects watermarks; recompute at commit. */
		if (intel_crtc->cursor_width != state->base.crtc_w)
			intel_crtc->atomic.update_wm = true;

		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
	}

	return ret;
}
12216
/*
 * Apply a validated cursor update: cache the new cursor position and
 * size, resolve the scanout address (GTT offset or physical bus address)
 * when the backing object changed, and reprogram the cursor registers if
 * the pipe is active.
 */
static void
intel_commit_cursor_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	plane->fb = state->base.fb;
	crtc->cursor_x = state->base.crtc_x;
	crtc->cursor_y = state->base.crtc_y;

	intel_plane->obj = obj;

	/* Same backing object: only size/position changed, keep the
	 * cached scanout address. */
	if (intel_crtc->cursor_bo == obj)
		goto update;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
update:
	intel_crtc->cursor_width = state->base.crtc_w;
	intel_crtc->cursor_height = state->base.crtc_h;

	if (intel_crtc->active)
		intel_crtc_update_cursor(crtc, state->visible);
}
12256
12257 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
12258                                                    int pipe)
12259 {
12260         struct intel_plane *cursor;
12261         struct intel_plane_state *state;
12262
12263         cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
12264         if (cursor == NULL)
12265                 return NULL;
12266
12267         state = intel_create_plane_state(&cursor->base);
12268         if (!state) {
12269                 kfree(cursor);
12270                 return NULL;
12271         }
12272         cursor->base.state = &state->base;
12273
12274         cursor->can_scale = false;
12275         cursor->max_downscale = 1;
12276         cursor->pipe = pipe;
12277         cursor->plane = pipe;
12278         cursor->check_plane = intel_check_cursor_plane;
12279         cursor->commit_plane = intel_commit_cursor_plane;
12280
12281         drm_universal_plane_init(dev, &cursor->base, 0,
12282                                  &intel_plane_funcs,
12283                                  intel_cursor_formats,
12284                                  ARRAY_SIZE(intel_cursor_formats),
12285                                  DRM_PLANE_TYPE_CURSOR);
12286
12287         if (INTEL_INFO(dev)->gen >= 4) {
12288                 if (!dev->mode_config.rotation_property)
12289                         dev->mode_config.rotation_property =
12290                                 drm_mode_create_rotation_property(dev,
12291                                                         BIT(DRM_ROTATE_0) |
12292                                                         BIT(DRM_ROTATE_180));
12293                 if (dev->mode_config.rotation_property)
12294                         drm_object_attach_property(&cursor->base.base,
12295                                 dev->mode_config.rotation_property,
12296                                 state->base.rotation);
12297         }
12298
12299         drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
12300
12301         return &cursor->base;
12302 }
12303
/*
 * Allocate, initialize and register the CRTC for @pipe together with its
 * primary and cursor planes, identity gamma ramp and the pipe/plane to
 * CRTC lookup tables.  On failure everything allocated so far is torn
 * down and the pipe is simply left without a CRTC.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc_set_state(intel_crtc, crtc_state);

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 means "unknown", forcing a full register write on the first
	 * cursor update. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
12377
12378 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
12379 {
12380         struct drm_encoder *encoder = connector->base.encoder;
12381         struct drm_device *dev = connector->base.dev;
12382
12383         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
12384
12385         if (!encoder || WARN_ON(!encoder->crtc))
12386                 return INVALID_PIPE;
12387
12388         return to_intel_crtc(encoder->crtc)->pipe;
12389 }
12390
12391 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
12392                                 struct drm_file *file)
12393 {
12394         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
12395         struct drm_crtc *drmmode_crtc;
12396         struct intel_crtc *crtc;
12397
12398         if (!drm_core_check_feature(dev, DRIVER_MODESET))
12399                 return -ENODEV;
12400
12401         drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
12402
12403         if (!drmmode_crtc) {
12404                 DRM_ERROR("no such CRTC id\n");
12405                 return -ENOENT;
12406         }
12407
12408         crtc = to_intel_crtc(drmmode_crtc);
12409         pipe_from_crtc_id->pipe = crtc->pipe;
12410
12411         return 0;
12412 }
12413
12414 static int intel_encoder_clones(struct intel_encoder *encoder)
12415 {
12416         struct drm_device *dev = encoder->base.dev;
12417         struct intel_encoder *source_encoder;
12418         int index_mask = 0;
12419         int entry = 0;
12420
12421         for_each_intel_encoder(dev, source_encoder) {
12422                 if (encoders_cloneable(encoder, source_encoder))
12423                         index_mask |= (1 << entry);
12424
12425                 entry++;
12426         }
12427
12428         return index_mask;
12429 }
12430
12431 static bool has_edp_a(struct drm_device *dev)
12432 {
12433         struct drm_i915_private *dev_priv = dev->dev_private;
12434
12435         if (!IS_MOBILE(dev))
12436                 return false;
12437
12438         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
12439                 return false;
12440
12441         if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
12442                 return false;
12443
12444         return true;
12445 }
12446
12447 static bool intel_crt_present(struct drm_device *dev)
12448 {
12449         struct drm_i915_private *dev_priv = dev->dev_private;
12450
12451         if (INTEL_INFO(dev)->gen >= 9)
12452                 return false;
12453
12454         if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
12455                 return false;
12456
12457         if (IS_CHERRYVIEW(dev))
12458                 return false;
12459
12460         if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
12461                 return false;
12462
12463         return true;
12464 }
12465
/*
 * Probe for and register all display outputs (encoders/connectors) on
 * this device.  The detection strategy is chosen per platform family:
 * DDI-based parts (HSW+), PCH-split parts (ILK..BDW south display),
 * VLV/CHV, gen3/4 SDVO/HDMI/DP, or gen2 DVO.  Finally the per-encoder
 * cloning masks and PCH reference clocks are set up.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D is HDMI only when it isn't strapped as eDP. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
						PORT_D);
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/*
	 * FIXME:  We don't have full atomic support yet, but we want to be
	 * able to enable/test plane updates via the atomic interface in the
	 * meantime.  However as soon as we flip DRIVER_ATOMIC on, the DRM core
	 * will take some atomic codepaths to lookup properties during
	 * drmModeGetConnector() that unconditionally dereference
	 * connector->state.
	 *
	 * We create a dummy connector state here for each connector to ensure
	 * the DRM core doesn't try to dereference a NULL connector->state.
	 * The actual connector properties will never be updated or contain
	 * useful information, but since we're doing this specifically for
	 * testing/debug of the plane operations (and only when a specific
	 * kernel module option is given), that shouldn't really matter.
	 *
	 * Once atomic support for crtc's + connectors lands, this loop should
	 * be removed since we'll be setting up real connector state, which
	 * will contain Intel-specific properties.
	 */
	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    head) {
			if (!WARN_ON(connector->state)) {
				connector->state =
					kzalloc(sizeof(*connector->state),
						GFP_KERNEL);
			}
		}
	}

	intel_psr_init(dev);

	/* Now that every encoder exists, compute per-encoder cloning masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
12644
12645 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
12646 {
12647         struct drm_device *dev = fb->dev;
12648         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
12649
12650         drm_framebuffer_cleanup(fb);
12651         mutex_lock(&dev->struct_mutex);
12652         WARN_ON(!intel_fb->obj->framebuffer_references--);
12653         drm_gem_object_unreference(&intel_fb->obj->base);
12654         mutex_unlock(&dev->struct_mutex);
12655         kfree(intel_fb);
12656 }
12657
12658 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
12659                                                 struct drm_file *file,
12660                                                 unsigned int *handle)
12661 {
12662         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
12663         struct drm_i915_gem_object *obj = intel_fb->obj;
12664
12665         return drm_gem_handle_create(file, &obj->base, handle);
12666 }
12667
/* Framebuffer vfuncs shared by all intel framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
12672
12673 static int intel_framebuffer_init(struct drm_device *dev,
12674                                   struct intel_framebuffer *intel_fb,
12675                                   struct drm_mode_fb_cmd2 *mode_cmd,
12676                                   struct drm_i915_gem_object *obj)
12677 {
12678         int aligned_height;
12679         int pitch_limit;
12680         int ret;
12681
12682         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
12683
12684         if (obj->tiling_mode == I915_TILING_Y) {
12685                 DRM_DEBUG("hardware does not support tiling Y\n");
12686                 return -EINVAL;
12687         }
12688
12689         if (mode_cmd->pitches[0] & 63) {
12690                 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
12691                           mode_cmd->pitches[0]);
12692                 return -EINVAL;
12693         }
12694
12695         if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
12696                 pitch_limit = 32*1024;
12697         } else if (INTEL_INFO(dev)->gen >= 4) {
12698                 if (obj->tiling_mode)
12699                         pitch_limit = 16*1024;
12700                 else
12701                         pitch_limit = 32*1024;
12702         } else if (INTEL_INFO(dev)->gen >= 3) {
12703                 if (obj->tiling_mode)
12704                         pitch_limit = 8*1024;
12705                 else
12706                         pitch_limit = 16*1024;
12707         } else
12708                 /* XXX DSPC is limited to 4k tiled */
12709                 pitch_limit = 8*1024;
12710
12711         if (mode_cmd->pitches[0] > pitch_limit) {
12712                 DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
12713                           obj->tiling_mode ? "tiled" : "linear",
12714                           mode_cmd->pitches[0], pitch_limit);
12715                 return -EINVAL;
12716         }
12717
12718         if (obj->tiling_mode != I915_TILING_NONE &&
12719             mode_cmd->pitches[0] != obj->stride) {
12720                 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
12721                           mode_cmd->pitches[0], obj->stride);
12722                 return -EINVAL;
12723         }
12724
12725         /* Reject formats not supported by any plane early. */
12726         switch (mode_cmd->pixel_format) {
12727         case DRM_FORMAT_C8:
12728         case DRM_FORMAT_RGB565:
12729         case DRM_FORMAT_XRGB8888:
12730         case DRM_FORMAT_ARGB8888:
12731                 break;
12732         case DRM_FORMAT_XRGB1555:
12733         case DRM_FORMAT_ARGB1555:
12734                 if (INTEL_INFO(dev)->gen > 3) {
12735                         DRM_DEBUG("unsupported pixel format: %s\n",
12736                                   drm_get_format_name(mode_cmd->pixel_format));
12737                         return -EINVAL;
12738                 }
12739                 break;
12740         case DRM_FORMAT_XBGR8888:
12741         case DRM_FORMAT_ABGR8888:
12742         case DRM_FORMAT_XRGB2101010:
12743         case DRM_FORMAT_ARGB2101010:
12744         case DRM_FORMAT_XBGR2101010:
12745         case DRM_FORMAT_ABGR2101010:
12746                 if (INTEL_INFO(dev)->gen < 4) {
12747                         DRM_DEBUG("unsupported pixel format: %s\n",
12748                                   drm_get_format_name(mode_cmd->pixel_format));
12749                         return -EINVAL;
12750                 }
12751                 break;
12752         case DRM_FORMAT_YUYV:
12753         case DRM_FORMAT_UYVY:
12754         case DRM_FORMAT_YVYU:
12755         case DRM_FORMAT_VYUY:
12756                 if (INTEL_INFO(dev)->gen < 5) {
12757                         DRM_DEBUG("unsupported pixel format: %s\n",
12758                                   drm_get_format_name(mode_cmd->pixel_format));
12759                         return -EINVAL;
12760                 }
12761                 break;
12762         default:
12763                 DRM_DEBUG("unsupported pixel format: %s\n",
12764                           drm_get_format_name(mode_cmd->pixel_format));
12765                 return -EINVAL;
12766         }
12767
12768         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
12769         if (mode_cmd->offsets[0] != 0)
12770                 return -EINVAL;
12771
12772         aligned_height = intel_fb_align_height(dev, mode_cmd->height,
12773                                                obj->tiling_mode);
12774         /* FIXME drm helper for size checks (especially planar formats)? */
12775         if (obj->base.size < aligned_height * mode_cmd->pitches[0])
12776                 return -EINVAL;
12777
12778         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
12779         intel_fb->obj = obj;
12780         intel_fb->obj->framebuffer_references++;
12781
12782         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
12783         if (ret) {
12784                 DRM_ERROR("framebuffer init failed %d\n", ret);
12785                 return ret;
12786         }
12787
12788         return 0;
12789 }
12790
12791 static struct drm_framebuffer *
12792 intel_user_framebuffer_create(struct drm_device *dev,
12793                               struct drm_file *filp,
12794                               struct drm_mode_fb_cmd2 *mode_cmd)
12795 {
12796         struct drm_i915_gem_object *obj;
12797
12798         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
12799                                                 mode_cmd->handles[0]));
12800         if (&obj->base == NULL)
12801                 return ERR_PTR(-ENOENT);
12802
12803         return intel_framebuffer_create(dev, mode_cmd, obj);
12804 }
12805
/*
 * Without fbdev support compiled in, provide a no-op stub so the
 * output_poll_changed hook below can be wired up unconditionally.
 */
#ifndef CONFIG_DRM_I915_FBDEV
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
12811
/* Mode-config entry points for the DRM core (fb creation, atomic, polling). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
};
12818
/*
 * Set up chip specific display functions: fill in the dev_priv->display
 * vtable (DPLL search, pipe config readout, crtc enable/disable, plane
 * update, clock readout, FDI training and page-flip queueing) according
 * to the hardware generation.
 */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL divider search routine, per PLL flavour. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* Core modeset hooks, from newest to oldest hardware. */
	if (INTEL_INFO(dev)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			skylake_update_primary_plane;
	} else if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* FDI link training (PCH-split parts) and global resource hooks. */
	if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
		dev_priv->display.modeset_global_resources =
			ivb_modeset_global_resources;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.modeset_global_resources =
			valleyview_modeset_global_resources;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		dev_priv->display.queue_flip = intel_gen9_queue_flip;
		break;
	}

	intel_panel_init_backlight_funcs(dev);

	mutex_init(&dev_priv->pps_mutex);
}
12968
12969 /*
12970  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12971  * resume, or other times.  This quirk makes sure that's the case for
12972  * affected systems.
12973  */
12974 static void quirk_pipea_force(struct drm_device *dev)
12975 {
12976         struct drm_i915_private *dev_priv = dev->dev_private;
12977
12978         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12979         DRM_INFO("applying pipe a force quirk\n");
12980 }
12981
12982 static void quirk_pipeb_force(struct drm_device *dev)
12983 {
12984         struct drm_i915_private *dev_priv = dev->dev_private;
12985
12986         dev_priv->quirks |= QUIRK_PIPEB_FORCE;
12987         DRM_INFO("applying pipe b force quirk\n");
12988 }
12989
12990 /*
12991  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12992  */
12993 static void quirk_ssc_force_disable(struct drm_device *dev)
12994 {
12995         struct drm_i915_private *dev_priv = dev->dev_private;
12996         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12997         DRM_INFO("applying lvds SSC disable quirk\n");
12998 }
12999
13000 /*
13001  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
13002  * brightness value
13003  */
13004 static void quirk_invert_brightness(struct drm_device *dev)
13005 {
13006         struct drm_i915_private *dev_priv = dev->dev_private;
13007         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
13008         DRM_INFO("applying inverted panel brightness quirk\n");
13009 }
13010
13011 /* Some VBT's incorrectly indicate no backlight is present */
13012 static void quirk_backlight_present(struct drm_device *dev)
13013 {
13014         struct drm_i915_private *dev_priv = dev->dev_private;
13015         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
13016         DRM_INFO("applying backlight present quirk\n");
13017 }
13018
/* One PCI-id-matched device quirk; PCI_ANY_ID wildcards are allowed. */
struct intel_quirk {
	int device;		/* PCI device id to match */
	int subsystem_vendor;	/* PCI subsystem vendor id to match */
	int subsystem_device;	/* PCI subsystem device id to match */
	void (*hook)(struct drm_device *dev);	/* quirk applied on match */
};
13025
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);		/* quirk applied on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated DMI match table */
};
13031
/* DMI match callback for the table below: log which machine matched. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
13037
/* DMI-matched quirks for systems without usable PCI subsystem ids. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
13053
/*
 * PCI-ID keyed quirks; entries are { device, subsystem_vendor,
 * subsystem_device, hook }, with PCI_ANY_ID wildcarding a subsystem field.
 */
static struct intel_quirk intel_quirks[] = {
        /* HP Mini needs pipe A force quirk (LP: #322104) */
        { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

        /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
        { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

        /* 830 needs to leave pipe A & dpll A up */
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

        /* 830 needs to leave pipe B & dpll B up */
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

        /* Lenovo U160 cannot use SSC on LVDS */
        { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

        /* Sony Vaio Y cannot use SSC on LVDS */
        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

        /* Acer Aspire 5734Z must invert backlight brightness */
        { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

        /* Acer/eMachines G725 */
        { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

        /* Acer/eMachines e725 */
        { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

        /* Acer/Packard Bell NCL20 */
        { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

        /* Acer Aspire 4736Z */
        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

        /* Acer Aspire 5336 */
        { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

        /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
        { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

        /* Acer C720 Chromebook (Core i3 4005U) */
        { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

        /* Apple Macbook 2,1 (Core 2 T7400) */
        { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

        /* Toshiba CB35 Chromebook (Celeron 2955U) */
        { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

        /* HP Chromebook 14 (Celeron 2955U) */
        { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};
13109
13110 static void intel_init_quirks(struct drm_device *dev)
13111 {
13112         struct pci_dev *d = dev->pdev;
13113         int i;
13114
13115         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
13116                 struct intel_quirk *q = &intel_quirks[i];
13117
13118                 if (d->device == q->device &&
13119                     (d->subsystem_vendor == q->subsystem_vendor ||
13120                      q->subsystem_vendor == PCI_ANY_ID) &&
13121                     (d->subsystem_device == q->subsystem_device ||
13122                      q->subsystem_device == PCI_ANY_ID))
13123                         q->hook(dev);
13124         }
13125         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
13126                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
13127                         intel_dmi_quirks[i].hook(dev);
13128         }
13129 }
13130
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u8 sr1;
        u32 vga_reg = i915_vgacntrl_reg(dev);

        /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        /* Set the screen-off bit (bit 5) in VGA sequencer register SR01 via
         * the legacy index/data port pair before disabling the plane. */
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
        udelay(300);

        /* Now disable the VGA display plane itself and flush the write. */
        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg);
}
13149
/*
 * Re-program the display hardware bits needed on both driver load and
 * resume: DDI buffer translations, CDCLK (Valleyview only), clock gating
 * workarounds and GT power saving.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
        intel_prepare_ddi(dev);

        /* Only Valleyview re-derives CDCLK here. */
        if (IS_VALLEYVIEW(dev))
                vlv_update_cdclk(dev);

        intel_init_clock_gating(dev);

        intel_enable_gt_powersave(dev);
}
13161
/*
 * One-time modesetting initialization on driver load: set up drm
 * mode_config limits, quirks and PM, create crtcs/sprites and outputs, and
 * finally read out (and take over) whatever state the BIOS left enabled.
 */
void intel_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int sprite, ret;
        enum pipe pipe;
        struct intel_crtc *crtc;

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;

        dev->mode_config.funcs = &intel_mode_funcs;

        intel_init_quirks(dev);

        intel_init_pm(dev);

        /* Nothing more to do on display-less parts. */
        if (INTEL_INFO(dev)->num_pipes == 0)
                return;

        intel_init_display(dev);
        intel_init_audio(dev);

        /* Maximum framebuffer dimensions grow with the hardware generation. */
        if (IS_GEN2(dev)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        } else if (IS_GEN3(dev)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
        }

        /* Cursor size limits are generation (and even SKU) dependent. */
        if (IS_845G(dev) || IS_I865G(dev)) {
                dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
        } else if (IS_GEN2(dev)) {
                dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
                dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
        } else {
                dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }

        dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev)->num_pipes,
                      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

        /* Create one crtc per pipe plus its sprite planes; sprite init
         * failure is logged but not fatal. */
        for_each_pipe(dev_priv, pipe) {
                intel_crtc_init(dev, pipe);
                for_each_sprite(pipe, sprite) {
                        ret = intel_plane_init(dev, pipe, sprite);
                        if (ret)
                                DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
                                              pipe_name(pipe), sprite_name(pipe, sprite), ret);
                }
        }

        intel_init_dpio(dev);

        intel_shared_dpll_init(dev);

        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);

        /* Just in case the BIOS is doing something questionable. */
        intel_fbc_disable(dev);

        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, false);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                if (dev_priv->display.get_initial_plane_config) {
                        dev_priv->display.get_initial_plane_config(crtc,
                                                           &crtc->plane_config);
                        /*
                         * If the fb is shared between multiple heads, we'll
                         * just get the first one.
                         */
                        intel_find_plane_obj(crtc, &crtc->plane_config);
                }
        }
}
13264
13265 static void intel_enable_pipe_a(struct drm_device *dev)
13266 {
13267         struct intel_connector *connector;
13268         struct drm_connector *crt = NULL;
13269         struct intel_load_detect_pipe load_detect_temp;
13270         struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
13271
13272         /* We can't just switch on the pipe A, we need to set things up with a
13273          * proper mode and output configuration. As a gross hack, enable pipe A
13274          * by enabling the load detect pipe once. */
13275         list_for_each_entry(connector,
13276                             &dev->mode_config.connector_list,
13277                             base.head) {
13278                 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
13279                         crt = &connector->base;
13280                         break;
13281                 }
13282         }
13283
13284         if (!crt)
13285                 return;
13286
13287         if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
13288                 intel_release_load_detect_pipe(crt, &load_detect_temp);
13289 }
13290
13291 static bool
13292 intel_check_plane_mapping(struct intel_crtc *crtc)
13293 {
13294         struct drm_device *dev = crtc->base.dev;
13295         struct drm_i915_private *dev_priv = dev->dev_private;
13296         u32 reg, val;
13297
13298         if (INTEL_INFO(dev)->num_pipes == 1)
13299                 return true;
13300
13301         reg = DSPCNTR(!crtc->plane);
13302         val = I915_READ(reg);
13303
13304         if ((val & DISPLAY_PLANE_ENABLE) &&
13305             (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
13306                 return false;
13307
13308         return true;
13309 }
13310
/*
 * Fix up inconsistent or BIOS-left-over per-crtc hardware state so software
 * tracking and hardware agree: clear debug frame start delays, restore
 * vblank bookkeeping, repair crossed plane->pipe mappings, apply the pipe A
 * quirk and reconcile active/enabled state.  Runs once per pipe from
 * intel_modeset_setup_hw_state().
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg;

        /* Clear any frame start delays used for debugging left by the BIOS */
        reg = PIPECONF(crtc->config->cpu_transcoder);
        I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

        /* restore vblank interrupts to correct state */
        if (crtc->active) {
                update_scanline_offset(crtc);
                drm_vblank_on(dev, crtc->pipe);
        } else
                drm_vblank_off(dev, crtc->pipe);

        /* We need to sanitize the plane -> pipe mapping first because this will
         * disable the crtc (and hence change the state) if it is wrong. Note
         * that gen4+ has a fixed plane -> pipe mapping.  */
        if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
                struct intel_connector *connector;
                bool plane;

                DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
                              crtc->base.base.id);

                /* Pipe has the wrong plane attached and the plane is active.
                 * Temporarily change the plane mapping and disable everything
                 * ...  */
                plane = crtc->plane;
                crtc->plane = !plane;
                crtc->primary_enabled = true;
                dev_priv->display.crtc_disable(&crtc->base);
                crtc->plane = plane;

                /* ... and break all links. */
                list_for_each_entry(connector, &dev->mode_config.connector_list,
                                    base.head) {
                        if (connector->encoder->base.crtc != &crtc->base)
                                continue;

                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                /* multiple connectors may have the same encoder:
                 *  handle them and break crtc link separately */
                list_for_each_entry(connector, &dev->mode_config.connector_list,
                                    base.head)
                        if (connector->encoder->base.crtc == &crtc->base) {
                                connector->encoder->base.crtc = NULL;
                                connector->encoder->connectors_active = false;
                        }

                WARN_ON(crtc->active);
                crtc->base.enabled = false;
        }

        if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
            crtc->pipe == PIPE_A && !crtc->active) {
                /* BIOS forgot to enable pipe A, this mostly happens after
                 * resume. Force-enable the pipe to fix this, the update_dpms
                 * call below we restore the pipe to the right state, but leave
                 * the required bits on. */
                intel_enable_pipe_a(dev);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        intel_crtc_update_dpms(&crtc->base);

        if (crtc->active != crtc->base.enabled) {
                struct intel_encoder *encoder;

                /* This can happen either due to bugs in the get_hw_state
                 * functions or because the pipe is force-enabled due to the
                 * pipe A quirk. */
                DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
                              crtc->base.base.id,
                              crtc->base.enabled ? "enabled" : "disabled",
                              crtc->active ? "enabled" : "disabled");

                crtc->base.enabled = crtc->active;

                /* Because we only establish the connector -> encoder ->
                 * crtc links if something is active, this means the
                 * crtc is now deactivated. Break the links. connector
                 * -> encoder links are only establish when things are
                 *  actually up, hence no need to break them. */
                WARN_ON(crtc->active);

                for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                        WARN_ON(encoder->connectors_active);
                        encoder->base.crtc = NULL;
                }
        }

        if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                crtc->pch_fifo_underrun_disabled = true;
        }
}
13426
/*
 * Fix up an encoder whose read-out state is inconsistent: active connectors
 * but no active pipe behind them (typically fallout from register restore
 * on resume).  Such encoders are manually disabled and their connector
 * links clamped to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct intel_connector *connector;
        struct drm_device *dev = encoder->base.dev;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = encoder->base.crtc &&
                to_intel_crtc(encoder->base.crtc)->active;

        if (encoder->connectors_active && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (encoder->base.crtc) {
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
                        encoder->disable(encoder);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder);
                }
                encoder->base.crtc = NULL;
                encoder->connectors_active = false;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */
                list_for_each_entry(connector,
                                    &dev->mode_config.connector_list,
                                    base.head) {
                        if (connector->encoder != encoder)
                                continue;
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
        }
        /* Enabled encoders without active connectors will be fixed in
         * the crtc fixup. */
}
13473
/* Re-disable the VGA plane if something (e.g. firmware) turned it back on. */
void i915_redisable_vga_power_on(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 vga_reg = i915_vgacntrl_reg(dev);

        /* VGA_DISP_DISABLE cleared means the VGA plane got re-enabled. */
        if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
                i915_disable_vga(dev);
        }
}
13484
13485 void i915_redisable_vga(struct drm_device *dev)
13486 {
13487         struct drm_i915_private *dev_priv = dev->dev_private;
13488
13489         /* This function can be called both from intel_modeset_setup_hw_state or
13490          * at a very early point in our resume sequence, where the power well
13491          * structures are not yet restored. Since this function is at a very
13492          * paranoid "someone might have enabled VGA while we were not looking"
13493          * level, just check if the power well is enabled instead of trying to
13494          * follow the "don't touch the power well if we don't need it" policy
13495          * the rest of the driver uses. */
13496         if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
13497                 return;
13498
13499         i915_redisable_vga_power_on(dev);
13500 }
13501
13502 static bool primary_get_hw_state(struct intel_crtc *crtc)
13503 {
13504         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
13505
13506         if (!crtc->active)
13507                 return false;
13508
13509         return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
13510 }
13511
/*
 * Read the current hardware state of all crtcs, shared DPLLs, encoders and
 * connectors into the driver's tracking structures.  Pure readout --
 * sanitizing the result is done by intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        int i;

        /* Per-crtc: pipe config, active state and primary plane state. */
        for_each_intel_crtc(dev, crtc) {
                memset(crtc->config, 0, sizeof(*crtc->config));

                crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;

                crtc->active = dev_priv->display.get_pipe_config(crtc,
                                                                 crtc->config);

                crtc->base.enabled = crtc->active;
                crtc->primary_enabled = primary_get_hw_state(crtc);

                DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
                              crtc->base.base.id,
                              crtc->active ? "enabled" : "disabled");
        }

        /* Shared DPLLs: read hw state and recompute refcount/crtc_mask from
         * the active crtcs using each pll. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                pll->on = pll->get_hw_state(dev_priv, pll,
                                            &pll->config.hw_state);
                pll->active = 0;
                pll->config.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
                        if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
                                pll->active++;
                                pll->config.crtc_mask |= 1 << crtc->pipe;
                        }
                }

                DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
                              pll->name, pll->config.crtc_mask, pll->on);

                if (pll->config.crtc_mask)
                        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
        }

        /* Encoders: link each active encoder to the crtc of its pipe. */
        for_each_intel_encoder(dev, encoder) {
                pipe = 0;

                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                        encoder->base.crtc = &crtc->base;
                        encoder->get_config(encoder, crtc->config);
                } else {
                        encoder->base.crtc = NULL;
                }

                encoder->connectors_active = false;
                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id,
                              encoder->base.name,
                              encoder->base.crtc ? "enabled" : "disabled",
                              pipe_name(pipe));
        }

        /* Connectors: dpms state and connector -> encoder links. */
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                if (connector->get_hw_state(connector)) {
                        connector->base.dpms = DRM_MODE_DPMS_ON;
                        connector->encoder->connectors_active = true;
                        connector->base.encoder = &connector->encoder->base;
                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
                              connector->base.base.id,
                              connector->base.name,
                              connector->base.encoder ? "enabled" : "disabled");
        }
}
13593
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
                                  bool force_restore)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        int i;

        intel_modeset_readout_hw_state(dev);

        /*
         * Now that we have the config, copy it to each CRTC struct
         * Note that this could go away if we move to using crtc_config
         * checking everywhere.
         */
        for_each_intel_crtc(dev, crtc) {
                if (crtc->active && i915.fastboot) {
                        intel_mode_from_pipe_config(&crtc->base.mode,
                                                    crtc->config);
                        DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
                                      crtc->base.base.id);
                        drm_mode_debug_printmodeline(&crtc->base.mode);
                }
        }

        /* HW state is read out, now we need to sanitize this mess. */
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }

        for_each_pipe(dev_priv, pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                intel_sanitize_crtc(crtc);
                intel_dump_pipe_config(crtc, crtc->config,
                                       "[setup_hw_state]");
        }

        /* Turn off any shared DPLL that is on but no longer used by a crtc. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

                pll->disable(dev_priv, pll);
                pll->on = false;
        }

        /* Read out the current watermark hardware state. */
        if (IS_GEN9(dev))
                skl_wm_get_hw_state(dev);
        else if (HAS_PCH_SPLIT(dev))
                ilk_wm_get_hw_state(dev);

        if (force_restore) {
                i915_redisable_vga(dev);

                /*
                 * We need to use raw interfaces for restoring state to avoid
                 * checking (bogus) intermediate states.
                 */
                for_each_pipe(dev_priv, pipe) {
                        struct drm_crtc *crtc =
                                dev_priv->pipe_to_crtc_mapping[pipe];

                        intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
                                       crtc->primary->fb);
                }
        } else {
                intel_modeset_update_staged_output_state(dev);
        }

        intel_modeset_check_state(dev);
}
13671
/*
 * GEM-dependent part of modeset init: GT powersave setup, SSC policy taken
 * from the BIOS-programmed hardware state, hardware init, the overlay, and
 * pinning of any boot framebuffers taken over from the BIOS.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;

        mutex_lock(&dev->struct_mutex);
        intel_init_gt_powersave(dev);
        mutex_unlock(&dev->struct_mutex);

        /*
         * There may be no VBT; and if the BIOS enabled SSC we can
         * just keep using it to avoid unnecessary flicker.  Whereas if the
         * BIOS isn't using it, don't assume it will work even if the VBT
         * indicates as much.
         */
        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
                dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
                                                DREF_SSC1_ENABLE);

        intel_modeset_init_hw(dev);

        intel_setup_overlay(dev);

        /*
         * Make sure any fbs we allocated at startup are properly
         * pinned & fenced.  When we do the allocation it's too early
         * for this.
         */
        mutex_lock(&dev->struct_mutex);
        for_each_crtc(dev, c) {
                obj = intel_fb_obj(c->primary->fb);
                if (obj == NULL)
                        continue;

                /* On pin failure, drop our reference and forget the fb. */
                if (intel_pin_and_fence_fb_obj(c->primary,
                                               c->primary->fb,
                                               NULL)) {
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
                        c->primary->fb = NULL;
                }
        }
        mutex_unlock(&dev->struct_mutex);

        intel_backlight_register(dev);
}
13720
13721 void intel_connector_unregister(struct intel_connector *intel_connector)
13722 {
13723         struct drm_connector *connector = &intel_connector->base;
13724
13725         intel_panel_destroy_backlight(connector);
13726         drm_connector_unregister(connector);
13727 }
13728
/*
 * Tear down all modesetting state on driver unload.  Ordering is critical:
 * interrupts and polling first, then FBC/RC6, pending work, backlights and
 * connectors, and finally the drm core state and GT powersave bookkeeping.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_connector *connector;

        intel_disable_gt_powersave(dev);

        intel_backlight_unregister(dev);

        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
         * experience fancy races otherwise.
         */
        intel_irq_uninstall(dev_priv);

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        drm_kms_helper_poll_fini(dev);

        mutex_lock(&dev->struct_mutex);

        intel_unregister_dsm_handler();

        intel_fbc_disable(dev);

        ironlake_teardown_rc6(dev);

        mutex_unlock(&dev->struct_mutex);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        /* destroy the backlight and sysfs files before encoders/connectors */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct intel_connector *intel_connector;

                intel_connector = to_intel_connector(connector);
                intel_connector->unregister(intel_connector);
        }

        drm_mode_config_cleanup(dev);

        intel_cleanup_overlay(dev);

        mutex_lock(&dev->struct_mutex);
        intel_cleanup_gt_powersave(dev);
        mutex_unlock(&dev->struct_mutex);
}
13780
13781 /*
13782  * Return which encoder is currently attached for connector.
13783  */
13784 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
13785 {
13786         return &intel_attached_encoder(connector)->base;
13787 }
13788
13789 void intel_connector_attach_encoder(struct intel_connector *connector,
13790                                     struct intel_encoder *encoder)
13791 {
13792         connector->encoder = encoder;
13793         drm_mode_connector_attach_encoder(&connector->base,
13794                                           &encoder->base);
13795 }
13796
13797 /*
13798  * set vga decode state - true == enable VGA decode
13799  */
13800 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
13801 {
13802         struct drm_i915_private *dev_priv = dev->dev_private;
13803         unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
13804         u16 gmch_ctrl;
13805
13806         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
13807                 DRM_ERROR("failed to read control word\n");
13808                 return -EIO;
13809         }
13810
13811         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
13812                 return 0;
13813
13814         if (state)
13815                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
13816         else
13817                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
13818
13819         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
13820                 DRM_ERROR("failed to write control word\n");
13821                 return -EIO;
13822         }
13823
13824         return 0;
13825 }
13826
/*
 * Snapshot of display-related registers captured at GPU error time,
 * filled in by intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW/BDW only: HSW_PWR_WELL_DRIVER at capture time */
	u32 power_well_driver;

	/* number of valid entries in transcoder[] below */
	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never filled by the capture code */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false => registers below were skipped (domain powered off) */
		bool power_domain_on;
		u32 source;
		u32 stat;	/* only captured on GMCH-display platforms */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;	/* gen <= 3 only */
		u32 pos;	/* gen <= 3 only */
		u32 addr;	/* gen <= 7, not HSW */
		u32 surface;	/* gen >= 4 only */
		u32 tile_offset;	/* gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* false => timing registers below were skipped */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* A, B, C + eDP; matches capture's table */
};
13870
/*
 * Capture a snapshot of the display hardware state (pipes, planes,
 * cursors, transcoders) for inclusion in a GPU error dump.
 *
 * Returns a kzalloc'd state struct (freed by the error-state code), or
 * NULL when the device has no display pipes or allocation fails.
 * Allocated GFP_ATOMIC — presumably this can run from an atomic error
 * context; verify against callers.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	/* Index order must match num_transcoders accounting below. */
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/*
		 * Skip register reads for powered-down pipes — the fields
		 * stay zero from kzalloc and power_domain_on marks them
		 * invalid for the printer.
		 */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		/* Plane register layout varies by hardware generation. */
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		/* PIPESTAT only exists on GMCH-style display hardware. */
		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* One transcoder per pipe, plus the dedicated eDP one on DDI. */
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* As above: never touch registers in a powered-off domain. */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
13950
13951 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13952
/*
 * Dump a previously captured display error state into the error-state
 * buffer in human-readable form. Safe to call with error == NULL
 * (prints nothing). Structure mirrors
 * intel_display_capture_error_state(): per-pipe/plane/cursor registers
 * first, then per-transcoder timings.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		/* Registers below read as 0 when Power is "off" (skipped at capture). */
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Gen gating matches the capture side exactly. */
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		/*
		 * NOTE(review): for transcoders skipped at capture time
		 * (power domain off) cpu_transcoder stays 0 from kzalloc, so
		 * this line prints transcoder 'A' regardless of which one was
		 * skipped — confirm whether that is intended.
		 */
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
14009
14010 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
14011 {
14012         struct intel_crtc *crtc;
14013
14014         for_each_intel_crtc(dev, crtc) {
14015                 struct intel_unpin_work *work;
14016
14017                 spin_lock_irq(&dev->event_lock);
14018
14019                 work = crtc->unpin_work;
14020
14021                 if (work && work->event &&
14022                     work->event->base.file_priv == file) {
14023                         kfree(work->event);
14024                         work->event = NULL;
14025                 }
14026
14027                 spin_unlock_irq(&dev->event_lock);
14028         }
14029 }