]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/intel_display.c
drm/i915: enable ring freq scaling, RC6 and graphics turbo on Ivy Bridge v3
[karo-tx-linux.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/module.h>
28 #include <linux/input.h>
29 #include <linux/i2c.h>
30 #include <linux/kernel.h>
31 #include <linux/slab.h>
32 #include <linux/vgaarb.h>
33 #include "drmP.h"
34 #include "intel_drv.h"
35 #include "i915_drm.h"
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "drm_dp_helper.h"
39
40 #include "drm_crtc_helper.h"
41
/* True iff any encoder on the crtc named 'crtc' in the enclosing scope is eDP. */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

/* Forward declarations for helpers defined later in this file. */
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
48
/* One candidate set of PLL divisors plus the clocks derived from them. */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;	/* resulting dot clock (kHz), computed by intel_clock() */
    int vco;
    int m;
    int p;
} intel_clock_t;

/* Inclusive [min, max] range for one divisor. */
typedef struct {
    int min, max;
} intel_range_t;

/* p2 selection: p2_slow below dot_limit, p2_fast at or above it. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
/* Full set of divisor limits for one platform/output combination. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t	    p2;
    /* Search strategy used to pick divisors for a target clock. */
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
		      int, int, intel_clock_t *);
};
78
/* FDI */
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */

/* PLL search strategies; bodies are defined later in this file. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock);

/* DisplayPort variants: fixed divisor tables, no search needed. */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock);
95
96 static inline u32 /* units of 100MHz */
97 intel_fdi_link_freq(struct drm_device *dev)
98 {
99         if (IS_GEN5(dev)) {
100                 struct drm_i915_private *dev_priv = dev->dev_private;
101                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
102         } else
103                 return 27;
104 }
105
/* Gen2 (i8xx) limits, DVO-attached outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
        .find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) limits, LVDS outputs. */
static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) limits, SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) limits, LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
};

161
162
/* G4x limits, SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, HDMI and analog (CRT) outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, single-channel LVDS; p2 is fixed (slow == fast). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, dual-channel LVDS; p2 is fixed (slow == fast). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, DisplayPort; uses fixed divisors via intel_find_pll_g4x_dp. */
static const intel_limit_t intel_limits_g4x_display_port = {
        .dot = { .min = 161670, .max = 227000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 97, .max = 108 },
        .m1 = { .min = 0x10, .max = 0x12 },
        .m2 = { .min = 0x05, .max = 0x06 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_g4x_dp,
};
236
/* Pineview limits, SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
};

/* Pineview limits, LVDS outputs; same m1/m2 quirks as the SDVO table. */
static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_find_best_PLL,
};
266
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, single-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, dual-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2,.max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, dual-channel LVDS with 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2,.max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, DisplayPort/eDP; fixed divisors via intel_find_pll_ironlake_dp. */
static const intel_limit_t intel_limits_ironlake_display_port = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 81, .max = 90 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_ironlake_dp,
};
356
357 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
358                                                 int refclk)
359 {
360         struct drm_device *dev = crtc->dev;
361         struct drm_i915_private *dev_priv = dev->dev_private;
362         const intel_limit_t *limit;
363
364         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
365                 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
366                     LVDS_CLKB_POWER_UP) {
367                         /* LVDS dual channel */
368                         if (refclk == 100000)
369                                 limit = &intel_limits_ironlake_dual_lvds_100m;
370                         else
371                                 limit = &intel_limits_ironlake_dual_lvds;
372                 } else {
373                         if (refclk == 100000)
374                                 limit = &intel_limits_ironlake_single_lvds_100m;
375                         else
376                                 limit = &intel_limits_ironlake_single_lvds;
377                 }
378         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
379                         HAS_eDP)
380                 limit = &intel_limits_ironlake_display_port;
381         else
382                 limit = &intel_limits_ironlake_dac;
383
384         return limit;
385 }
386
/*
 * Select the G4x divisor-limit table for this crtc's output type.  For
 * LVDS the dual/single channel choice is read from the LVDS register.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const intel_limit_t *limit;

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        /* LVDS with dual channel */
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
                        /* LVDS with single channel */
                        limit = &intel_limits_g4x_single_channel_lvds;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
                   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
                limit = &intel_limits_g4x_hdmi;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
        } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
                limit = &intel_limits_g4x_display_port;
        } else /* The option is for other outputs */
                limit = &intel_limits_i9xx_sdvo;

        return limit;
}
413
414 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
415 {
416         struct drm_device *dev = crtc->dev;
417         const intel_limit_t *limit;
418
419         if (HAS_PCH_SPLIT(dev))
420                 limit = intel_ironlake_limit(crtc, refclk);
421         else if (IS_G4X(dev)) {
422                 limit = intel_g4x_limit(crtc);
423         } else if (IS_PINEVIEW(dev)) {
424                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
425                         limit = &intel_limits_pineview_lvds;
426                 else
427                         limit = &intel_limits_pineview_sdvo;
428         } else if (!IS_GEN2(dev)) {
429                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
430                         limit = &intel_limits_i9xx_lvds;
431                 else
432                         limit = &intel_limits_i9xx_sdvo;
433         } else {
434                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
435                         limit = &intel_limits_i8xx_lvds;
436                 else
437                         limit = &intel_limits_i8xx_dvo;
438         }
439         return limit;
440 }
441
442 /* m1 is reserved as 0 in Pineview, n is a ring counter */
443 static void pineview_clock(int refclk, intel_clock_t *clock)
444 {
445         clock->m = clock->m2 + 2;
446         clock->p = clock->p1 * clock->p2;
447         clock->vco = refclk * clock->m / clock->n;
448         clock->dot = clock->vco / clock->p;
449 }
450
451 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
452 {
453         if (IS_PINEVIEW(dev)) {
454                 pineview_clock(refclk, clock);
455                 return;
456         }
457         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
458         clock->p = clock->p1 * clock->p2;
459         clock->vco = refclk * clock->m / (clock->n + 2);
460         clock->dot = clock->vco / clock->p;
461 }
462
463 /**
464  * Returns whether any output on the specified pipe is of the specified type
465  */
466 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
467 {
468         struct drm_device *dev = crtc->dev;
469         struct drm_mode_config *mode_config = &dev->mode_config;
470         struct intel_encoder *encoder;
471
472         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
473                 if (encoder->base.crtc == crtc && encoder->type == type)
474                         return true;
475
476         return false;
477 }
478
/* NOTE: expands to an early 'return false' from the enclosing function. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
                               const intel_limit_t *limit,
                               const intel_clock_t *clock)
{
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid ("p1 out of range\n");
        if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
                INTELPllInvalid ("p out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid ("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid ("m1 out of range\n");
        /* m1 > m2 is required except on Pineview, where m1 is always 0. */
        if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
                INTELPllInvalid ("m1 <= m2\n");
        if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
                INTELPllInvalid ("m out of range\n");
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid ("n out of range\n");
        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid ("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid ("dot out of range\n");

        return true;
}
513
/*
 * Exhaustively walk the divisor ranges in @limit and pick the combination
 * whose resulting dot clock is closest to @target.  Fills @best_clock and
 * returns true if any valid combination was found, false otherwise.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                    int target, int refclk, intel_clock_t *best_clock)

{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int err = target;	/* best error so far; target == "none found" */

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
            (I915_READ(LVDS)) != 0) {
                /*
                 * For LVDS, if the panel is on, just rely on its current
                 * settings for dual-channel.  We haven't figured out how to
                 * reliably set up different single/dual channel state, if we
                 * even can.
                 */
                if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
        } else {
                /* Non-LVDS: p2 choice is purely a function of the target clock. */
                if (target < limit->p2.dot_limit)
                        clock.p2 = limit->p2.p2_slow;
                else
                        clock.p2 = limit->p2.p2_fast;
        }

        memset (best_clock, 0, sizeof (*best_clock));

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* m1 is always 0 in Pineview */
                        if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        /* Derive m/p/vco/dot for this candidate. */
                                        intel_clock(dev, refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;

                                        /* Keep the candidate closest to target. */
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
576
/*
 * G4x/Ironlake variant of the PLL search.  Unlike intel_find_best_PLL it
 * only accepts candidates within ~0.585% of @target (err_most) and, per
 * hardware guidance, prefers the smallest n and largest m1/m2/p1 values.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                        int target, int refclk, intel_clock_t *best_clock)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int max_n;
        bool found;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);
        found = false;

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                int lvds_reg;

                /* The LVDS register lives in the PCH on split designs. */
                if (HAS_PCH_SPLIT(dev))
                        lvds_reg = PCH_LVDS;
                else
                        lvds_reg = LVDS;
                if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        clock.p2 = limit->p2.p2_slow;
                else
                        clock.p2 = limit->p2.p2_fast;
        }

        memset(best_clock, 0, sizeof(*best_clock));
        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefere larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        intel_clock(dev, refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                /* Don't consider larger n than this hit's. */
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
640
641 static bool
642 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
643                            int target, int refclk, intel_clock_t *best_clock)
644 {
645         struct drm_device *dev = crtc->dev;
646         intel_clock_t clock;
647
648         if (target < 200000) {
649                 clock.n = 1;
650                 clock.p1 = 2;
651                 clock.p2 = 10;
652                 clock.m1 = 12;
653                 clock.m2 = 9;
654         } else {
655                 clock.n = 2;
656                 clock.p1 = 1;
657                 clock.p2 = 10;
658                 clock.m1 = 14;
659                 clock.m2 = 8;
660         }
661         intel_clock(dev, refclk, &clock);
662         memcpy(best_clock, &clock, sizeof(intel_clock_t));
663         return true;
664 }
665
666 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
667 static bool
668 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
669                       int target, int refclk, intel_clock_t *best_clock)
670 {
671         intel_clock_t clock;
672         if (target < 200000) {
673                 clock.p1 = 2;
674                 clock.p2 = 10;
675                 clock.n = 2;
676                 clock.m1 = 23;
677                 clock.m2 = 8;
678         } else {
679                 clock.p1 = 1;
680                 clock.p2 = 10;
681                 clock.n = 1;
682                 clock.m1 = 14;
683                 clock.m2 = 2;
684         }
685         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
686         clock.p = (clock.p1 * clock.p2);
687         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
688         clock.vco = 0;
689         memcpy(best_clock, &clock, sizeof(intel_clock_t));
690         return true;
691 }
692
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipestat_reg = PIPESTAT(pipe);

        /* Clear existing vblank status. Note this will clear any other
         * sticky status fields as well.
         *
         * This races with i915_driver_irq_handler() with the result
         * that either function could miss a vblank event.  Here it is not
         * fatal, as we will either wait upon the next vblank interrupt or
         * timeout.  Generally speaking intel_wait_for_vblank() is only
         * called during modeset at which time the GPU should be idle and
         * should *not* be performing page flips and thus not waiting on
         * vblanks...
         * Currently, the result of us stealing a vblank from the irq
         * handler is that a single frame will be skipped during swapbuffers.
         */
        I915_WRITE(pipestat_reg,
                   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

        /* Wait for vblank interrupt bit to set */
        if (wait_for(I915_READ(pipestat_reg) &
                     PIPE_VBLANK_INTERRUPT_STATUS,
                     50))
                DRM_DEBUG_KMS("vblank wait timed out\n");
}
728
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen >= 4) {
                int reg = PIPECONF(pipe);

                /* Wait for the Pipe State to go off */
                if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
                             100))
                        DRM_DEBUG_KMS("pipe_off wait timed out\n");
        } else {
                u32 last_line;
                int reg = PIPEDSL(pipe);
                unsigned long timeout = jiffies + msecs_to_jiffies(100);

                /* Wait for the display line to settle */
                do {
                        last_line = I915_READ(reg) & DSL_LINEMASK;
                        mdelay(5);
                } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
                         time_after(timeout, jiffies));
                /* 100ms budget exhausted without the line counter settling. */
                if (time_after(jiffies, timeout))
                        DRM_DEBUG_KMS("pipe_off wait timed out\n");
        }
}
772
/* Map an on/off flag to a human-readable string for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
777
778 /* Only for pre-ILK configs */
779 static void assert_pll(struct drm_i915_private *dev_priv,
780                        enum pipe pipe, bool state)
781 {
782         int reg;
783         u32 val;
784         bool cur_state;
785
786         reg = DPLL(pipe);
787         val = I915_READ(reg);
788         cur_state = !!(val & DPLL_VCO_ENABLE);
789         WARN(cur_state != state,
790              "PLL state assertion failure (expected %s, current %s)\n",
791              state_string(state), state_string(cur_state));
792 }
793 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
794 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
795
796 /* For ILK+ */
797 static void assert_pch_pll(struct drm_i915_private *dev_priv,
798                            enum pipe pipe, bool state)
799 {
800         int reg;
801         u32 val;
802         bool cur_state;
803
804         reg = PCH_DPLL(pipe);
805         val = I915_READ(reg);
806         cur_state = !!(val & DPLL_VCO_ENABLE);
807         WARN(cur_state != state,
808              "PCH PLL state assertion failure (expected %s, current %s)\n",
809              state_string(state), state_string(cur_state));
810 }
811 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
812 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
813
814 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
815                           enum pipe pipe, bool state)
816 {
817         int reg;
818         u32 val;
819         bool cur_state;
820
821         reg = FDI_TX_CTL(pipe);
822         val = I915_READ(reg);
823         cur_state = !!(val & FDI_TX_ENABLE);
824         WARN(cur_state != state,
825              "FDI TX state assertion failure (expected %s, current %s)\n",
826              state_string(state), state_string(cur_state));
827 }
828 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
829 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
830
831 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
832                           enum pipe pipe, bool state)
833 {
834         int reg;
835         u32 val;
836         bool cur_state;
837
838         reg = FDI_RX_CTL(pipe);
839         val = I915_READ(reg);
840         cur_state = !!(val & FDI_RX_ENABLE);
841         WARN(cur_state != state,
842              "FDI RX state assertion failure (expected %s, current %s)\n",
843              state_string(state), state_string(cur_state));
844 }
845 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
846 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
847
848 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
849                                       enum pipe pipe)
850 {
851         int reg;
852         u32 val;
853
854         /* ILK FDI PLL is always enabled */
855         if (dev_priv->info->gen == 5)
856                 return;
857
858         reg = FDI_TX_CTL(pipe);
859         val = I915_READ(reg);
860         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
861 }
862
863 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
864                                       enum pipe pipe)
865 {
866         int reg;
867         u32 val;
868
869         reg = FDI_RX_CTL(pipe);
870         val = I915_READ(reg);
871         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
872 }
873
874 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
875                                   enum pipe pipe)
876 {
877         int pp_reg, lvds_reg;
878         u32 val;
879         enum pipe panel_pipe = PIPE_A;
880         bool locked = locked;
881
882         if (HAS_PCH_SPLIT(dev_priv->dev)) {
883                 pp_reg = PCH_PP_CONTROL;
884                 lvds_reg = PCH_LVDS;
885         } else {
886                 pp_reg = PP_CONTROL;
887                 lvds_reg = LVDS;
888         }
889
890         val = I915_READ(pp_reg);
891         if (!(val & PANEL_POWER_ON) ||
892             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
893                 locked = false;
894
895         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
896                 panel_pipe = PIPE_B;
897
898         WARN(panel_pipe == pipe && locked,
899              "panel assertion failure, pipe %c regs locked\n",
900              pipe_name(pipe));
901 }
902
903 static void assert_pipe(struct drm_i915_private *dev_priv,
904                         enum pipe pipe, bool state)
905 {
906         int reg;
907         u32 val;
908         bool cur_state;
909
910         reg = PIPECONF(pipe);
911         val = I915_READ(reg);
912         cur_state = !!(val & PIPECONF_ENABLE);
913         WARN(cur_state != state,
914              "pipe %c assertion failure (expected %s, current %s)\n",
915              pipe_name(pipe), state_string(state), state_string(cur_state));
916 }
917 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
918 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
919
920 static void assert_plane_enabled(struct drm_i915_private *dev_priv,
921                                  enum plane plane)
922 {
923         int reg;
924         u32 val;
925
926         reg = DSPCNTR(plane);
927         val = I915_READ(reg);
928         WARN(!(val & DISPLAY_PLANE_ENABLE),
929              "plane %c assertion failure, should be active but is disabled\n",
930              plane_name(plane));
931 }
932
933 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
934                                    enum pipe pipe)
935 {
936         int reg, i;
937         u32 val;
938         int cur_pipe;
939
940         /* Planes are fixed to pipes on ILK+ */
941         if (HAS_PCH_SPLIT(dev_priv->dev))
942                 return;
943
944         /* Need to check both planes against the pipe */
945         for (i = 0; i < 2; i++) {
946                 reg = DSPCNTR(i);
947                 val = I915_READ(reg);
948                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
949                         DISPPLANE_SEL_PIPE_SHIFT;
950                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
951                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
952                      plane_name(i), pipe_name(pipe));
953         }
954 }
955
956 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
957 {
958         u32 val;
959         bool enabled;
960
961         val = I915_READ(PCH_DREF_CONTROL);
962         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
963                             DREF_SUPERSPREAD_SOURCE_MASK));
964         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
965 }
966
967 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
968                                        enum pipe pipe)
969 {
970         int reg;
971         u32 val;
972         bool enabled;
973
974         reg = TRANSCONF(pipe);
975         val = I915_READ(reg);
976         enabled = !!(val & TRANS_ENABLE);
977         WARN(enabled,
978              "transcoder assertion failed, should be off on pipe %c but is still active\n",
979              pipe_name(pipe));
980 }
981
982 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
983                                    enum pipe pipe, int reg)
984 {
985         u32 val = I915_READ(reg);
986         WARN(DP_PIPE_ENABLED(val, pipe),
987              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
988              reg, pipe_name(pipe));
989 }
990
991 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
992                                      enum pipe pipe, int reg)
993 {
994         u32 val = I915_READ(reg);
995         WARN(HDMI_PIPE_ENABLED(val, pipe),
996              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
997              reg, pipe_name(pipe));
998 }
999
/*
 * Warn if any PCH output port (DP, VGA/ADPA, LVDS, HDMI) is still enabled
 * on transcoder @pipe.  Used before disabling the transcoder, which must
 * not go down while a port is still sourcing from it.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);

	/* Analog VGA DAC */
	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(ADPA_PIPE_ENABLED(val, pipe),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(LVDS_PIPE_ENABLED(val, pipe),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}
1026
1027 /**
1028  * intel_enable_pll - enable a PLL
1029  * @dev_priv: i915 private structure
1030  * @pipe: pipe PLL to enable
1031  *
1032  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1033  * make sure the PLL reg is writable first though, since the panel write
1034  * protect mechanism may be enabled.
1035  *
1036  * Note!  This is for pre-ILK only.
1037  */
1038 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1039 {
1040         int reg;
1041         u32 val;
1042
1043         /* No really, not for ILK+ */
1044         BUG_ON(dev_priv->info->gen >= 5);
1045
1046         /* PLL is protected by panel, make sure we can write it */
1047         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1048                 assert_panel_unlocked(dev_priv, pipe);
1049
1050         reg = DPLL(pipe);
1051         val = I915_READ(reg);
1052         val |= DPLL_VCO_ENABLE;
1053
1054         /* We do this three times for luck */
1055         I915_WRITE(reg, val);
1056         POSTING_READ(reg);
1057         udelay(150); /* wait for warmup */
1058         I915_WRITE(reg, val);
1059         POSTING_READ(reg);
1060         udelay(150); /* wait for warmup */
1061         I915_WRITE(reg, val);
1062         POSTING_READ(reg);
1063         udelay(150); /* wait for warmup */
1064 }
1065
1066 /**
1067  * intel_disable_pll - disable a PLL
1068  * @dev_priv: i915 private structure
1069  * @pipe: pipe PLL to disable
1070  *
1071  * Disable the PLL for @pipe, making sure the pipe is off first.
1072  *
1073  * Note!  This is for pre-ILK only.
1074  */
1075 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1076 {
1077         int reg;
1078         u32 val;
1079
1080         /* Don't disable pipe A or pipe A PLLs if needed */
1081         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1082                 return;
1083
1084         /* Make sure the pipe isn't still relying on us */
1085         assert_pipe_disabled(dev_priv, pipe);
1086
1087         reg = DPLL(pipe);
1088         val = I915_READ(reg);
1089         val &= ~DPLL_VCO_ENABLE;
1090         I915_WRITE(reg, val);
1091         POSTING_READ(reg);
1092 }
1093
1094 /**
1095  * intel_enable_pch_pll - enable PCH PLL
1096  * @dev_priv: i915 private structure
1097  * @pipe: pipe PLL to enable
1098  *
1099  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1100  * drives the transcoder clock.
1101  */
1102 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1103                                  enum pipe pipe)
1104 {
1105         int reg;
1106         u32 val;
1107
1108         /* PCH only available on ILK+ */
1109         BUG_ON(dev_priv->info->gen < 5);
1110
1111         /* PCH refclock must be enabled first */
1112         assert_pch_refclk_enabled(dev_priv);
1113
1114         reg = PCH_DPLL(pipe);
1115         val = I915_READ(reg);
1116         val |= DPLL_VCO_ENABLE;
1117         I915_WRITE(reg, val);
1118         POSTING_READ(reg);
1119         udelay(200);
1120 }
1121
1122 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1123                                   enum pipe pipe)
1124 {
1125         int reg;
1126         u32 val;
1127
1128         /* PCH only available on ILK+ */
1129         BUG_ON(dev_priv->info->gen < 5);
1130
1131         /* Make sure transcoder isn't still depending on us */
1132         assert_transcoder_disabled(dev_priv, pipe);
1133
1134         reg = PCH_DPLL(pipe);
1135         val = I915_READ(reg);
1136         val &= ~DPLL_VCO_ENABLE;
1137         I915_WRITE(reg, val);
1138         POSTING_READ(reg);
1139         udelay(200);
1140 }
1141
/*
 * Enable the PCH transcoder for @pipe.  The PCH DPLL and both FDI ends
 * must already be running, since the transcoder is clocked by the DPLL
 * and fed by FDI.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	/*
	 * make the BPC in transcoder be consistent with
	 * that in pipeconf reg.
	 */
	val &= ~PIPE_BPC_MASK;
	val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
	I915_WRITE(reg, val | TRANS_ENABLE);
	/* Poll the hardware state bit; enable can take a few ms. */
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
1170
/*
 * Disable the PCH transcoder for @pipe.  FDI and all PCH ports sourcing
 * from this transcoder must be shut down first.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder\n");
}
1192
1193 /**
1194  * intel_enable_pipe - enable a pipe, asserting requirements
1195  * @dev_priv: i915 private structure
1196  * @pipe: pipe to enable
1197  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1198  *
1199  * Enable @pipe, making sure that various hardware specific requirements
1200  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1201  *
1202  * @pipe should be %PIPE_A or %PIPE_B.
1203  *
1204  * Will wait until the pipe is actually running (i.e. first vblank) before
1205  * returning.
1206  */
1207 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1208                               bool pch_port)
1209 {
1210         int reg;
1211         u32 val;
1212
1213         /*
1214          * A pipe without a PLL won't actually be able to drive bits from
1215          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1216          * need the check.
1217          */
1218         if (!HAS_PCH_SPLIT(dev_priv->dev))
1219                 assert_pll_enabled(dev_priv, pipe);
1220         else {
1221                 if (pch_port) {
1222                         /* if driving the PCH, we need FDI enabled */
1223                         assert_fdi_rx_pll_enabled(dev_priv, pipe);
1224                         assert_fdi_tx_pll_enabled(dev_priv, pipe);
1225                 }
1226                 /* FIXME: assert CPU port conditions for SNB+ */
1227         }
1228
1229         reg = PIPECONF(pipe);
1230         val = I915_READ(reg);
1231         if (val & PIPECONF_ENABLE)
1232                 return;
1233
1234         I915_WRITE(reg, val | PIPECONF_ENABLE);
1235         intel_wait_for_vblank(dev_priv->dev, pipe);
1236 }
1237
1238 /**
1239  * intel_disable_pipe - disable a pipe, asserting requirements
1240  * @dev_priv: i915 private structure
1241  * @pipe: pipe to disable
1242  *
1243  * Disable @pipe, making sure that various hardware specific requirements
1244  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1245  *
1246  * @pipe should be %PIPE_A or %PIPE_B.
1247  *
1248  * Will wait until the pipe has shut down before returning.
1249  */
1250 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1251                                enum pipe pipe)
1252 {
1253         int reg;
1254         u32 val;
1255
1256         /*
1257          * Make sure planes won't keep trying to pump pixels to us,
1258          * or we might hang the display.
1259          */
1260         assert_planes_disabled(dev_priv, pipe);
1261
1262         /* Don't disable pipe A or pipe A PLLs if needed */
1263         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1264                 return;
1265
1266         reg = PIPECONF(pipe);
1267         val = I915_READ(reg);
1268         if ((val & PIPECONF_ENABLE) == 0)
1269                 return;
1270
1271         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1272         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1273 }
1274
1275 /**
1276  * intel_enable_plane - enable a display plane on a given pipe
1277  * @dev_priv: i915 private structure
1278  * @plane: plane to enable
1279  * @pipe: pipe being fed
1280  *
1281  * Enable @plane on @pipe, making sure that @pipe is running first.
1282  */
1283 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1284                                enum plane plane, enum pipe pipe)
1285 {
1286         int reg;
1287         u32 val;
1288
1289         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1290         assert_pipe_enabled(dev_priv, pipe);
1291
1292         reg = DSPCNTR(plane);
1293         val = I915_READ(reg);
1294         if (val & DISPLAY_PLANE_ENABLE)
1295                 return;
1296
1297         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1298         intel_wait_for_vblank(dev_priv->dev, pipe);
1299 }
1300
1301 /*
1302  * Plane regs are double buffered, going from enabled->disabled needs a
1303  * trigger in order to latch.  The display address reg provides this.
1304  */
1305 static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1306                                       enum plane plane)
1307 {
1308         u32 reg = DSPADDR(plane);
1309         I915_WRITE(reg, I915_READ(reg));
1310 }
1311
1312 /**
1313  * intel_disable_plane - disable a display plane
1314  * @dev_priv: i915 private structure
1315  * @plane: plane to disable
1316  * @pipe: pipe consuming the data
1317  *
1318  * Disable @plane; should be an independent operation.
1319  */
1320 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1321                                 enum plane plane, enum pipe pipe)
1322 {
1323         int reg;
1324         u32 val;
1325
1326         reg = DSPCNTR(plane);
1327         val = I915_READ(reg);
1328         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1329                 return;
1330
1331         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1332         intel_flush_display_plane(dev_priv, plane);
1333         intel_wait_for_vblank(dev_priv->dev, pipe);
1334 }
1335
1336 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1337                            enum pipe pipe, int reg)
1338 {
1339         u32 val = I915_READ(reg);
1340         if (DP_PIPE_ENABLED(val, pipe))
1341                 I915_WRITE(reg, val & ~DP_PORT_EN);
1342 }
1343
1344 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1345                              enum pipe pipe, int reg)
1346 {
1347         u32 val = I915_READ(reg);
1348         if (HDMI_PIPE_ENABLED(val, pipe))
1349                 I915_WRITE(reg, val & ~PORT_ENABLE);
1350 }
1351
/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	/* Unlock the panel-protected registers before touching the ports. */
	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	disable_pch_dp(dev_priv, pipe, PCH_DP_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D);

	/* Analog VGA DAC */
	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (ADPA_PIPE_ENABLED(val, pipe))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (LVDS_PIPE_ENABLED(val, pipe)) {
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		/* Flush the write and let the LVDS port settle. */
		POSTING_READ(reg);
		udelay(100);
	}

	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}
1382
/*
 * Enable legacy (8xx-style) framebuffer compression for the framebuffer
 * currently bound to @crtc, recompressing every @interval frames.
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Already enabled with identical parameters?  Nothing to do. */
	if (fb->pitch == dev_priv->cfb_pitch &&
	    obj->fence_reg == dev_priv->cfb_fence &&
	    intel_crtc->plane == dev_priv->cfb_plane &&
	    I915_READ(FBC_CONTROL) & FBC_CTL_EN)
		return;

	i8xx_disable_fbc(dev);

	dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;

	/* Never compress more than the fb actually has per line. */
	if (fb->pitch < dev_priv->cfb_pitch)
		dev_priv->cfb_pitch = fb->pitch;

	/* FBC_CTL wants 64B units */
	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
	if (obj->tiling_mode != I915_TILING_NONE)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* NOTE(review): 0x2fff is an odd mask for a contiguous interval
	 * field (0x3fff would be); confirm against the FBC_CONTROL spec. */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	if (obj->tiling_mode != I915_TILING_NONE)
		fbc_ctl |= dev_priv->cfb_fence;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* NOTE(review): message ends with a dangling ", " — cosmetic. */
	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
		      dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
1437
1438 void i8xx_disable_fbc(struct drm_device *dev)
1439 {
1440         struct drm_i915_private *dev_priv = dev->dev_private;
1441         u32 fbc_ctl;
1442
1443         /* Disable compression */
1444         fbc_ctl = I915_READ(FBC_CONTROL);
1445         if ((fbc_ctl & FBC_CTL_EN) == 0)
1446                 return;
1447
1448         fbc_ctl &= ~FBC_CTL_EN;
1449         I915_WRITE(FBC_CONTROL, fbc_ctl);
1450
1451         /* Wait for compressing bit to clear */
1452         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1453                 DRM_DEBUG_KMS("FBC idle timed out\n");
1454                 return;
1455         }
1456
1457         DRM_DEBUG_KMS("disabled FBC\n");
1458 }
1459
1460 static bool i8xx_fbc_enabled(struct drm_device *dev)
1461 {
1462         struct drm_i915_private *dev_priv = dev->dev_private;
1463
1464         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1465 }
1466
/*
 * Enable G4x-style (DPFC) framebuffer compression for the framebuffer
 * bound to @crtc, recompressing every @interval frames.
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		/* NOTE(review): this compares cfb_pitch against itself
		 * (/64 - 1), which can only be true for degenerate values;
		 * it likely should compare against fb->pitch / 64 - 1.
		 * Verify against upstream before changing. */
		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
		    dev_priv->cfb_fence == obj->fence_reg &&
		    dev_priv->cfb_plane == intel_crtc->plane &&
		    dev_priv->cfb_y == crtc->y)
			return;

		/* Parameters changed: turn FBC off before reprogramming. */
		I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* NOTE(review): re-divides the stored cfb_pitch rather than
	 * deriving from fb->pitch — looks suspicious; verify. */
	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	dev_priv->cfb_y = crtc->y;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	if (obj->tiling_mode != I915_TILING_NONE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
		I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
	} else {
		I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
	}

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1514
1515 void g4x_disable_fbc(struct drm_device *dev)
1516 {
1517         struct drm_i915_private *dev_priv = dev->dev_private;
1518         u32 dpfc_ctl;
1519
1520         /* Disable compression */
1521         dpfc_ctl = I915_READ(DPFC_CONTROL);
1522         if (dpfc_ctl & DPFC_CTL_EN) {
1523                 dpfc_ctl &= ~DPFC_CTL_EN;
1524                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1525
1526                 DRM_DEBUG_KMS("disabled FBC\n");
1527         }
1528 }
1529
1530 static bool g4x_fbc_enabled(struct drm_device *dev)
1531 {
1532         struct drm_i915_private *dev_priv = dev->dev_private;
1533
1534         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1535 }
1536
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	/* NOTE(review): the bit is first written shifted by
	 * GEN6_BLITTER_LOCK_SHIFT, then plain, then the shifted copy is
	 * cleared — presumably the upper half of ECOSKPD is a write-mask
	 * that must be armed for the low-half write to take effect;
	 * confirm against the register spec before touching this order. */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* Flush the final write before releasing force-wake. */
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1556
/*
 * Enable Ironlake-style (ILK DPFC) framebuffer compression for the
 * framebuffer bound to @crtc, recompressing every @interval frames.
 * On gen6 this also programs the SNB CPU fence and pokes the blitter
 * so it notifies FBC of render writes.
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		/* NOTE(review): compares cfb_pitch against itself
		 * (/64 - 1) — same suspicious pattern as g4x_enable_fbc;
		 * likely meant fb->pitch / 64 - 1.  Verify upstream. */
		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
		    dev_priv->cfb_fence == obj->fence_reg &&
		    dev_priv->cfb_plane == intel_crtc->plane &&
		    dev_priv->cfb_offset == obj->gtt_offset &&
		    dev_priv->cfb_y == crtc->y)
			return;

		/* Parameters changed: turn FBC off before reprogramming. */
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* NOTE(review): re-divides the stored cfb_pitch — verify. */
	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	dev_priv->cfb_offset = obj->gtt_offset;
	dev_priv->cfb_y = crtc->y;

	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	if (obj->tiling_mode != I915_TILING_NONE) {
		dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
		I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
	} else {
		I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
	}

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1614
1615 void ironlake_disable_fbc(struct drm_device *dev)
1616 {
1617         struct drm_i915_private *dev_priv = dev->dev_private;
1618         u32 dpfc_ctl;
1619
1620         /* Disable compression */
1621         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1622         if (dpfc_ctl & DPFC_CTL_EN) {
1623                 dpfc_ctl &= ~DPFC_CTL_EN;
1624                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1625
1626                 DRM_DEBUG_KMS("disabled FBC\n");
1627         }
1628 }
1629
1630 static bool ironlake_fbc_enabled(struct drm_device *dev)
1631 {
1632         struct drm_i915_private *dev_priv = dev->dev_private;
1633
1634         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1635 }
1636
1637 bool intel_fbc_enabled(struct drm_device *dev)
1638 {
1639         struct drm_i915_private *dev_priv = dev->dev_private;
1640
1641         if (!dev_priv->display.fbc_enabled)
1642                 return false;
1643
1644         return dev_priv->display.fbc_enabled(dev);
1645 }
1646
1647 void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1648 {
1649         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1650
1651         if (!dev_priv->display.enable_fbc)
1652                 return;
1653
1654         dev_priv->display.enable_fbc(crtc, interval);
1655 }
1656
1657 void intel_disable_fbc(struct drm_device *dev)
1658 {
1659         struct drm_i915_private *dev_priv = dev->dev_private;
1660
1661         if (!dev_priv->display.disable_fbc)
1662                 return;
1663
1664         dev_priv->display.disable_fbc(dev);
1665 }
1666
1667 /**
1668  * intel_update_fbc - enable/disable FBC as needed
1669  * @dev: the drm_device
1670  *
1671  * Set up the framebuffer compression hardware at mode set time.  We
1672  * enable it if possible:
1673  *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
1675  *   - no alpha buffer discard
1676  *   - no dual wide
1677  *   - framebuffer <= 2048 in width, 1536 in height
1678  *
1679  * We can't assume that any compression will take place (worst case),
1680  * so the compressed buffer has to be the same size as the uncompressed
1681  * one.  It also must reside (along with the line length buffer) in
1682  * stolen memory.
1683  *
1684  * We need to enable/disable FBC on a global basis.
1685  */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;

	DRM_DEBUG_KMS("\n");

	/* Honour the global powersave module parameter */
	if (!i915_powersave)
		return;

	/* Bail early on hardware without FBC support */
	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	/* Find the single enabled CRTC with a framebuffer bound; if more
	 * than one pipe is scanning out, compression must be disabled. */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	if (!i915_enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* Worst case the frame is incompressible, so the whole fb must
	 * fit in the compressed (stolen memory) buffer. */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	/* i915GM/i945GM can only compress plane A */
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	/* The compressor requires an X-tiled scanout buffer */
	if (obj->tiling_mode != I915_TILING_X) {
		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
1783
/* Pin @obj into the GTT for scan-out, flush it to the display plane and
 * install a fence where required.  Returns 0 on success or a negative
 * errno; on failure the object is left unpinned.  Callers own the pin and
 * must unpin when the fb is no longer scanned out. */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Pick the GTT alignment the display engine needs for this
	 * tiling mode. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Modeset paths must not be interrupted by signals; the flag is
	 * restored on every exit path below. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin(obj, alignment, true);
	if (ret)
		goto err_interruptible;

	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
	if (ret)
		goto err_unpin;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
1843
1844 /* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Only the two primary display planes can be addressed here */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth\n");
		return -EINVAL;
	}
	/* gen4+ has a tiled-scanout bit that must track the object */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (HAS_PCH_SPLIT(dev))
		/* must disable */
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ takes a surface base plus separate tile/linear
		 * offsets; older parts take one combined address. */
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	/* The new scanout may change FBC viability and clock needs */
	intel_update_fbc(dev);
	intel_increase_pllclock(crtc);

	return 0;
}
1925
/* Pin the new fb, wait out pending flips/waits on the old one, program the
 * new base, then unpin the old fb after the next vblank. */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	if (old_fb) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

		/* Wait until page flips against the old fb have completed
		 * (or the GPU has been declared wedged). */
		wait_event(dev_priv->pending_flip_queue,
			   atomic_read(&dev_priv->mm.wedged) ||
			   atomic_read(&obj->pending_flip) == 0);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		ret = i915_gem_object_flush_gpu(obj);
		(void) ret;
	}

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	if (old_fb) {
		/* Keep the old fb pinned until the new base has actually
		 * scanned out, i.e. after the next vblank. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	/* Mirror the new pan position into the legacy SAREA, if present */
	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}
2010
/* Program the eDP PLL frequency select in DP_A for the given link clock:
 * 160MHz (with workaround writes) below 200000 kHz, otherwise 270MHz. */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   NOTE(review): step 4 (0x64000 bit 14) is not explicitly
		   programmed here; presumably the DP_A write below covers
		   it — verify against the PRM. */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	/* flush the write and let the PLL settle */
	POSTING_READ(DP_A);
	udelay(500);
}
2047
/* Switch the FDI TX/RX link from a training pattern to normal (pixel)
 * operation, with enhanced framing, after link training has completed. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses its own train-state field and bit definitions */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2088
2089 /* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* lane count field: bits 21:19 hold (lanes - 1) */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll the RX IIR for bit lock (pattern 1 success), up to 5 reads */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* presumably write-1-to-clear the status bit —
			 * verify against the PRM */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll the RX IIR for symbol lock (pattern 2 success) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2185
2186 static const int snb_b_fdi_train_param [] = {
2187         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2188         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2189         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2190         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2191 };
2192
2193 /* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* lane count field: bits 21:19 hold (lanes - 1) */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the four voltage/pre-emphasis settings until the
	 * RX reports bit lock */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Retry the voltage/pre-emphasis sweep for symbol lock */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2313
2314 /* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* lane count field: bits 21:19 hold (lanes - 1) */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* disable auto-training and select pattern 1 (IVB encoding) */
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the four voltage/pre-emphasis settings until the
	 * RX reports bit lock */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		/* NOTE(review): the second I915_READ re-samples IIR in case
		 * the bit latched after the first read — presumably an A0
		 * workaround; verify */
		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Retry the voltage/pre-emphasis sweep for symbol lock */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2422
/* Bring up the FDI RX and TX PLLs ahead of link training. */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear the lane-count and bpc fields before re-programming */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* mirror the pipe's bits-per-color selection into FDI RX */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2463
2464 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2465 {
2466         struct drm_device *dev = crtc->dev;
2467         struct drm_i915_private *dev_priv = dev->dev_private;
2468         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2469         int pipe = intel_crtc->pipe;
2470         u32 reg, temp;
2471
2472         /* disable CPU FDI tx and PCH FDI rx */
2473         reg = FDI_TX_CTL(pipe);
2474         temp = I915_READ(reg);
2475         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2476         POSTING_READ(reg);
2477
2478         reg = FDI_RX_CTL(pipe);
2479         temp = I915_READ(reg);
2480         temp &= ~(0x7 << 16);
2481         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2482         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2483
2484         POSTING_READ(reg);
2485         udelay(100);
2486
2487         /* Ironlake workaround, disable clock pointer after downing FDI */
2488         if (HAS_PCH_IBX(dev)) {
2489                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2490                 I915_WRITE(FDI_RX_CHICKEN(pipe),
2491                            I915_READ(FDI_RX_CHICKEN(pipe) &
2492                                      ~FDI_RX_PHASE_SYNC_POINTER_EN));
2493         }
2494
2495         /* still set train pattern 1 */
2496         reg = FDI_TX_CTL(pipe);
2497         temp = I915_READ(reg);
2498         temp &= ~FDI_LINK_TRAIN_NONE;
2499         temp |= FDI_LINK_TRAIN_PATTERN_1;
2500         I915_WRITE(reg, temp);
2501
2502         reg = FDI_RX_CTL(pipe);
2503         temp = I915_READ(reg);
2504         if (HAS_PCH_CPT(dev)) {
2505                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2506                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2507         } else {
2508                 temp &= ~FDI_LINK_TRAIN_NONE;
2509                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2510         }
2511         /* BPC in FDI rx is consistent with that in PIPECONF */
2512         temp &= ~(0x07 << 16);
2513         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2514         I915_WRITE(reg, temp);
2515
2516         POSTING_READ(reg);
2517         udelay(100);
2518 }
2519
2520 /*
2521  * When we disable a pipe, we need to clear any pending scanline wait events
2522  * to avoid hanging the ring, which we assume we are waiting on.
2523  */
2524 static void intel_clear_scanline_wait(struct drm_device *dev)
2525 {
2526         struct drm_i915_private *dev_priv = dev->dev_private;
2527         struct intel_ring_buffer *ring;
2528         u32 tmp;
2529
2530         if (IS_GEN2(dev))
2531                 /* Can't break the hang on i8xx */
2532                 return;
2533
2534         ring = LP_RING(dev_priv);
2535         tmp = I915_READ_CTL(ring);
2536         if (tmp & RING_WAIT)
2537                 I915_WRITE_CTL(ring, tmp);
2538 }
2539
2540 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2541 {
2542         struct drm_i915_gem_object *obj;
2543         struct drm_i915_private *dev_priv;
2544
2545         if (crtc->fb == NULL)
2546                 return;
2547
2548         obj = to_intel_framebuffer(crtc->fb)->obj;
2549         dev_priv = crtc->dev->dev_private;
2550         wait_event(dev_priv->pending_flip_queue,
2551                    atomic_read(&obj->pending_flip) == 0);
2552 }
2553
2554 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2555 {
2556         struct drm_device *dev = crtc->dev;
2557         struct drm_mode_config *mode_config = &dev->mode_config;
2558         struct intel_encoder *encoder;
2559
2560         /*
2561          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2562          * must be driven by its own crtc; no sharing is possible.
2563          */
2564         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2565                 if (encoder->base.crtc != crtc)
2566                         continue;
2567
2568                 switch (encoder->type) {
2569                 case INTEL_OUTPUT_EDP:
2570                         if (!intel_encoder_is_pch_edp(&encoder->base))
2571                                 return false;
2572                         continue;
2573                 }
2574         }
2575
2576         return true;
2577 }
2578
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 *
 * The steps below are ordered: the link must be trained and the PLL
 * running before the transcoder timing is programmed, and the transcoder
 * is enabled last.  Do not reorder.
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp;

        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc);

        intel_enable_pch_pll(dev_priv, pipe);

        if (HAS_PCH_CPT(dev)) {
                /* Be sure PCH DPLL SEL is set */
                temp = I915_READ(PCH_DPLL_SEL);
                if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
                        temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
                else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
                        temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        /* The PCH transcoder timings simply mirror the CPU pipe's. */
        I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
        I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
        I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

        I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
        I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
        I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

        intel_fdi_normal_train(crtc);

        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev) &&
            intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
                reg = TRANS_DP_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
                          TRANS_DP_BPC_MASK);
                temp |= (TRANS_DP_OUTPUT_ENABLE |
                         TRANS_DP_ENH_FRAMING);
                /* BPC is forced to 8 here regardless of the pipe setting. */
                temp |= TRANS_DP_8BPC;

                if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
                if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

                switch (intel_trans_dp_port_sel(crtc)) {
                case PCH_DP_B:
                        temp |= TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        temp |= TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        temp |= TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
                        temp |= TRANS_DP_PORT_SEL_B;
                        break;
                }

                I915_WRITE(reg, temp);
        }

        intel_enable_transcoder(dev_priv, pipe);
}
2660
/*
 * Full enable path for an Ironlake-class crtc: FDI, panel fitter, LUT,
 * pipe, plane, and (for PCH-driven outputs) the PCH side.  The ordering
 * below is mandated by the hardware; in particular the LUT must be
 * loaded before the pipe runs, and the PCH is brought up only after the
 * pipe/plane are enabled.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 temp;
        bool is_pch_port;

        /* Idempotent: a second enable is a no-op. */
        if (intel_crtc->active)
                return;

        intel_crtc->active = true;
        intel_update_watermarks(dev);

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                temp = I915_READ(PCH_LVDS);
                if ((temp & LVDS_PORT_EN) == 0)
                        I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
        }

        is_pch_port = intel_crtc_driving_pch(crtc);

        if (is_pch_port)
                ironlake_fdi_pll_enable(crtc);
        else
                ironlake_fdi_disable(crtc);

        /* Enable panel fitting for LVDS */
        if (dev_priv->pch_pf_size &&
            (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
                I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
        }

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_crtc_load_lut(crtc);

        intel_enable_pipe(dev_priv, pipe, is_pch_port);
        intel_enable_plane(dev_priv, plane, pipe);

        if (is_pch_port)
                ironlake_pch_enable(crtc);

        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
        mutex_unlock(&dev->struct_mutex);

        intel_crtc_update_cursor(crtc, true);
}
2720
/*
 * Full disable path for an Ironlake-class crtc, the inverse of
 * ironlake_crtc_enable(): plane, pipe, panel fitter, FDI, then the PCH
 * side (ports, transcoder, DPLL) and finally the FDI PLLs.  The strict
 * reverse ordering matters; do not reorder steps.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 reg, temp;

        /* Idempotent: a second disable is a no-op. */
        if (!intel_crtc->active)
                return;

        /* Flips must drain before the pipe stops, or they never complete. */
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
        intel_crtc_update_cursor(crtc, false);

        intel_disable_plane(dev_priv, plane, pipe);

        if (dev_priv->cfb_plane == plane &&
            dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);

        intel_disable_pipe(dev_priv, pipe);

        /* Disable PF */
        I915_WRITE(PF_CTL(pipe), 0);
        I915_WRITE(PF_WIN_SZ(pipe), 0);

        ironlake_fdi_disable(crtc);

        /* This is a horrible layering violation; we should be doing this in
         * the connector/encoder ->prepare instead, but we don't always have
         * enough information there about the config to know whether it will
         * actually be necessary or just cause undesired flicker.
         */
        intel_disable_pch_ports(dev_priv, pipe);

        intel_disable_transcoder(dev_priv, pipe);

        if (HAS_PCH_CPT(dev)) {
                /* disable TRANS_DP_CTL */
                reg = TRANS_DP_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
                temp |= TRANS_DP_PORT_SEL_NONE;
                I915_WRITE(reg, temp);

                /* disable DPLL_SEL */
                temp = I915_READ(PCH_DPLL_SEL);
                switch (pipe) {
                case 0:
                        temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
                        break;
                case 1:
                        temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
                        break;
                case 2:
                        /* FIXME: manage transcoder PLLs? */
                        temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
                        break;
                default:
                        BUG(); /* wtf */
                }
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* disable PCH DPLL */
        intel_disable_pch_pll(dev_priv, pipe);

        /* Switch from PCDclk to Rawclk */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_PCDCLK);

        /* Disable CPU FDI TX PLL */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(100);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

        /* Wait for the clocks to turn off. */
        POSTING_READ(reg);
        udelay(100);

        intel_crtc->active = false;
        intel_update_watermarks(dev);

        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
        intel_clear_scanline_wait(dev);
        mutex_unlock(&dev->struct_mutex);
}
2819
2820 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2821 {
2822         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2823         int pipe = intel_crtc->pipe;
2824         int plane = intel_crtc->plane;
2825
2826         /* XXX: When our outputs are all unaware of DPMS modes other than off
2827          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2828          */
2829         switch (mode) {
2830         case DRM_MODE_DPMS_ON:
2831         case DRM_MODE_DPMS_STANDBY:
2832         case DRM_MODE_DPMS_SUSPEND:
2833                 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
2834                 ironlake_crtc_enable(crtc);
2835                 break;
2836
2837         case DRM_MODE_DPMS_OFF:
2838                 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
2839                 ironlake_crtc_disable(crtc);
2840                 break;
2841         }
2842 }
2843
2844 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
2845 {
2846         if (!enable && intel_crtc->overlay) {
2847                 struct drm_device *dev = intel_crtc->base.dev;
2848                 struct drm_i915_private *dev_priv = dev->dev_private;
2849
2850                 mutex_lock(&dev->struct_mutex);
2851                 dev_priv->mm.interruptible = false;
2852                 (void) intel_overlay_switch_off(intel_crtc->overlay);
2853                 dev_priv->mm.interruptible = true;
2854                 mutex_unlock(&dev->struct_mutex);
2855         }
2856
2857         /* Let userspace switch the overlay on again. In most cases userspace
2858          * has to recompute where to put it anyway.
2859          */
2860 }
2861
/*
 * Enable path for pre-Ironlake crtcs: PLL, then pipe, then plane, then
 * LUT/FBC/overlay/cursor.  The PLL must be running before the pipe and
 * the pipe before the plane; do not reorder.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;

        /* Idempotent: a second enable is a no-op. */
        if (intel_crtc->active)
                return;

        intel_crtc->active = true;
        intel_update_watermarks(dev);

        intel_enable_pll(dev_priv, pipe);
        intel_enable_pipe(dev_priv, pipe, false);
        intel_enable_plane(dev_priv, plane, pipe);

        intel_crtc_load_lut(crtc);
        intel_update_fbc(dev);

        /* Give the overlay scaler a chance to enable if it's on this pipe */
        intel_crtc_dpms_overlay(intel_crtc, true);
        intel_crtc_update_cursor(crtc, true);
}
2887
/*
 * Disable path for pre-Ironlake crtcs, the inverse of i9xx_crtc_enable():
 * cursor/overlay/FBC first, then plane, pipe, and finally the PLL.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;

        /* Idempotent: a second disable is a no-op. */
        if (!intel_crtc->active)
                return;

        /* Give the overlay scaler a chance to disable if it's on this pipe */
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
        intel_crtc_dpms_overlay(intel_crtc, false);
        intel_crtc_update_cursor(crtc, false);

        if (dev_priv->cfb_plane == plane &&
            dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);

        intel_disable_plane(dev_priv, plane, pipe);
        intel_disable_pipe(dev_priv, pipe);
        intel_disable_pll(dev_priv, pipe);

        intel_crtc->active = false;
        intel_update_fbc(dev);
        intel_update_watermarks(dev);
        intel_clear_scanline_wait(dev);
}
2918
2919 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2920 {
2921         /* XXX: When our outputs are all unaware of DPMS modes other than off
2922          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2923          */
2924         switch (mode) {
2925         case DRM_MODE_DPMS_ON:
2926         case DRM_MODE_DPMS_STANDBY:
2927         case DRM_MODE_DPMS_SUSPEND:
2928                 i9xx_crtc_enable(crtc);
2929                 break;
2930         case DRM_MODE_DPMS_OFF:
2931                 i9xx_crtc_disable(crtc);
2932                 break;
2933         }
2934 }
2935
2936 /**
2937  * Sets the power management mode of the pipe and plane.
2938  */
2939 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2940 {
2941         struct drm_device *dev = crtc->dev;
2942         struct drm_i915_private *dev_priv = dev->dev_private;
2943         struct drm_i915_master_private *master_priv;
2944         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2945         int pipe = intel_crtc->pipe;
2946         bool enabled;
2947
2948         if (intel_crtc->dpms_mode == mode)
2949                 return;
2950
2951         intel_crtc->dpms_mode = mode;
2952
2953         dev_priv->display.dpms(crtc, mode);
2954
2955         if (!dev->primary->master)
2956                 return;
2957
2958         master_priv = dev->primary->master->driver_priv;
2959         if (!master_priv->sarea_priv)
2960                 return;
2961
2962         enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
2963
2964         switch (pipe) {
2965         case 0:
2966                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
2967                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
2968                 break;
2969         case 1:
2970                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
2971                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
2972                 break;
2973         default:
2974                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
2975                 break;
2976         }
2977 }
2978
2979 static void intel_crtc_disable(struct drm_crtc *crtc)
2980 {
2981         struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2982         struct drm_device *dev = crtc->dev;
2983
2984         crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
2985
2986         if (crtc->fb) {
2987                 mutex_lock(&dev->struct_mutex);
2988                 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2989                 mutex_unlock(&dev->struct_mutex);
2990         }
2991 }
2992
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will change)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
/* Helper-framework ->prepare: fully power down before the mode set. */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
        i9xx_crtc_disable(crtc);
}

/* Helper-framework ->commit: power back up after the mode set. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
        i9xx_crtc_enable(crtc);
}

/* PCH-split equivalent of i9xx_crtc_prepare(). */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
        ironlake_crtc_disable(crtc);
}

/* PCH-split equivalent of i9xx_crtc_commit(). */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
        ironlake_crtc_enable(crtc);
}
3020
/* Generic encoder ->prepare: turn the output off ahead of a mode set. */
void intel_encoder_prepare (struct drm_encoder *encoder)
{
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
        /* lvds has its own version of prepare see intel_lvds_prepare */
        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

/* Generic encoder ->commit: turn the output back on after a mode set. */
void intel_encoder_commit (struct drm_encoder *encoder)
{
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
        /* lvds has its own version of commit see intel_lvds_commit */
        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
3034
/* Tear down the DRM core encoder state, then free the containing
 * intel_encoder allocation. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(to_intel_encoder(encoder));
}
3042
3043 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3044                                   struct drm_display_mode *mode,
3045                                   struct drm_display_mode *adjusted_mode)
3046 {
3047         struct drm_device *dev = crtc->dev;
3048
3049         if (HAS_PCH_SPLIT(dev)) {
3050                 /* FDI link clock is fixed at 2.7G */
3051                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3052                         return false;
3053         }
3054
3055         /* XXX some encoders set the crtcinfo, others don't.
3056          * Obviously we need some form of conflict resolution here...
3057          */
3058         if (adjusted_mode->crtc_htotal == 0)
3059                 drm_mode_set_crtcinfo(adjusted_mode, 0);
3060
3061         return true;
3062 }
3063
/* Fixed core display clock, in kHz, for i945-class hardware. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
        return 400000;
}

/* Fixed core display clock, in kHz, for i915-class hardware. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
        return 333000;
}

/* Fixed core display clock, in kHz, for the remaining i9xx variants. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
        return 200000;
}
3078
3079 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3080 {
3081         u16 gcfgc = 0;
3082
3083         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3084
3085         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3086                 return 133000;
3087         else {
3088                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3089                 case GC_DISPLAY_CLOCK_333_MHZ:
3090                         return 333000;
3091                 default:
3092                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3093                         return 190000;
3094                 }
3095         }
3096 }
3097
/* Fixed core display clock, in kHz, for i865-class hardware. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
        return 266000;
}
3102
/* Core display clock, in kHz, for i855-class hardware. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
        u16 hpllcc = 0;
        /* Assume that the hardware is in the high speed state.  This
         * should be the default.
         */
        /* NOTE(review): hpllcc is never actually read from the hardware,
         * so the switch below always evaluates case 0; presumably that is
         * the power-on default encoding — confirm before relying on the
         * non-default cases. */
        switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
        case GC_CLOCK_133_200:
        case GC_CLOCK_100_200:
                return 200000;
        case GC_CLOCK_166_250:
                return 250000;
        case GC_CLOCK_100_133:
                return 133000;
        }

        /* Shouldn't happen */
        return 0;
}
3122
/* Fixed core display clock, in kHz, for i830-class hardware. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
        return 133000;
}
3127
/* FDI link M/N ratio values programmed into the PIPE_DATA_M1/N1 and
 * PIPE_LINK_M1/N1 registers; computed by ironlake_compute_m_n(). */
struct fdi_m_n {
        u32        tu;      /* transfer unit size */
        u32        gmch_m;  /* data M: bits-per-pixel * pixel clock */
        u32        gmch_n;  /* data N: link clock * lanes * 8 */
        u32        link_m;  /* link M: pixel clock */
        u32        link_n;  /* link N: link clock */
};
3135
3136 static void
3137 fdi_reduce_ratio(u32 *num, u32 *den)
3138 {
3139         while (*num > 0xffffff || *den > 0xffffff) {
3140                 *num >>= 1;
3141                 *den >>= 1;
3142         }
3143 }
3144
/*
 * Compute the FDI data and link M/N ratios for the given pixel format,
 * lane count and clocks, reducing each ratio to fit the 24-bit register
 * fields.  Results are written into @m_n.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
                     int link_clock, struct fdi_m_n *m_n)
{
        m_n->tu = 64; /* default size */

        /* Data M/N: pixel bandwidth over total link bandwidth
         * (8 bits per symbol per lane). */
        /* BUG_ON(pixel_clock > INT_MAX / 36); */
        m_n->gmch_m = bits_per_pixel * pixel_clock;
        m_n->gmch_n = link_clock * nlanes * 8;
        fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

        /* Link M/N: pixel clock over link clock. */
        m_n->link_m = pixel_clock;
        m_n->link_n = link_clock;
        fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3160
3161
/* Per-platform FIFO parameters used by intel_calculate_wm(). */
struct intel_watermark_params {
        unsigned long fifo_size;       /* total FIFO size, in cachelines */
        unsigned long max_wm;          /* highest valid watermark level */
        unsigned long default_wm;      /* fallback when computation underflows */
        unsigned long guard_size;      /* extra entries kept in reserve */
        unsigned long cacheline_size;  /* FIFO line size, in bytes */
};
3169
3170 /* Pineview has different values for various configs */
3171 static const struct intel_watermark_params pineview_display_wm = {
3172         PINEVIEW_DISPLAY_FIFO,
3173         PINEVIEW_MAX_WM,
3174         PINEVIEW_DFT_WM,
3175         PINEVIEW_GUARD_WM,
3176         PINEVIEW_FIFO_LINE_SIZE
3177 };
3178 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3179         PINEVIEW_DISPLAY_FIFO,
3180         PINEVIEW_MAX_WM,
3181         PINEVIEW_DFT_HPLLOFF_WM,
3182         PINEVIEW_GUARD_WM,
3183         PINEVIEW_FIFO_LINE_SIZE
3184 };
3185 static const struct intel_watermark_params pineview_cursor_wm = {
3186         PINEVIEW_CURSOR_FIFO,
3187         PINEVIEW_CURSOR_MAX_WM,
3188         PINEVIEW_CURSOR_DFT_WM,
3189         PINEVIEW_CURSOR_GUARD_WM,
3190         PINEVIEW_FIFO_LINE_SIZE,
3191 };
3192 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3193         PINEVIEW_CURSOR_FIFO,
3194         PINEVIEW_CURSOR_MAX_WM,
3195         PINEVIEW_CURSOR_DFT_WM,
3196         PINEVIEW_CURSOR_GUARD_WM,
3197         PINEVIEW_FIFO_LINE_SIZE
3198 };
3199 static const struct intel_watermark_params g4x_wm_info = {
3200         G4X_FIFO_SIZE,
3201         G4X_MAX_WM,
3202         G4X_MAX_WM,
3203         2,
3204         G4X_FIFO_LINE_SIZE,
3205 };
3206 static const struct intel_watermark_params g4x_cursor_wm_info = {
3207         I965_CURSOR_FIFO,
3208         I965_CURSOR_MAX_WM,
3209         I965_CURSOR_DFT_WM,
3210         2,
3211         G4X_FIFO_LINE_SIZE,
3212 };
3213 static const struct intel_watermark_params i965_cursor_wm_info = {
3214         I965_CURSOR_FIFO,
3215         I965_CURSOR_MAX_WM,
3216         I965_CURSOR_DFT_WM,
3217         2,
3218         I915_FIFO_LINE_SIZE,
3219 };
3220 static const struct intel_watermark_params i945_wm_info = {
3221         I945_FIFO_SIZE,
3222         I915_MAX_WM,
3223         1,
3224         2,
3225         I915_FIFO_LINE_SIZE
3226 };
3227 static const struct intel_watermark_params i915_wm_info = {
3228         I915_FIFO_SIZE,
3229         I915_MAX_WM,
3230         1,
3231         2,
3232         I915_FIFO_LINE_SIZE
3233 };
3234 static const struct intel_watermark_params i855_wm_info = {
3235         I855GM_FIFO_SIZE,
3236         I915_MAX_WM,
3237         1,
3238         2,
3239         I830_FIFO_LINE_SIZE
3240 };
3241 static const struct intel_watermark_params i830_wm_info = {
3242         I830_FIFO_SIZE,
3243         I915_MAX_WM,
3244         1,
3245         2,
3246         I830_FIFO_LINE_SIZE
3247 };
3248
3249 static const struct intel_watermark_params ironlake_display_wm_info = {
3250         ILK_DISPLAY_FIFO,
3251         ILK_DISPLAY_MAXWM,
3252         ILK_DISPLAY_DFTWM,
3253         2,
3254         ILK_FIFO_LINE_SIZE
3255 };
3256 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3257         ILK_CURSOR_FIFO,
3258         ILK_CURSOR_MAXWM,
3259         ILK_CURSOR_DFTWM,
3260         2,
3261         ILK_FIFO_LINE_SIZE
3262 };
3263 static const struct intel_watermark_params ironlake_display_srwm_info = {
3264         ILK_DISPLAY_SR_FIFO,
3265         ILK_DISPLAY_MAX_SRWM,
3266         ILK_DISPLAY_DFT_SRWM,
3267         2,
3268         ILK_FIFO_LINE_SIZE
3269 };
3270 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3271         ILK_CURSOR_SR_FIFO,
3272         ILK_CURSOR_MAX_SRWM,
3273         ILK_CURSOR_DFT_SRWM,
3274         2,
3275         ILK_FIFO_LINE_SIZE
3276 };
3277
3278 static const struct intel_watermark_params sandybridge_display_wm_info = {
3279         SNB_DISPLAY_FIFO,
3280         SNB_DISPLAY_MAXWM,
3281         SNB_DISPLAY_DFTWM,
3282         2,
3283         SNB_FIFO_LINE_SIZE
3284 };
3285 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3286         SNB_CURSOR_FIFO,
3287         SNB_CURSOR_MAXWM,
3288         SNB_CURSOR_DFTWM,
3289         2,
3290         SNB_FIFO_LINE_SIZE
3291 };
3292 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3293         SNB_DISPLAY_SR_FIFO,
3294         SNB_DISPLAY_MAX_SRWM,
3295         SNB_DISPLAY_DFT_SRWM,
3296         2,
3297         SNB_FIFO_LINE_SIZE
3298 };
3299 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3300         SNB_CURSOR_SR_FIFO,
3301         SNB_CURSOR_MAX_SRWM,
3302         SNB_CURSOR_DFT_SRWM,
3303         2,
3304         SNB_FIFO_LINE_SIZE
3305 };
3306
3307
3308 /**
3309  * intel_calculate_wm - calculate watermark level
3310  * @clock_in_khz: pixel clock
3311  * @wm: chip FIFO params
3312  * @pixel_size: display pixel size
3313  * @latency_ns: memory latency for the platform
3314  *
3315  * Calculate the watermark level (the level at which the display plane will
3316  * start fetching from memory again).  Each chip has a different display
3317  * FIFO size and allocation, so the caller needs to figure that out and pass
3318  * in the correct intel_watermark_params structure.
3319  *
3320  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3321  * on the pixel size.  When it reaches the watermark level, it'll start
3322  * fetching FIFO line sized based chunks from memory until the FIFO fills
3323  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3324  * will occur, and a display engine hang could result.
3325  */
3326 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3327                                         const struct intel_watermark_params *wm,
3328                                         int fifo_size,
3329                                         int pixel_size,
3330                                         unsigned long latency_ns)
3331 {
3332         long entries_required, wm_size;
3333
3334         /*
3335          * Note: we need to make sure we don't overflow for various clock &
3336          * latency values.
3337          * clocks go from a few thousand to several hundred thousand.
3338          * latency is usually a few thousand
3339          */
3340         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3341                 1000;
3342         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3343
3344         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3345
3346         wm_size = fifo_size - (entries_required + wm->guard_size);
3347
3348         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3349
3350         /* Don't promote wm_size to unsigned... */
3351         if (wm_size > (long)wm->max_wm)
3352                 wm_size = wm->max_wm;
3353         if (wm_size <= 0)
3354                 wm_size = wm->default_wm;
3355         return wm_size;
3356 }
3357
/*
 * One row of the CxSR (self refresh) latency table: matched on platform
 * class, memory type and FSB/memory clocks, yielding the latencies used
 * for the DSPFW watermark calculations in pineview_update_wm().
 */
struct cxsr_latency {
	int is_desktop;		/* 1 = desktop part (IS_PINEVIEW_G), 0 = mobile */
	int is_ddr3;		/* 1 = DDR3, 0 = DDR2 */
	unsigned long fsb_freq;	/* FSB clock, lookup key */
	unsigned long mem_freq;	/* memory clock, lookup key */
	unsigned long display_sr;		/* display self-refresh latency */
	unsigned long display_hpll_disable;	/* display SR latency, HPLL off */
	unsigned long cursor_sr;		/* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;	/* cursor SR latency, HPLL off */
};
3368
/*
 * Self-refresh latency table, looked up by intel_get_cxsr_latency().
 * Column order follows struct cxsr_latency:
 *   {is_desktop, is_ddr3, fsb_freq, mem_freq,
 *    display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable}
 * In every row the HPLL-off value is the SR value plus 30000, except one
 * noted below.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	/* NOTE(review): 34106 breaks the cursor_sr+30000 pattern seen in
	 * every other row (4103+30000 = 34103) — verify against the spec
	 * before "fixing"; it may be intentional. */
	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3406
3407 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3408                                                          int is_ddr3,
3409                                                          int fsb,
3410                                                          int mem)
3411 {
3412         const struct cxsr_latency *latency;
3413         int i;
3414
3415         if (fsb == 0 || mem == 0)
3416                 return NULL;
3417
3418         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3419                 latency = &cxsr_latency_table[i];
3420                 if (is_desktop == latency->is_desktop &&
3421                     is_ddr3 == latency->is_ddr3 &&
3422                     fsb == latency->fsb_freq && mem == latency->mem_freq)
3423                         return latency;
3424         }
3425
3426         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3427
3428         return NULL;
3429 }
3430
3431 static void pineview_disable_cxsr(struct drm_device *dev)
3432 {
3433         struct drm_i915_private *dev_priv = dev->dev_private;
3434
3435         /* deactivate cxsr */
3436         I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3437 }
3438
3439 /*
3440  * Latency for FIFO fetches is dependent on several factors:
3441  *   - memory configuration (speed, channels)
3442  *   - chipset
3443  *   - current MCH state
3444  * It can be fairly high in some situations, so here we assume a fairly
3445  * pessimal value.  It's a tradeoff between extra memory fetches (if we
3446  * set this value too high, the FIFO will fetch frequently to stay full)
3447  * and power consumption (set it too low to save power and we might see
3448  * FIFO underruns and display "flicker").
3449  *
3450  * A value of 5us seems to be a good balance; safe for very low end
3451  * platforms but not overly aggressive on lower latency configs.
3452  */
3453 static const int latency_ns = 5000;
3454
3455 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3456 {
3457         struct drm_i915_private *dev_priv = dev->dev_private;
3458         uint32_t dsparb = I915_READ(DSPARB);
3459         int size;
3460
3461         size = dsparb & 0x7f;
3462         if (plane)
3463                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3464
3465         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3466                       plane ? "B" : "A", size);
3467
3468         return size;
3469 }
3470
3471 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3472 {
3473         struct drm_i915_private *dev_priv = dev->dev_private;
3474         uint32_t dsparb = I915_READ(DSPARB);
3475         int size;
3476
3477         size = dsparb & 0x1ff;
3478         if (plane)
3479                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3480         size >>= 1; /* Convert to cachelines */
3481
3482         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3483                       plane ? "B" : "A", size);
3484
3485         return size;
3486 }
3487
3488 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3489 {
3490         struct drm_i915_private *dev_priv = dev->dev_private;
3491         uint32_t dsparb = I915_READ(DSPARB);
3492         int size;
3493
3494         size = dsparb & 0x7f;
3495         size >>= 2; /* Convert to cachelines */
3496
3497         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3498                       plane ? "B" : "A",
3499                       size);
3500
3501         return size;
3502 }
3503
3504 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3505 {
3506         struct drm_i915_private *dev_priv = dev->dev_private;
3507         uint32_t dsparb = I915_READ(DSPARB);
3508         int size;
3509
3510         size = dsparb & 0x7f;
3511         size >>= 1; /* Convert to cachelines */
3512
3513         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3514                       plane ? "B" : "A", size);
3515
3516         return size;
3517 }
3518
3519 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3520 {
3521         struct drm_crtc *crtc, *enabled = NULL;
3522
3523         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3524                 if (crtc->enabled && crtc->fb) {
3525                         if (enabled)
3526                                 return NULL;
3527                         enabled = crtc;
3528                 }
3529         }
3530
3531         return enabled;
3532 }
3533
/*
 * pineview_update_wm - program Pineview self-refresh (CxSR) watermarks
 *
 * Looks up the latency row for the current FSB/memory configuration and,
 * when exactly one crtc is active, programs the display and cursor
 * self-refresh watermarks (normal and HPLL-off variants) into DSPFW1 and
 * DSPFW3, then enables self refresh.  With an unknown memory config or
 * zero/multiple active crtcs, self refresh is disabled instead.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): fifo_size here comes from the display params,
		 * not pineview_cursor_wm — verify this is intentional. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		/* HPLL SR field sits at bit 0, so no shift is needed */
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3602
/*
 * g4x_compute_wm0 - compute the LP0 (non self refresh) watermarks for a plane
 * @dev: drm device
 * @plane: plane index whose crtc mode drives the calculation
 * @display: FIFO parameters for the display plane
 * @display_latency_ns: memory latency for plane fetches
 * @cursor: FIFO parameters for the cursor
 * @cursor_latency_ns: memory latency for cursor fetches
 * @plane_wm: computed plane watermark (out)
 * @cursor_wm: computed cursor watermark (out)
 *
 * Returns true when the crtc is active and watermarks were derived from
 * its mode; false (with guard-size defaults) when the pipe is off.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		/* Inactive pipe: fall back to the minimal guard values. */
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* Pad for a possible TLB miss eating into the FIFO headroom. */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	/* latency is ns, line time us: +1000/1000 rounds up to whole lines */
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	/* assumes a 64 pixel wide cursor — TODO confirm against hw limits */
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
3653
3654 /*
3655  * Check the wm result.
3656  *
3657  * If any calculated watermark values is larger than the maximum value that
3658  * can be programmed into the associated watermark register, that watermark
3659  * must be disabled.
3660  */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	/* Values past max_wm can't be programmed into the registers. */
	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	/* Both zero means the latency was 0 — nothing worth enabling. */
	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
3688
/*
 * g4x_compute_srwm - compute self-refresh watermarks for the active plane
 * @dev: drm device
 * @plane: plane whose crtc mode drives the calculation
 * @latency_ns: self-refresh memory latency
 * @display: FIFO parameters for the display plane
 * @cursor: FIFO parameters for the cursor
 * @display_wm: computed display watermark (out)
 * @cursor_wm: computed cursor watermark (out)
 *
 * Returns true when the computed values pass g4x_check_srwm(); a zero
 * latency yields zero watermarks and false.
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* latency is ns, line time us: +1000/1000 rounds up to whole lines */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	/* assumes a 64 pixel wide cursor — TODO confirm against hw limits */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
3734
/* true when exactly one bit of the plane mask is set */
#define single_plane_enabled(mask) is_power_of_2(mask)
3736
/*
 * g4x_update_wm - program G4x FIFO and self-refresh watermarks
 *
 * Computes LP0 watermarks for planes A and B, and a self-refresh
 * watermark when exactly one plane is enabled.  FW_BLC_SELF is only left
 * enabled in that single-plane case, and HPLL-off SR is kept disabled.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active planes */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self refresh is only attempted with a single active plane. */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
3787
/*
 * i965_update_wm - program gen4 watermarks
 *
 * Gen4 programs fixed plane watermarks (8) and only computes the display
 * and cursor self-refresh watermarks, which apply in single-pipe
 * configurations.  On Crestline FW_BLC_SELF is toggled accordingly.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* fallback when SR is not applicable */
	int cursor_sr = 16;	/* fallback cursor SR watermark */

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* assumes a 64 pixel wide cursor — TODO confirm */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
3852
/*
 * i9xx_update_wm - program gen2/gen3-style FIFO watermarks
 *
 * Computes plane A/B watermarks from the active modes (falling back to
 * FIFO-minus-guard for disabled pipes), and on chips with FW_BLC self
 * refresh computes and enables a self-refresh watermark when exactly one
 * pipe is active.  Self refresh is disabled up front and only re-enabled
 * at the end, so the intermediate register writes are safe.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		/* "enabled" tracks the single active crtc; NULL if both on */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self refresh only if a single pipe remains active. */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
3963
3964 static void i830_update_wm(struct drm_device *dev)
3965 {
3966         struct drm_i915_private *dev_priv = dev->dev_private;
3967         struct drm_crtc *crtc;
3968         uint32_t fwater_lo;
3969         int planea_wm;
3970
3971         crtc = single_enabled_crtc(dev);
3972         if (crtc == NULL)
3973                 return;
3974
3975         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
3976                                        dev_priv->display.get_fifo_size(dev, 0),
3977                                        crtc->fb->bits_per_pixel / 8,
3978                                        latency_ns);
3979         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
3980         fwater_lo |= (3<<8) | planea_wm;
3981
3982         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
3983
3984         I915_WRITE(FW_BLC, fwater_lo);
3985 }
3986
/* Ironlake LP0 (WM0) plane/cursor latencies — units per hw spec; TODO confirm */
#define ILK_LP0_PLANE_LATENCY           700
#define ILK_LP0_CURSOR_LATENCY          1300
3989
3990 /*
3991  * Check the wm result.
3992  *
3993  * If any calculated watermark values is larger than the maximum value that
3994  * can be programmed into the associated watermark register, that watermark
3995  * must be disabled.
3996  */
3997 static bool ironlake_check_srwm(struct drm_device *dev, int level,
3998                                 int fbc_wm, int display_wm, int cursor_wm,
3999                                 const struct intel_watermark_params *display,
4000                                 const struct intel_watermark_params *cursor)
4001 {
4002         struct drm_i915_private *dev_priv = dev->dev_private;
4003
4004         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4005                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4006
4007         if (fbc_wm > SNB_FBC_MAX_SRWM) {
4008                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4009                               fbc_wm, SNB_FBC_MAX_SRWM, level);
4010
4011                 /* fbc has it's own way to disable FBC WM */
4012                 I915_WRITE(DISP_ARB_CTL,
4013                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4014                 return false;
4015         }
4016
4017         if (display_wm > display->max_wm) {
4018                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4019                               display_wm, SNB_DISPLAY_MAX_SRWM, level);
4020                 return false;
4021         }
4022
4023         if (cursor_wm > cursor->max_wm) {
4024                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4025                               cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4026                 return false;
4027         }
4028
4029         if (!(fbc_wm || display_wm || cursor_wm)) {
4030                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4031                 return false;
4032         }
4033
4034         return true;
4035 }
4036
4037 /*
4038  * Compute watermark values of WM[1-3],
4039  */
/*
 * ironlake_compute_srwm - compute one WM[1-3] self-refresh watermark set
 * @dev: drm device
 * @level: watermark level being computed (for debug output)
 * @plane: plane whose crtc mode drives the calculation
 * @latency_ns: memory latency for this level
 * @display: FIFO parameters for the display plane
 * @cursor: FIFO parameters for the cursor
 * @fbc_wm: computed FBC watermark (out)
 * @display_wm: computed display watermark (out)
 * @cursor_wm: computed cursor watermark (out)
 *
 * Returns true when the results pass ironlake_check_srwm(); a zero
 * latency zeroes all results and returns false (level disabled).
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* latency is ns, line time us: +1000/1000 rounds up to whole lines */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	/* assumes a 64 pixel wide cursor — TODO confirm */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
4090
/*
 * ironlake_update_wm - recompute and program FIFO watermarks on Ironlake.
 *
 * Programs the WM0 (normal) watermarks for each active pipe, then - only
 * when exactly one plane is enabled - computes and programs the WM1/WM2
 * self-refresh watermarks.  WM3 is left disabled (see the comment at the
 * end of the function).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;

        /* Bitmask of pipes whose WM0 was successfully computed. */
        enabled = 0;
        if (g4x_compute_wm0(dev, 0,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
                            ILK_LP0_CURSOR_LATENCY,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEA_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 1;
        }

        if (g4x_compute_wm0(dev, 1,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
                            ILK_LP0_CURSOR_LATENCY,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEB_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 2;
        }

        /*
         * Calculate and update the self-refresh watermark only when one
         * display plane is used.
         */
        /* Disable all SR watermarks first; they are re-enabled below
         * (ascending order) only if the computations succeed. */
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);

        if (!single_plane_enabled(enabled))
                return;
        /* Convert the single-bit mask to a plane index for the SR math. */
        enabled = ffs(enabled) - 1;

        /* WM1 */
        if (!ironlake_compute_srwm(dev, 1, enabled,
                                   ILK_READ_WM1_LATENCY() * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM1_LP_ILK,
                   WM1_LP_SR_EN |
                   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /* WM2 */
        if (!ironlake_compute_srwm(dev, 2, enabled,
                                   ILK_READ_WM2_LATENCY() * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM2_LP_ILK,
                   WM2_LP_EN |
                   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /*
         * WM3 is unsupported on ILK, probably because we don't have latency
         * data for that power state
         */
}
4173
/*
 * sandybridge_update_wm - recompute and program FIFO watermarks on SNB.
 *
 * Same structure as ironlake_update_wm(), but with SNB latency values
 * and watermark parameters, and with a third self-refresh level (WM3).
 */
static void sandybridge_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;

        /* Bitmask of pipes whose WM0 was successfully computed. */
        enabled = 0;
        if (g4x_compute_wm0(dev, 0,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEA_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 1;
        }

        if (g4x_compute_wm0(dev, 1,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEB_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 2;
        }

        /*
         * Calculate and update the self-refresh watermark only when one
         * display plane is used.
         *
         * SNB supports 3 levels of watermark.
         *
         * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
         * and disabled in the descending order
         *
         */
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);

        if (!single_plane_enabled(enabled))
                return;
        /* Convert the single-bit mask to a plane index for the SR math. */
        enabled = ffs(enabled) - 1;

        /* WM1 */
        if (!ironlake_compute_srwm(dev, 1, enabled,
                                   SNB_READ_WM1_LATENCY() * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM1_LP_ILK,
                   WM1_LP_SR_EN |
                   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /* WM2 */
        if (!ironlake_compute_srwm(dev, 2, enabled,
                                   SNB_READ_WM2_LATENCY() * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM2_LP_ILK,
                   WM2_LP_EN |
                   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /* WM3 */
        if (!ironlake_compute_srwm(dev, 3, enabled,
                                   SNB_READ_WM3_LATENCY() * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM3_LP_ILK,
                   WM3_LP_EN |
                   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
}
4269
4270 /**
4271  * intel_update_watermarks - update FIFO watermark values based on current modes
4272  *
4273  * Calculate watermark values for the various WM regs based on current mode
4274  * and plane configuration.
4275  *
4276  * There are several cases to deal with here:
4277  *   - normal (i.e. non-self-refresh)
4278  *   - self-refresh (SR) mode
4279  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4280  *   - lines are small relative to FIFO size (buffer can hold more than 2
4281  *     lines), so need to account for TLB latency
4282  *
4283  *   The normal calculation is:
4284  *     watermark = dotclock * bytes per pixel * latency
4285  *   where latency is platform & configuration dependent (we assume pessimal
4286  *   values here).
4287  *
4288  *   The SR calculation is:
4289  *     watermark = (trunc(latency/line time)+1) * surface width *
4290  *       bytes per pixel
4291  *   where
4292  *     line time = htotal / dotclock
4293  *     surface width = hdisplay for normal plane and 64 for cursor
4294  *   and latency is assumed to be high, as above.
4295  *
4296  * The final value programmed to the register should always be rounded up,
4297  * and include an extra 2 entries to account for clock crossings.
4298  *
4299  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4300  * to set the non-SR watermarks to 8.
4301  */
4302 static void intel_update_watermarks(struct drm_device *dev)
4303 {
4304         struct drm_i915_private *dev_priv = dev->dev_private;
4305
4306         if (dev_priv->display.update_wm)
4307                 dev_priv->display.update_wm(dev);
4308 }
4309
4310 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4311 {
4312         return dev_priv->lvds_use_ssc && i915_panel_use_ssc
4313                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4314 }
4315
/*
 * i9xx_crtc_mode_set - program a mode on a pre-Ironlake (gen2-gen4) CRTC.
 *
 * Finds PLL divisors for @adjusted_mode, programs the FP/DPLL registers,
 * the LVDS port (when driving a panel), the pipe timing registers and the
 * plane, then enables the pipe and plane and flips to the new framebuffer.
 *
 * Returns 0 on success, -EINVAL when no PLL settings fit the mode, or
 * whatever error intel_pipe_set_base() reports.
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode,
                              int x, int y,
                              struct drm_framebuffer *old_fb)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
        u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
        const intel_limit_t *limit;
        int ret;
        u32 temp;
        u32 lvds_sync = 0;

        /* Classify the encoders attached to this CRTC; the output type
         * drives the refclk choice, PLL limits and DPLL mode bits below. */
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
                if (encoder->base.crtc != crtc)
                        continue;

                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
                case INTEL_OUTPUT_SDVO:
                case INTEL_OUTPUT_HDMI:
                        is_sdvo = true;
                        if (encoder->needs_tv_clock)
                                is_tv = true;
                        break;
                case INTEL_OUTPUT_DVO:
                        is_dvo = true;
                        break;
                case INTEL_OUTPUT_TVOUT:
                        is_tv = true;
                        break;
                case INTEL_OUTPUT_ANALOG:
                        is_crt = true;
                        break;
                case INTEL_OUTPUT_DISPLAYPORT:
                        is_dp = true;
                        break;
                }

                num_connectors++;
        }

        /* Pick the reference clock: SSC for a lone LVDS panel, otherwise
         * 96 MHz (gen3+) or 48 MHz (gen2). */
        if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
                refclk = dev_priv->lvds_ssc_freq * 1000;
                DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
                              refclk / 1000);
        } else if (!IS_GEN2(dev)) {
                refclk = 96000;
        } else {
                refclk = 48000;
        }

        /*
         * Returns a set of divisors for the desired target clock with the given
         * refclk, or FALSE.  The returned values represent the clock equation:
         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
        limit = intel_limit(crtc, refclk);
        ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }

        /* Ensure that the cursor is valid for the new mode before changing... */
        intel_crtc_update_cursor(crtc, true);

        /* Try to also find divisors for the panel's downclocked refresh
         * rate; usable only if P matches, since downclocking switches
         * between FP0 and FP1 which share the P divisor. */
        if (is_lvds && dev_priv->lvds_downclock_avail) {
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk,
                                                    &reduced_clock);
                if (has_reduced_clock && (clock.p != reduced_clock.p)) {
                        /*
                         * If the different P is found, it means that we can't
                         * switch the display clock by using the FP0/FP1.
                         * In such case we will disable the LVDS downclock
                         * feature.
                         */
                        DRM_DEBUG_KMS("Different P is found for "
                                      "LVDS clock/downclock\n");
                        has_reduced_clock = 0;
                }
        }
        /* SDVO TV has fixed PLL values depend on its clock range,
           this mirrors vbios setting. */
        if (is_sdvo && is_tv) {
                if (adjusted_mode->clock >= 100000
                    && adjusted_mode->clock < 140500) {
                        clock.p1 = 2;
                        clock.p2 = 10;
                        clock.n = 3;
                        clock.m1 = 16;
                        clock.m2 = 8;
                } else if (adjusted_mode->clock >= 140500
                           && adjusted_mode->clock <= 200000) {
                        clock.p1 = 1;
                        clock.p2 = 10;
                        clock.n = 6;
                        clock.m1 = 12;
                        clock.m2 = 8;
                }
        }

        /* Pack the N/M1/M2 divisors into the FP register format;
         * Pineview encodes N as a one-hot bit. */
        if (IS_PINEVIEW(dev)) {
                fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
                if (has_reduced_clock)
                        fp2 = (1 << reduced_clock.n) << 16 |
                                reduced_clock.m1 << 8 | reduced_clock.m2;
        } else {
                fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
                if (has_reduced_clock)
                        fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
                                reduced_clock.m2;
        }

        /* Build up the DPLL control value for the chosen output/divisors. */
        dpll = DPLL_VGA_MODE_DIS;

        if (!IS_GEN2(dev)) {
                if (is_lvds)
                        dpll |= DPLLB_MODE_LVDS;
                else
                        dpll |= DPLLB_MODE_DAC_SERIAL;
                if (is_sdvo) {
                        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
                        if (pixel_multiplier > 1) {
                                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                                        dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
                        }
                        dpll |= DPLL_DVO_HIGH_SPEED;
                }
                if (is_dp)
                        dpll |= DPLL_DVO_HIGH_SPEED;

                /* compute bitmask from p1 value */
                if (IS_PINEVIEW(dev))
                        dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
                else {
                        dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                        if (IS_G4X(dev) && has_reduced_clock)
                                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
                }
                switch (clock.p2) {
                case 5:
                        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                        break;
                case 7:
                        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                        break;
                case 10:
                        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                        break;
                case 14:
                        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                        break;
                }
                if (INTEL_INFO(dev)->gen >= 4)
                        dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
        } else {
                if (is_lvds) {
                        dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                } else {
                        if (clock.p1 == 2)
                                dpll |= PLL_P1_DIVIDE_BY_TWO;
                        else
                                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                        if (clock.p2 == 4)
                                dpll |= PLL_P2_DIVIDE_BY_4;
                }
        }

        if (is_sdvo && is_tv)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
        else if (is_tv)
                /* XXX: just matching BIOS for now */
                /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
                dpll |= 3;
        else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        /* setup pipeconf */
        pipeconf = I915_READ(PIPECONF(pipe));

        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;

        /* Ironlake's plane is forced to pipe, bit 24 is to
           enable color space conversion */
        /* NOTE(review): the comment above looks inherited from the
         * Ironlake path; here these bits select which pipe feeds the
         * plane - confirm against the register documentation. */
        if (pipe == 0)
                dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
        else
                dspcntr |= DISPPLANE_SEL_PIPE_B;

        if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
                /* Enable pixel doubling when the dot clock is > 90% of the (display)
                 * core speed.
                 *
                 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
                 * pipe == 0 check?
                 */
                if (mode->clock >
                    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
                        pipeconf |= PIPECONF_DOUBLE_WIDE;
                else
                        pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }

        dpll |= DPLL_VCO_ENABLE;

        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);

        /* Write the divisors, then the DPLL with the VCO still disabled
         * so the PLL can settle before being switched on. */
        I915_WRITE(FP0(pipe), fp);
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
         */
        if (is_lvds) {
                temp = I915_READ(LVDS);
                temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
                if (pipe == 1) {
                        temp |= LVDS_PIPEB_SELECT;
                } else {
                        temp &= ~LVDS_PIPEB_SELECT;
                }
                /* set the corresponding LVDS_BORDER bit */
                temp |= dev_priv->lvds_border_bits;
                /* Set the B0-B3 data pairs corresponding to whether we're going to
                 * set the DPLLs for dual-channel mode or not.
                 */
                if (clock.p2 == 7)
                        temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
                else
                        temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

                /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
                 * appropriately here, but we need to look more thoroughly into how
                 * panels behave in the two modes.
                 */
                /* set the dithering flag on LVDS as needed */
                if (INTEL_INFO(dev)->gen >= 4) {
                        if (dev_priv->lvds_dither)
                                temp |= LVDS_ENABLE_DITHER;
                        else
                                temp &= ~LVDS_ENABLE_DITHER;
                }
                /* Reprogram the hsync/vsync polarity bits only when they
                 * differ from what the register already holds. */
                if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
                        lvds_sync |= LVDS_HSYNC_POLARITY;
                if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
                        lvds_sync |= LVDS_VSYNC_POLARITY;
                if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
                    != lvds_sync) {
                        char flags[2] = "-+";
                        DRM_INFO("Changing LVDS panel from "
                                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                                 flags[!(temp & LVDS_HSYNC_POLARITY)],
                                 flags[!(temp & LVDS_VSYNC_POLARITY)],
                                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
                        temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
                        temp |= lvds_sync;
                }
                I915_WRITE(LVDS, temp);
        }

        if (is_dp) {
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
        }

        /* Now turn the VCO on. */
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* Gen4+ has a separate register for the pixel multiplier. */
                temp = 0;
                if (is_sdvo) {
                        temp = intel_mode_get_pixel_multiplier(adjusted_mode);
                        if (temp > 1)
                                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                        else
                                temp = 0;
                }
                I915_WRITE(DPLL_MD(pipe), temp);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(DPLL(pipe), dpll);
        }

        /* Program FP1 with the downclock divisors when LVDS downclocking
         * is usable, else mirror FP0 so both banks agree. */
        intel_crtc->lowfreq_avail = false;
        if (is_lvds && has_reduced_clock && i915_powersave) {
                I915_WRITE(FP1(pipe), fp2);
                intel_crtc->lowfreq_avail = true;
                if (HAS_PIPE_CXSR(dev)) {
                        DRM_DEBUG_KMS("enabling CxSR downclocking\n");
                        pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
                }
        } else {
                I915_WRITE(FP1(pipe), fp);
                if (HAS_PIPE_CXSR(dev)) {
                        DRM_DEBUG_KMS("disabling CxSR downclocking\n");
                        pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
                }
        }

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                /* the chip adds 2 halflines automatically */
                adjusted_mode->crtc_vdisplay -= 1;
                adjusted_mode->crtc_vtotal -= 1;
                adjusted_mode->crtc_vblank_start -= 1;
                adjusted_mode->crtc_vblank_end -= 1;
                adjusted_mode->crtc_vsync_end -= 1;
                adjusted_mode->crtc_vsync_start -= 1;
        } else
                pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

        /* Pipe timing registers take (value - 1), low 16 bits paired with
         * the corresponding "end/total" value in the high 16 bits. */
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
        I915_WRITE(HBLANK(pipe),
                   (adjusted_mode->crtc_hblank_start - 1) |
                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
        I915_WRITE(HSYNC(pipe),
                   (adjusted_mode->crtc_hsync_start - 1) |
                   ((adjusted_mode->crtc_hsync_end - 1) << 16));

        I915_WRITE(VTOTAL(pipe),
                   (adjusted_mode->crtc_vdisplay - 1) |
                   ((adjusted_mode->crtc_vtotal - 1) << 16));
        I915_WRITE(VBLANK(pipe),
                   (adjusted_mode->crtc_vblank_start - 1) |
                   ((adjusted_mode->crtc_vblank_end - 1) << 16));
        I915_WRITE(VSYNC(pipe),
                   (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* pipesrc and dspsize control the size that is scaled from,
         * which should always be the user's requested size.
         */
        I915_WRITE(DSPSIZE(plane),
                   ((mode->vdisplay - 1) << 16) |
                   (mode->hdisplay - 1));
        I915_WRITE(DSPPOS(plane), 0);
        I915_WRITE(PIPESRC(pipe),
                   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

        I915_WRITE(PIPECONF(pipe), pipeconf);
        POSTING_READ(PIPECONF(pipe));
        intel_enable_pipe(dev_priv, pipe, false);

        intel_wait_for_vblank(dev, pipe);

        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
        intel_enable_plane(dev_priv, plane, pipe);

        ret = intel_pipe_set_base(crtc, x, y, old_fb);

        /* Mode/plane configuration changed: refresh the FIFO watermarks. */
        intel_update_watermarks(dev);

        return ret;
}
4703
4704 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4705                                   struct drm_display_mode *mode,
4706                                   struct drm_display_mode *adjusted_mode,
4707                                   int x, int y,
4708                                   struct drm_framebuffer *old_fb)
4709 {
4710         struct drm_device *dev = crtc->dev;
4711         struct drm_i915_private *dev_priv = dev->dev_private;
4712         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4713         int pipe = intel_crtc->pipe;
4714         int plane = intel_crtc->plane;
4715         int refclk, num_connectors = 0;
4716         intel_clock_t clock, reduced_clock;
4717         u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4718         bool ok, has_reduced_clock = false, is_sdvo = false;
4719         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4720         struct intel_encoder *has_edp_encoder = NULL;
4721         struct drm_mode_config *mode_config = &dev->mode_config;
4722         struct intel_encoder *encoder;
4723         const intel_limit_t *limit;
4724         int ret;
4725         struct fdi_m_n m_n = {0};
4726         u32 temp;
4727         u32 lvds_sync = 0;
4728         int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
4729
4730         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4731                 if (encoder->base.crtc != crtc)
4732                         continue;
4733
4734                 switch (encoder->type) {
4735                 case INTEL_OUTPUT_LVDS:
4736                         is_lvds = true;
4737                         break;
4738                 case INTEL_OUTPUT_SDVO:
4739                 case INTEL_OUTPUT_HDMI:
4740                         is_sdvo = true;
4741                         if (encoder->needs_tv_clock)
4742                                 is_tv = true;
4743                         break;
4744                 case INTEL_OUTPUT_TVOUT:
4745                         is_tv = true;
4746                         break;
4747                 case INTEL_OUTPUT_ANALOG:
4748                         is_crt = true;
4749                         break;
4750                 case INTEL_OUTPUT_DISPLAYPORT:
4751                         is_dp = true;
4752                         break;
4753                 case INTEL_OUTPUT_EDP:
4754                         has_edp_encoder = encoder;
4755                         break;
4756                 }
4757
4758                 num_connectors++;
4759         }
4760
4761         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4762                 refclk = dev_priv->lvds_ssc_freq * 1000;
4763                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4764                               refclk / 1000);
4765         } else {
4766                 refclk = 96000;
4767                 if (!has_edp_encoder ||
4768                     intel_encoder_is_pch_edp(&has_edp_encoder->base))
4769                         refclk = 120000; /* 120Mhz refclk */
4770         }
4771
4772         /*
4773          * Returns a set of divisors for the desired target clock with the given
4774          * refclk, or FALSE.  The returned values represent the clock equation:
4775          * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4776          */
4777         limit = intel_limit(crtc, refclk);
4778         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4779         if (!ok) {
4780                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4781                 return -EINVAL;
4782         }
4783
4784         /* Ensure that the cursor is valid for the new mode before changing... */
4785         intel_crtc_update_cursor(crtc, true);
4786
4787         if (is_lvds && dev_priv->lvds_downclock_avail) {
4788                 has_reduced_clock = limit->find_pll(limit, crtc,
4789                                                     dev_priv->lvds_downclock,
4790                                                     refclk,
4791                                                     &reduced_clock);
4792                 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4793                         /*
4794                          * If the different P is found, it means that we can't
4795                          * switch the display clock by using the FP0/FP1.
4796                          * In such case we will disable the LVDS downclock
4797                          * feature.
4798                          */
4799                         DRM_DEBUG_KMS("Different P is found for "
4800                                       "LVDS clock/downclock\n");
4801                         has_reduced_clock = 0;
4802                 }
4803         }
4804         /* SDVO TV has fixed PLL values depend on its clock range,
4805            this mirrors vbios setting. */
4806         if (is_sdvo && is_tv) {
4807                 if (adjusted_mode->clock >= 100000
4808                     && adjusted_mode->clock < 140500) {
4809                         clock.p1 = 2;
4810                         clock.p2 = 10;
4811                         clock.n = 3;
4812                         clock.m1 = 16;
4813                         clock.m2 = 8;
4814                 } else if (adjusted_mode->clock >= 140500
4815                            && adjusted_mode->clock <= 200000) {
4816                         clock.p1 = 1;
4817                         clock.p2 = 10;
4818                         clock.n = 6;
4819                         clock.m1 = 12;
4820                         clock.m2 = 8;
4821                 }
4822         }
4823
4824         /* FDI link */
4825         pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4826         lane = 0;
4827         /* CPU eDP doesn't require FDI link, so just set DP M/N
4828            according to current link config */
4829         if (has_edp_encoder &&
4830             !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4831                 target_clock = mode->clock;
4832                 intel_edp_link_config(has_edp_encoder,
4833                                       &lane, &link_bw);
4834         } else {
4835                 /* [e]DP over FDI requires target mode clock
4836                    instead of link clock */
4837                 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4838                         target_clock = mode->clock;
4839                 else
4840                         target_clock = adjusted_mode->clock;
4841
4842                 /* FDI is a binary signal running at ~2.7GHz, encoding
4843                  * each output octet as 10 bits. The actual frequency
4844                  * is stored as a divider into a 100MHz clock, and the
4845                  * mode pixel clock is stored in units of 1KHz.
4846                  * Hence the bw of each lane in terms of the mode signal
4847                  * is:
4848                  */
4849                 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4850         }
4851
4852         /* determine panel color depth */
4853         temp = I915_READ(PIPECONF(pipe));
4854         temp &= ~PIPE_BPC_MASK;
4855         if (is_lvds) {
4856                 /* the BPC will be 6 if it is 18-bit LVDS panel */
4857                 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
4858                         temp |= PIPE_8BPC;
4859                 else
4860                         temp |= PIPE_6BPC;
4861         } else if (has_edp_encoder) {
4862                 switch (dev_priv->edp.bpp/3) {
4863                 case 8:
4864                         temp |= PIPE_8BPC;
4865                         break;
4866                 case 10:
4867                         temp |= PIPE_10BPC;
4868                         break;
4869                 case 6:
4870                         temp |= PIPE_6BPC;
4871                         break;
4872                 case 12:
4873                         temp |= PIPE_12BPC;
4874                         break;
4875                 }
4876         } else
4877                 temp |= PIPE_8BPC;
4878         I915_WRITE(PIPECONF(pipe), temp);
4879
4880         switch (temp & PIPE_BPC_MASK) {
4881         case PIPE_8BPC:
4882                 bpp = 24;
4883                 break;
4884         case PIPE_10BPC:
4885                 bpp = 30;
4886                 break;
4887         case PIPE_6BPC:
4888                 bpp = 18;
4889                 break;
4890         case PIPE_12BPC:
4891                 bpp = 36;
4892                 break;
4893         default:
4894                 DRM_ERROR("unknown pipe bpc value\n");
4895                 bpp = 24;
4896         }
4897
4898         if (!lane) {
4899                 /*
4900                  * Account for spread spectrum to avoid
4901                  * oversubscribing the link. Max center spread
4902                  * is 2.5%; use 5% for safety's sake.
4903                  */
4904                 u32 bps = target_clock * bpp * 21 / 20;
4905                 lane = bps / (link_bw * 8) + 1;
4906         }
4907
4908         intel_crtc->fdi_lanes = lane;
4909
4910         if (pixel_multiplier > 1)
4911                 link_bw *= pixel_multiplier;
4912         ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
4913
4914         /* Ironlake: try to setup display ref clock before DPLL
4915          * enabling. This is only under driver's control after
4916          * PCH B stepping, previous chipset stepping should be
4917          * ignoring this setting.
4918          */
4919         temp = I915_READ(PCH_DREF_CONTROL);
4920         /* Always enable nonspread source */
4921         temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4922         temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4923         temp &= ~DREF_SSC_SOURCE_MASK;
4924         temp |= DREF_SSC_SOURCE_ENABLE;
4925         I915_WRITE(PCH_DREF_CONTROL, temp);
4926
4927         POSTING_READ(PCH_DREF_CONTROL);
4928         udelay(200);
4929
4930         if (has_edp_encoder) {
4931                 if (intel_panel_use_ssc(dev_priv)) {
4932                         temp |= DREF_SSC1_ENABLE;
4933                         I915_WRITE(PCH_DREF_CONTROL, temp);
4934
4935                         POSTING_READ(PCH_DREF_CONTROL);
4936                         udelay(200);
4937                 }
4938                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4939
4940                 /* Enable CPU source on CPU attached eDP */
4941                 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4942                         if (intel_panel_use_ssc(dev_priv))
4943                                 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4944                         else
4945                                 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4946                 } else {
4947                         /* Enable SSC on PCH eDP if needed */
4948                         if (intel_panel_use_ssc(dev_priv)) {
4949                                 DRM_ERROR("enabling SSC on PCH\n");
4950                                 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4951                         }
4952                 }
4953                 I915_WRITE(PCH_DREF_CONTROL, temp);
4954                 POSTING_READ(PCH_DREF_CONTROL);
4955                 udelay(200);
4956         }
4957
4958         fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4959         if (has_reduced_clock)
4960                 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4961                         reduced_clock.m2;
4962
4963         /* Enable autotuning of the PLL clock (if permissible) */
4964         factor = 21;
4965         if (is_lvds) {
4966                 if ((intel_panel_use_ssc(dev_priv) &&
4967                      dev_priv->lvds_ssc_freq == 100) ||
4968                     (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4969                         factor = 25;
4970         } else if (is_sdvo && is_tv)
4971                 factor = 20;
4972
4973         if (clock.m1 < factor * clock.n)
4974                 fp |= FP_CB_TUNE;
4975
4976         dpll = 0;
4977
4978         if (is_lvds)
4979                 dpll |= DPLLB_MODE_LVDS;
4980         else
4981                 dpll |= DPLLB_MODE_DAC_SERIAL;
4982         if (is_sdvo) {
4983                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4984                 if (pixel_multiplier > 1) {
4985                         dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4986                 }
4987                 dpll |= DPLL_DVO_HIGH_SPEED;
4988         }
4989         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4990                 dpll |= DPLL_DVO_HIGH_SPEED;
4991
4992         /* compute bitmask from p1 value */
4993         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4994         /* also FPA1 */
4995         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4996
4997         switch (clock.p2) {
4998         case 5:
4999                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5000                 break;
5001         case 7:
5002                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5003                 break;
5004         case 10:
5005                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5006                 break;
5007         case 14:
5008                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5009                 break;
5010         }
5011
5012         if (is_sdvo && is_tv)
5013                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5014         else if (is_tv)
5015                 /* XXX: just matching BIOS for now */
5016                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5017                 dpll |= 3;
5018         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5019                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5020         else
5021                 dpll |= PLL_REF_INPUT_DREFCLK;
5022
5023         /* setup pipeconf */
5024         pipeconf = I915_READ(PIPECONF(pipe));
5025
5026         /* Set up the display plane register */
5027         dspcntr = DISPPLANE_GAMMA_ENABLE;
5028
5029         DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5030         drm_mode_debug_printmodeline(mode);
5031
5032         /* PCH eDP needs FDI, but CPU eDP does not */
5033         if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5034                 I915_WRITE(PCH_FP0(pipe), fp);
5035                 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5036
5037                 POSTING_READ(PCH_DPLL(pipe));
5038                 udelay(150);
5039         }
5040
5041         /* enable transcoder DPLL */
5042         if (HAS_PCH_CPT(dev)) {
5043                 temp = I915_READ(PCH_DPLL_SEL);
5044                 switch (pipe) {
5045                 case 0:
5046                         temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
5047                         break;
5048                 case 1:
5049                         temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
5050                         break;
5051                 case 2:
5052                         /* FIXME: manage transcoder PLLs? */
5053                         temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
5054                         break;
5055                 default:
5056                         BUG();
5057                 }
5058                 I915_WRITE(PCH_DPLL_SEL, temp);
5059
5060                 POSTING_READ(PCH_DPLL_SEL);
5061                 udelay(150);
5062         }
5063
5064         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5065          * This is an exception to the general rule that mode_set doesn't turn
5066          * things on.
5067          */
5068         if (is_lvds) {
5069                 temp = I915_READ(PCH_LVDS);
5070                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5071                 if (pipe == 1) {
5072                         if (HAS_PCH_CPT(dev))
5073                                 temp |= PORT_TRANS_B_SEL_CPT;
5074                         else
5075                                 temp |= LVDS_PIPEB_SELECT;
5076                 } else {
5077                         if (HAS_PCH_CPT(dev))
5078                                 temp &= ~PORT_TRANS_SEL_MASK;
5079                         else
5080                                 temp &= ~LVDS_PIPEB_SELECT;
5081                 }
5082                 /* set the corresponding LVDS_BORDER bit */
5083                 temp |= dev_priv->lvds_border_bits;
5084                 /* Set the B0-B3 data pairs corresponding to whether we're going to
5085                  * set the DPLLs for dual-channel mode or not.
5086                  */
5087                 if (clock.p2 == 7)
5088                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5089                 else
5090                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5091
5092                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5093                  * appropriately here, but we need to look more thoroughly into how
5094                  * panels behave in the two modes.
5095                  */
5096                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5097                         lvds_sync |= LVDS_HSYNC_POLARITY;
5098                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5099                         lvds_sync |= LVDS_VSYNC_POLARITY;
5100                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5101                     != lvds_sync) {
5102                         char flags[2] = "-+";
5103                         DRM_INFO("Changing LVDS panel from "
5104                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5105                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
5106                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
5107                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5108                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5109                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5110                         temp |= lvds_sync;
5111                 }
5112                 I915_WRITE(PCH_LVDS, temp);
5113         }
5114
5115         /* set the dithering flag and clear for anything other than a panel. */
5116         pipeconf &= ~PIPECONF_DITHER_EN;
5117         pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5118         if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
5119                 pipeconf |= PIPECONF_DITHER_EN;
5120                 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5121         }
5122
5123         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5124                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5125         } else {
5126                 /* For non-DP output, clear any trans DP clock recovery setting.*/
5127                 I915_WRITE(TRANSDATA_M1(pipe), 0);
5128                 I915_WRITE(TRANSDATA_N1(pipe), 0);
5129                 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5130                 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5131         }
5132
5133         if (!has_edp_encoder ||
5134             intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5135                 I915_WRITE(PCH_DPLL(pipe), dpll);
5136
5137                 /* Wait for the clocks to stabilize. */
5138                 POSTING_READ(PCH_DPLL(pipe));
5139                 udelay(150);
5140
5141                 /* The pixel multiplier can only be updated once the
5142                  * DPLL is enabled and the clocks are stable.
5143                  *
5144                  * So write it again.
5145                  */
5146                 I915_WRITE(PCH_DPLL(pipe), dpll);
5147         }
5148
5149         intel_crtc->lowfreq_avail = false;
5150         if (is_lvds && has_reduced_clock && i915_powersave) {
5151                 I915_WRITE(PCH_FP1(pipe), fp2);
5152                 intel_crtc->lowfreq_avail = true;
5153                 if (HAS_PIPE_CXSR(dev)) {
5154                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5155                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5156                 }
5157         } else {
5158                 I915_WRITE(PCH_FP1(pipe), fp);
5159                 if (HAS_PIPE_CXSR(dev)) {
5160                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5161                         pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5162                 }
5163         }
5164
5165         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5166                 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5167                 /* the chip adds 2 halflines automatically */
5168                 adjusted_mode->crtc_vdisplay -= 1;
5169                 adjusted_mode->crtc_vtotal -= 1;
5170                 adjusted_mode->crtc_vblank_start -= 1;
5171                 adjusted_mode->crtc_vblank_end -= 1;
5172                 adjusted_mode->crtc_vsync_end -= 1;
5173                 adjusted_mode->crtc_vsync_start -= 1;
5174         } else
5175                 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
5176
5177         I915_WRITE(HTOTAL(pipe),
5178                    (adjusted_mode->crtc_hdisplay - 1) |
5179                    ((adjusted_mode->crtc_htotal - 1) << 16));
5180         I915_WRITE(HBLANK(pipe),
5181                    (adjusted_mode->crtc_hblank_start - 1) |
5182                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5183         I915_WRITE(HSYNC(pipe),
5184                    (adjusted_mode->crtc_hsync_start - 1) |
5185                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5186
5187         I915_WRITE(VTOTAL(pipe),
5188                    (adjusted_mode->crtc_vdisplay - 1) |
5189                    ((adjusted_mode->crtc_vtotal - 1) << 16));
5190         I915_WRITE(VBLANK(pipe),
5191                    (adjusted_mode->crtc_vblank_start - 1) |
5192                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
5193         I915_WRITE(VSYNC(pipe),
5194                    (adjusted_mode->crtc_vsync_start - 1) |
5195                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5196
5197         /* pipesrc controls the size that is scaled from, which should
5198          * always be the user's requested size.
5199          */
5200         I915_WRITE(PIPESRC(pipe),
5201                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5202
5203         I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5204         I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5205         I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5206         I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5207
5208         if (has_edp_encoder &&
5209             !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5210                 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5211         }
5212
5213         I915_WRITE(PIPECONF(pipe), pipeconf);
5214         POSTING_READ(PIPECONF(pipe));
5215
5216         intel_wait_for_vblank(dev, pipe);
5217
5218         if (IS_GEN5(dev)) {
5219                 /* enable address swizzle for tiling buffer */
5220                 temp = I915_READ(DISP_ARB_CTL);
5221                 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
5222         }
5223
5224         I915_WRITE(DSPCNTR(plane), dspcntr);
5225         POSTING_READ(DSPCNTR(plane));
5226
5227         ret = intel_pipe_set_base(crtc, x, y, old_fb);
5228
5229         intel_update_watermarks(dev);
5230
5231         return ret;
5232 }
5233
5234 static int intel_crtc_mode_set(struct drm_crtc *crtc,
5235                                struct drm_display_mode *mode,
5236                                struct drm_display_mode *adjusted_mode,
5237                                int x, int y,
5238                                struct drm_framebuffer *old_fb)
5239 {
5240         struct drm_device *dev = crtc->dev;
5241         struct drm_i915_private *dev_priv = dev->dev_private;
5242         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5243         int pipe = intel_crtc->pipe;
5244         int ret;
5245
5246         drm_vblank_pre_modeset(dev, pipe);
5247
5248         ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5249                                               x, y, old_fb);
5250
5251         drm_vblank_post_modeset(dev, pipe);
5252
5253         return ret;
5254 }
5255
5256 /** Loads the palette/gamma unit for the CRTC with the prepared values */
5257 void intel_crtc_load_lut(struct drm_crtc *crtc)
5258 {
5259         struct drm_device *dev = crtc->dev;
5260         struct drm_i915_private *dev_priv = dev->dev_private;
5261         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5262         int palreg = PALETTE(intel_crtc->pipe);
5263         int i;
5264
5265         /* The clocks have to be on to load the palette. */
5266         if (!crtc->enabled)
5267                 return;
5268
5269         /* use legacy palette for Ironlake */
5270         if (HAS_PCH_SPLIT(dev))
5271                 palreg = LGC_PALETTE(intel_crtc->pipe);
5272
5273         for (i = 0; i < 256; i++) {
5274                 I915_WRITE(palreg + 4 * i,
5275                            (intel_crtc->lut_r[i] << 16) |
5276                            (intel_crtc->lut_g[i] << 8) |
5277                            intel_crtc->lut_b[i]);
5278         }
5279 }
5280
/*
 * Show or hide the hardware cursor on i845/i865.
 *
 * @base: GTT/physical address of the cursor image; zero means "hide".
 *
 * These chips have a single cursor register pair (no per-pipe cursor),
 * and CURABASE may only be reprogrammed while the cursor is disabled,
 * hence the visibility-change-only fast path below.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Nothing to do unless the visibility state actually changes. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	/* Remember the state we just programmed. */
	intel_crtc->cursor_visible = visible;
}
5310
/*
 * Show, hide or move the hardware cursor on i9xx-class hardware.
 *
 * @base: cursor image address; zero means "hide".
 *
 * Unlike i845, these chips have per-pipe cursor registers, and CURBASE
 * may be written at any time — the hardware latches it on the next
 * vblank, so the base write happens unconditionally at the end.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	/* Reprogram the control register only on a visibility change. */
	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
5336
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Recompute the cursor position/visibility and program the hardware.
 *
 * @on: false forces the cursor off regardless of the stored position.
 *
 * A base address of 0 is used throughout as the "cursor hidden"
 * sentinel; any condition that puts the cursor fully off-screen clears
 * base so the chip-specific helper disables it.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		/* NOTE(review): these use '>' rather than '>=', so a
		 * cursor positioned exactly at the framebuffer edge is
		 * still considered visible — confirm this is intended.
		 */
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* Negative coordinates are encoded as magnitude plus a sign bit
	 * in CURPOS; hide the cursor entirely once it is fully off the
	 * left/top edge.
	 */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Skip the register writes when the cursor stays hidden. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	I915_WRITE(CURPOS(pipe), pos);
	/* 845G/865G have a single, restricted cursor register set. */
	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
5393
5394 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5395                                  struct drm_file *file,
5396                                  uint32_t handle,
5397                                  uint32_t width, uint32_t height)
5398 {
5399         struct drm_device *dev = crtc->dev;
5400         struct drm_i915_private *dev_priv = dev->dev_private;
5401         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5402         struct drm_i915_gem_object *obj;
5403         uint32_t addr;
5404         int ret;
5405
5406         DRM_DEBUG_KMS("\n");
5407
5408         /* if we want to turn off the cursor ignore width and height */
5409         if (!handle) {
5410                 DRM_DEBUG_KMS("cursor off\n");
5411                 addr = 0;
5412                 obj = NULL;
5413                 mutex_lock(&dev->struct_mutex);
5414                 goto finish;
5415         }
5416
5417         /* Currently we only support 64x64 cursors */
5418         if (width != 64 || height != 64) {
5419                 DRM_ERROR("we currently only support 64x64 cursors\n");
5420                 return -EINVAL;
5421         }
5422
5423         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
5424         if (&obj->base == NULL)
5425                 return -ENOENT;
5426
5427         if (obj->base.size < width * height * 4) {
5428                 DRM_ERROR("buffer is to small\n");
5429                 ret = -ENOMEM;
5430                 goto fail;
5431         }
5432
5433         /* we only need to pin inside GTT if cursor is non-phy */
5434         mutex_lock(&dev->struct_mutex);
5435         if (!dev_priv->info->cursor_needs_physical) {
5436                 if (obj->tiling_mode) {
5437                         DRM_ERROR("cursor cannot be tiled\n");
5438                         ret = -EINVAL;
5439                         goto fail_locked;
5440                 }
5441
5442                 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
5443                 if (ret) {
5444                         DRM_ERROR("failed to pin cursor bo\n");
5445                         goto fail_locked;
5446                 }
5447
5448                 ret = i915_gem_object_set_to_gtt_domain(obj, 0);
5449                 if (ret) {
5450                         DRM_ERROR("failed to move cursor bo into the GTT\n");
5451                         goto fail_unpin;
5452                 }
5453
5454                 ret = i915_gem_object_put_fence(obj);
5455                 if (ret) {
5456                         DRM_ERROR("failed to move cursor bo into the GTT\n");
5457                         goto fail_unpin;
5458                 }
5459
5460                 addr = obj->gtt_offset;
5461         } else {
5462                 int align = IS_I830(dev) ? 16 * 1024 : 256;
5463                 ret = i915_gem_attach_phys_object(dev, obj,
5464                                                   (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5465                                                   align);
5466                 if (ret) {
5467                         DRM_ERROR("failed to attach phys object\n");
5468                         goto fail_locked;
5469                 }
5470                 addr = obj->phys_obj->handle->busaddr;
5471         }
5472
5473         if (IS_GEN2(dev))
5474                 I915_WRITE(CURSIZE, (height << 12) | width);
5475
5476  finish:
5477         if (intel_crtc->cursor_bo) {
5478                 if (dev_priv->info->cursor_needs_physical) {
5479                         if (intel_crtc->cursor_bo != obj)
5480                                 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5481                 } else
5482                         i915_gem_object_unpin(intel_crtc->cursor_bo);
5483                 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
5484         }
5485
5486         mutex_unlock(&dev->struct_mutex);
5487
5488         intel_crtc->cursor_addr = addr;
5489         intel_crtc->cursor_bo = obj;
5490         intel_crtc->cursor_width = width;
5491         intel_crtc->cursor_height = height;
5492
5493         intel_crtc_update_cursor(crtc, true);
5494
5495         return 0;
5496 fail_unpin:
5497         i915_gem_object_unpin(obj);
5498 fail_locked:
5499         mutex_unlock(&dev->struct_mutex);
5500 fail:
5501         drm_gem_object_unreference_unlocked(&obj->base);
5502         return ret;
5503 }
5504
5505 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5506 {
5507         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5508
5509         intel_crtc->cursor_x = x;
5510         intel_crtc->cursor_y = y;
5511
5512         intel_crtc_update_cursor(crtc, true);
5513
5514         return 0;
5515 }
5516
5517 /** Sets the color ramps on behalf of RandR */
5518 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5519                                  u16 blue, int regno)
5520 {
5521         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5522
5523         intel_crtc->lut_r[regno] = red >> 8;
5524         intel_crtc->lut_g[regno] = green >> 8;
5525         intel_crtc->lut_b[regno] = blue >> 8;
5526 }
5527
5528 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5529                              u16 *blue, int regno)
5530 {
5531         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5532
5533         *red = intel_crtc->lut_r[regno] << 8;
5534         *green = intel_crtc->lut_g[regno] << 8;
5535         *blue = intel_crtc->lut_b[regno] << 8;
5536 }
5537
5538 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
5539                                  u16 *blue, uint32_t start, uint32_t size)
5540 {
5541         int end = (start + size > 256) ? 256 : start + size, i;
5542         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5543
5544         for (i = start; i < end; i++) {
5545                 intel_crtc->lut_r[i] = red[i] >> 8;
5546                 intel_crtc->lut_g[i] = green[i] >> 8;
5547                 intel_crtc->lut_b[i] = blue[i] >> 8;
5548         }
5549
5550         intel_crtc_load_lut(crtc);
5551 }
5552
5553 /**
5554  * Get a pipe with a simple mode set on it for doing load-based monitor
5555  * detection.
5556  *
5557  * It will be up to the load-detect code to adjust the pipe as appropriate for
5558  * its requirements.  The pipe will be connected to no other encoders.
5559  *
5560  * Currently this code will only succeed if there is a pipe with no encoders
5561  * configured for it.  In the future, it could choose to temporarily disable
5562  * some outputs to free up a pipe for its use.
5563  *
 * \return true if a CRTC with a mode set was acquired, false otherwise.
5565  */
5566
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode programmed on a borrowed pipe during load-based monitor
 * detection when the caller does not supply one. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
5572
5573 static struct drm_framebuffer *
5574 intel_framebuffer_create(struct drm_device *dev,
5575                          struct drm_mode_fb_cmd *mode_cmd,
5576                          struct drm_i915_gem_object *obj)
5577 {
5578         struct intel_framebuffer *intel_fb;
5579         int ret;
5580
5581         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
5582         if (!intel_fb) {
5583                 drm_gem_object_unreference_unlocked(&obj->base);
5584                 return ERR_PTR(-ENOMEM);
5585         }
5586
5587         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5588         if (ret) {
5589                 drm_gem_object_unreference_unlocked(&obj->base);
5590                 kfree(intel_fb);
5591                 return ERR_PTR(ret);
5592         }
5593
5594         return &intel_fb->base;
5595 }
5596
5597 static u32
5598 intel_framebuffer_pitch_for_width(int width, int bpp)
5599 {
5600         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5601         return ALIGN(pitch, 64);
5602 }
5603
5604 static u32
5605 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5606 {
5607         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5608         return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5609 }
5610
5611 static struct drm_framebuffer *
5612 intel_framebuffer_create_for_mode(struct drm_device *dev,
5613                                   struct drm_display_mode *mode,
5614                                   int depth, int bpp)
5615 {
5616         struct drm_i915_gem_object *obj;
5617         struct drm_mode_fb_cmd mode_cmd;
5618
5619         obj = i915_gem_alloc_object(dev,
5620                                     intel_framebuffer_size_for_mode(mode, bpp));
5621         if (obj == NULL)
5622                 return ERR_PTR(-ENOMEM);
5623
5624         mode_cmd.width = mode->hdisplay;
5625         mode_cmd.height = mode->vdisplay;
5626         mode_cmd.depth = depth;
5627         mode_cmd.bpp = bpp;
5628         mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
5629
5630         return intel_framebuffer_create(dev, &mode_cmd, obj);
5631 }
5632
5633 static struct drm_framebuffer *
5634 mode_fits_in_fbdev(struct drm_device *dev,
5635                    struct drm_display_mode *mode)
5636 {
5637         struct drm_i915_private *dev_priv = dev->dev_private;
5638         struct drm_i915_gem_object *obj;
5639         struct drm_framebuffer *fb;
5640
5641         if (dev_priv->fbdev == NULL)
5642                 return NULL;
5643
5644         obj = dev_priv->fbdev->ifb.obj;
5645         if (obj == NULL)
5646                 return NULL;
5647
5648         fb = &dev_priv->fbdev->ifb.base;
5649         if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
5650                                                           fb->bits_per_pixel))
5651                 return NULL;
5652
5653         if (obj->base.size < mode->vdisplay * fb->pitch)
5654                 return NULL;
5655
5656         return fb;
5657 }
5658
/*
 * intel_get_load_detect_pipe - acquire a powered CRTC for load detection
 * @intel_encoder: encoder that will drive the connector
 * @connector: connector under test
 * @mode: mode to program, or NULL to use the VESA 640x480 fallback
 * @old: state recorded here so intel_release_load_detect_pipe() can undo
 *
 * Reuses the encoder's existing CRTC when present (powering it up via
 * dpms if needed), otherwise borrows the first unused CRTC the encoder
 * can drive and sets a temporary mode and framebuffer on it.
 *
 * Returns true if a running CRTC now drives the connector.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* possible_crtcs is a bitmask of CRTC indices this encoder
		 * can use; i tracks the index of the current CRTC. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* Remember the fb we created so release can destroy it. */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
5774
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @intel_encoder: encoder used for the detection
 * @connector: connector that was tested
 * @old: state saved by intel_get_load_detect_pipe()
 *
 * If the pipe was borrowed temporarily, detach the connector, let the
 * helper turn off now-unused functions, and destroy any framebuffer we
 * created.  Otherwise just restore the original dpms mode.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		connector->encoder = NULL;
		/* Disable the CRTC/encoder before destroying the temporary
		 * framebuffer they may still be scanning out. */
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
5805
/* Returns the clock of the currently programmed mode of the given pipe,
 * in kHz, reconstructed from the DPLL and FP divisor registers.  Returns
 * 0 if the DPLL mode cannot be recognised. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Pick the FP divisor register currently selected by the DPLL. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a one-hot bit; ffs()-1 recovers it. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot in the DPLL on gen3+. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the output type programmed into the DPLL. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: infer LVDS from pipe B's port enable bit. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
5892
/** Returns the currently programmed mode of the given pipe.
 *
 * Reconstructs a drm_display_mode from the pipe's timing registers.
 * Returns a kzalloc'd mode the caller must free, or NULL on allocation
 * failure.  Register fields are zero-based, hence the +1 adjustments.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = intel_crtc_clock_get(dev, crtc);
	/* Low 16 bits hold the active/start value, high 16 the total/end. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
5925
#define GPU_IDLE_TIMEOUT 500 /* ms */

/* When this timer fires, we've been idle for awhile */
/* Timer callback: if the GPU still has active objects, re-arm; otherwise
 * mark the device idle and kick the idle work to downclock. */
static void intel_gpu_idle_timer(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.active_list)) {
		/* Still processing requests, so just re-arm the timer. */
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
		return;
	}

	dev_priv->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
5944
#define CRTC_IDLE_TIMEOUT 1000 /* ms */

/* Per-CRTC idle timer: if the scanout buffer is still active on the GPU,
 * re-arm; otherwise mark the CRTC idle and schedule the idle work. */
static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	/* NOTE(review): relies on to_intel_framebuffer(NULL) yielding NULL
	 * when crtc->fb is unset — confirm base is the first member of
	 * struct intel_framebuffer. */
	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU. */
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
5965
/* Switch the pipe's PLL back to full rate (undo LVDS downclocking) and
 * schedule the idle timer that will downclock again when the display
 * goes idle.  No-op on PCH-split platforms or when no LVDS downclock
 * frequency is available. */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		/* Clearing FPA1 selects the full-rate FP0 divisor. */
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to verify the hardware took the change. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
6005
/* Switch the pipe's PLL to the reduced LVDS clock to save power while
 * the display is idle.  No-op on PCH-split platforms or when no LVDS
 * downclock frequency is available. */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll = I915_READ(dpll_reg);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
			   PANEL_UNLOCK_REGS);

		/* Setting FPA1 selects the reduced-rate FP1 divisor. */
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		/* Read back to verify the hardware took the change. */
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

}
6044
6045 /**
6046  * intel_idle_update - adjust clocks for idleness
6047  * @work: work struct
6048  *
6049  * Either the GPU or display (or both) went idle.  Check the busy status
6050  * here and adjust the CRTC and GPU clocks as necessary.
6051  */
6052 static void intel_idle_update(struct work_struct *work)
6053 {
6054         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
6055                                                     idle_work);
6056         struct drm_device *dev = dev_priv->dev;
6057         struct drm_crtc *crtc;
6058         struct intel_crtc *intel_crtc;
6059
6060         if (!i915_powersave)
6061                 return;
6062
6063         mutex_lock(&dev->struct_mutex);
6064
6065         i915_update_gfx_val(dev_priv);
6066
6067         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6068                 /* Skip inactive CRTCs */
6069                 if (!crtc->fb)
6070                         continue;
6071
6072                 intel_crtc = to_intel_crtc(crtc);
6073                 if (!intel_crtc->busy)
6074                         intel_decrease_pllclock(crtc);
6075         }
6076
6077
6078         mutex_unlock(&dev->struct_mutex);
6079 }
6080
6081 /**
6082  * intel_mark_busy - mark the GPU and possibly the display busy
6083  * @dev: drm device
6084  * @obj: object we're operating on
6085  *
6086  * Callers can use this function to indicate that the GPU is busy processing
6087  * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
6088  * buffer), we'll also mark the display as busy, so we know to increase its
6089  * clock frequency.
6090  */
6091 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
6092 {
6093         drm_i915_private_t *dev_priv = dev->dev_private;
6094         struct drm_crtc *crtc = NULL;
6095         struct intel_framebuffer *intel_fb;
6096         struct intel_crtc *intel_crtc;
6097
6098         if (!drm_core_check_feature(dev, DRIVER_MODESET))
6099                 return;
6100
6101         if (!dev_priv->busy)
6102                 dev_priv->busy = true;
6103         else
6104                 mod_timer(&dev_priv->idle_timer, jiffies +
6105                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
6106
6107         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6108                 if (!crtc->fb)
6109                         continue;
6110
6111                 intel_crtc = to_intel_crtc(crtc);
6112                 intel_fb = to_intel_framebuffer(crtc->fb);
6113                 if (intel_fb->obj == obj) {
6114                         if (!intel_crtc->busy) {
6115                                 /* Non-busy -> busy, upclock */
6116                                 intel_increase_pllclock(crtc);
6117                                 intel_crtc->busy = true;
6118                         } else {
6119                                 /* Busy -> busy, put off timer */
6120                                 mod_timer(&intel_crtc->idle_timer, jiffies +
6121                                           msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6122                         }
6123                 }
6124         }
6125 }
6126
6127 static void intel_crtc_destroy(struct drm_crtc *crtc)
6128 {
6129         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6130         struct drm_device *dev = crtc->dev;
6131         struct intel_unpin_work *work;
6132         unsigned long flags;
6133
6134         spin_lock_irqsave(&dev->event_lock, flags);
6135         work = intel_crtc->unpin_work;
6136         intel_crtc->unpin_work = NULL;
6137         spin_unlock_irqrestore(&dev->event_lock, flags);
6138
6139         if (work) {
6140                 cancel_work_sync(&work->work);
6141                 kfree(work);
6142         }
6143
6144         drm_crtc_cleanup(crtc);
6145
6146         kfree(intel_crtc);
6147 }
6148
/* Deferred work run after a page flip completes: unpin the old scanout
 * buffer and drop the references taken when the flip was queued, then
 * free the work item itself. */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);

	/* struct_mutex protects the GEM pin count and reference drops. */
	mutex_lock(&work->dev->struct_mutex);
	i915_gem_object_unpin(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	mutex_unlock(&work->dev->struct_mutex);
	kfree(work);
}
6162
/* Complete a pending page flip on @crtc: deliver the vblank event to
 * userspace (with a timestamp corrected for late completion), release
 * the vblank reference, wake anyone waiting on the old buffer's
 * pending-flip state, and schedule the unpin work. */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	do_gettimeofday(&tnow);

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	/* Nothing to do unless a flip was both queued and prepared. */
	if (work == NULL || !work->pending) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion? Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this. We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64 integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		/* Queue the event on the owning file and wake its readers. */
		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj = work->old_fb_obj;

	/* Clear this plane's bit in the old buffer's pending-flip mask and
	 * wake waiters once no plane still has a flip outstanding on it. */
	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	if (atomic_read(&obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);

	schedule_work(&work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
6235
6236 void intel_finish_page_flip(struct drm_device *dev, int pipe)
6237 {
6238         drm_i915_private_t *dev_priv = dev->dev_private;
6239         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
6240
6241         do_intel_finish_page_flip(dev, crtc);
6242 }
6243
6244 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
6245 {
6246         drm_i915_private_t *dev_priv = dev->dev_private;
6247         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
6248
6249         do_intel_finish_page_flip(dev, crtc);
6250 }
6251
6252 void intel_prepare_page_flip(struct drm_device *dev, int plane)
6253 {
6254         drm_i915_private_t *dev_priv = dev->dev_private;
6255         struct intel_crtc *intel_crtc =
6256                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6257         unsigned long flags;
6258
6259         spin_lock_irqsave(&dev->event_lock, flags);
6260         if (intel_crtc->unpin_work) {
6261                 if ((++intel_crtc->unpin_work->pending) > 1)
6262                         DRM_ERROR("Prepared flip multiple times\n");
6263         } else {
6264                 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6265         }
6266         spin_unlock_irqrestore(&dev->event_lock, flags);
6267 }
6268
6269 static int intel_gen2_queue_flip(struct drm_device *dev,
6270                                  struct drm_crtc *crtc,
6271                                  struct drm_framebuffer *fb,
6272                                  struct drm_i915_gem_object *obj)
6273 {
6274         struct drm_i915_private *dev_priv = dev->dev_private;
6275         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6276         unsigned long offset;
6277         u32 flip_mask;
6278         int ret;
6279
6280         ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6281         if (ret)
6282                 goto out;
6283
6284         /* Offset into the new buffer for cases of shared fbs between CRTCs */
6285         offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6286
6287         ret = BEGIN_LP_RING(6);
6288         if (ret)
6289                 goto out;
6290
6291         /* Can't queue multiple flips, so wait for the previous
6292          * one to finish before executing the next.
6293          */
6294         if (intel_crtc->plane)
6295                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6296         else
6297                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6298         OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6299         OUT_RING(MI_NOOP);
6300         OUT_RING(MI_DISPLAY_FLIP |
6301                  MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6302         OUT_RING(fb->pitch);
6303         OUT_RING(obj->gtt_offset + offset);
6304         OUT_RING(MI_NOOP);
6305         ADVANCE_LP_RING();
6306 out:
6307         return ret;
6308 }
6309
/*
 * Queue a page flip on gen3 via the legacy (LP) ring: pin+fence the new
 * framebuffer, wait for any pending flip on this plane, then emit
 * MI_DISPLAY_FLIP_I915 pointing at the new scanout address.
 * Returns 0 on success or a negative error code.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long offset;
        u32 flip_mask;
        int ret;

        /* Pin into the GTT and fence so the display engine can scan it out. */
        ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
        if (ret)
                goto out;

        /* Offset into the new buffer for cases of shared fbs between CRTCs */
        offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;

        ret = BEGIN_LP_RING(6);
        if (ret)
                goto out;

        /* Can't queue multiple flips, so wait for the previous one on this
         * plane to finish before emitting the next flip command.
         */
        if (intel_crtc->plane)
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
        OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
        OUT_RING(MI_NOOP);
        /* MI_DISPLAY_FLIP_I915 is the gen3 variant of the flip command. */
        OUT_RING(MI_DISPLAY_FLIP_I915 |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitch);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);

        ADVANCE_LP_RING();
out:
        /* NOTE(review): if BEGIN_LP_RING fails the object stays pinned here;
         * presumably the caller's cleanup copes -- verify.
         */
        return ret;
}
6348
/*
 * Queue a page flip on gen4 (i965+) via the legacy ring.
 * Pins and fences the new framebuffer, then emits MI_DISPLAY_FLIP with the
 * new base address; tiling mode is carried in the low bits of the address
 * dword on this generation.  Returns 0 or a negative error code.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;

        ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
        if (ret)
                goto out;

        ret = BEGIN_LP_RING(4);
        if (ret)
                goto out;

        /* i965+ uses the linear or tiled offsets from the
         * Display Registers (which do not change across a page-flip)
         * so we need only reprogram the base address.
         */
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitch);
        OUT_RING(obj->gtt_offset | obj->tiling_mode);

        /* XXX Enabling the panel-fitter across page-flip is so far
         * untested on non-native modes, so ignore it for now.
         * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
         */
        pf = 0;
        /* Keep only the pipe source dimensions bits. */
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        OUT_RING(pf | pipesrc);
        ADVANCE_LP_RING();
out:
        return ret;
}
6387
/*
 * Queue a page flip on gen6 via the legacy ring.  Unlike gen4, the tiling
 * mode is carried in the pitch dword, and the current panel-fitter enable
 * bit is forwarded in the last dword.  Returns 0 or a negative error code.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;

        ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
        if (ret)
                goto out;

        ret = BEGIN_LP_RING(4);
        if (ret)
                goto out;

        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        /* gen6 packs the tiling mode into the pitch dword. */
        OUT_RING(fb->pitch | obj->tiling_mode);
        OUT_RING(obj->gtt_offset);

        pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        OUT_RING(pf | pipesrc);
        ADVANCE_LP_RING();
out:
        return ret;
}
6418
6419 /*
6420  * On gen7 we currently use the blit ring because (in early silicon at least)
6421  * the render ring doesn't give us interrpts for page flip completion, which
6422  * means clients will hang after the first flip is queued.  Fortunately the
6423  * blit ring generates interrupts properly, so use it instead.
6424  */
/*
 * Queue a page flip on gen7 using the blit ring (see comment above).
 * Returns 0 or a negative error code.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
        int ret;

        ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
        if (ret)
                goto out;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                goto out;

        /* NOTE(review): plane << 19 looks like the gen7-specific plane-select
         * encoding (distinct from MI_DISPLAY_FLIP_PLANE used on older gens) --
         * confirm against the gen7 PRM.
         */
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
        intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
        intel_ring_emit(ring, (obj->gtt_offset));
        intel_ring_emit(ring, (MI_NOOP));
        intel_ring_advance(ring);
out:
        return ret;
}
6451
/* Fallback queue_flip hook for platforms with no flip support. */
static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj)
{
        return -ENODEV;
}
6459
6460 static int intel_crtc_page_flip(struct drm_crtc *crtc,
6461                                 struct drm_framebuffer *fb,
6462                                 struct drm_pending_vblank_event *event)
6463 {
6464         struct drm_device *dev = crtc->dev;
6465         struct drm_i915_private *dev_priv = dev->dev_private;
6466         struct intel_framebuffer *intel_fb;
6467         struct drm_i915_gem_object *obj;
6468         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6469         struct intel_unpin_work *work;
6470         unsigned long flags;
6471         int ret;
6472
6473         work = kzalloc(sizeof *work, GFP_KERNEL);
6474         if (work == NULL)
6475                 return -ENOMEM;
6476
6477         work->event = event;
6478         work->dev = crtc->dev;
6479         intel_fb = to_intel_framebuffer(crtc->fb);
6480         work->old_fb_obj = intel_fb->obj;
6481         INIT_WORK(&work->work, intel_unpin_work_fn);
6482
6483         /* We borrow the event spin lock for protecting unpin_work */
6484         spin_lock_irqsave(&dev->event_lock, flags);
6485         if (intel_crtc->unpin_work) {
6486                 spin_unlock_irqrestore(&dev->event_lock, flags);
6487                 kfree(work);
6488
6489                 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6490                 return -EBUSY;
6491         }
6492         intel_crtc->unpin_work = work;
6493         spin_unlock_irqrestore(&dev->event_lock, flags);
6494
6495         intel_fb = to_intel_framebuffer(fb);
6496         obj = intel_fb->obj;
6497
6498         mutex_lock(&dev->struct_mutex);
6499
6500         /* Reference the objects for the scheduled work. */
6501         drm_gem_object_reference(&work->old_fb_obj->base);
6502         drm_gem_object_reference(&obj->base);
6503
6504         crtc->fb = fb;
6505
6506         ret = drm_vblank_get(dev, intel_crtc->pipe);
6507         if (ret)
6508                 goto cleanup_objs;
6509
6510         work->pending_flip_obj = obj;
6511
6512         work->enable_stall_check = true;
6513
6514         /* Block clients from rendering to the new back buffer until
6515          * the flip occurs and the object is no longer visible.
6516          */
6517         atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6518
6519         ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6520         if (ret)
6521                 goto cleanup_pending;
6522
6523         mutex_unlock(&dev->struct_mutex);
6524
6525         trace_i915_flip_request(intel_crtc->plane, obj);
6526
6527         return 0;
6528
6529 cleanup_pending:
6530         atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6531 cleanup_objs:
6532         drm_gem_object_unreference(&work->old_fb_obj->base);
6533         drm_gem_object_unreference(&obj->base);
6534         mutex_unlock(&dev->struct_mutex);
6535
6536         spin_lock_irqsave(&dev->event_lock, flags);
6537         intel_crtc->unpin_work = NULL;
6538         spin_unlock_irqrestore(&dev->event_lock, flags);
6539
6540         kfree(work);
6541
6542         return ret;
6543 }
6544
/*
 * Fix up plane/pipe routing left behind by the BIOS/bootloader on
 * non-PCH-split hardware, so our teardown ordering assumptions hold.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
                                       int pipe, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg, val;

        if (HAS_PCH_SPLIT(dev))
                return;

        /* Who knows what state these registers were left in by the BIOS or
         * grub?
         *
         * If we leave the registers in a conflicting state (e.g. with the
         * display plane reading from the other pipe than the one we intend
         * to use) then when we attempt to teardown the active mode, we will
         * not disable the pipes and planes in the correct order -- leaving
         * a plane reading from a disabled pipe and possibly leading to
         * undefined behaviour.
         */

        reg = DSPCNTR(plane);
        val = I915_READ(reg);

        if ((val & DISPLAY_PLANE_ENABLE) == 0)
                return;
        /* !! collapses the pipe-select field to 0/1; presumably SEL_PIPE is
         * effectively the "pipe B" bit here (only two pipes) -- so this
         * compares the plane's currently-selected pipe against 'pipe'.
         */
        if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
                return;

        /* This display plane is active and attached to the other CPU pipe. */
        pipe = !pipe;

        /* Disable the plane and wait for it to stop reading from the pipe. */
        intel_disable_plane(dev_priv, plane, pipe);
        intel_disable_pipe(dev_priv, pipe);
}
6580
/* drm_crtc_funcs.reset callback: return the CRTC software state to
 * "unknown" and sanitize any conflicting BIOS configuration.
 */
static void intel_crtc_reset(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        /* Reset flags back to the 'unknown' status so that they
         * will be correctly set on the initial modeset.
         */
        intel_crtc->dpms_mode = -1;

        /* We need to fix up any BIOS configuration that conflicts with
         * our expectations.
         */
        intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
6596
/* CRTC helper vtable.  Deliberately non-const: intel_crtc_init() patches
 * in the platform-specific .prepare/.commit hooks (PCH vs i9xx) at runtime.
 */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
        .dpms = intel_crtc_dpms,
        .mode_fixup = intel_crtc_mode_fixup,
        .mode_set = intel_crtc_mode_set,
        .mode_set_base = intel_pipe_set_base,
        .mode_set_base_atomic = intel_pipe_set_base_atomic,
        .load_lut = intel_crtc_load_lut,
        .disable = intel_crtc_disable,
};
6606
/* Core CRTC callbacks exposed to the DRM framework. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
        .reset = intel_crtc_reset,
        .cursor_set = intel_crtc_cursor_set,
        .cursor_move = intel_crtc_cursor_move,
        .gamma_set = intel_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .destroy = intel_crtc_destroy,
        .page_flip = intel_crtc_page_flip,
};
6616
/*
 * Allocate and register the intel_crtc for @pipe: linear gamma LUT,
 * pipe<->plane mapping (swapped on mobile gen3 for FBC), helper vtable
 * and the idle timer.  Allocation failure is silently ignored -- the
 * CRTC simply won't exist.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
        int i;

        /* Trailing space for the connector pointers used by the fb helper. */
        intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
        if (intel_crtc == NULL)
                return;

        drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

        /* Identity gamma ramp until userspace programs one. */
        drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
        for (i = 0; i < 256; i++) {
                intel_crtc->lut_r[i] = i;
                intel_crtc->lut_g[i] = i;
                intel_crtc->lut_b[i] = i;
        }

        /* Swap pipes & planes for FBC on pre-965 */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
        if (IS_MOBILE(dev) && IS_GEN3(dev)) {
                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
                intel_crtc->plane = !pipe;
        }

        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
               dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

        intel_crtc_reset(&intel_crtc->base);
        intel_crtc->active = true; /* force the pipe off on setup_init_config */

        /* NOTE: this patches the file-global helper vtable, so the choice is
         * shared by every CRTC -- fine while all pipes are the same platform.
         */
        if (HAS_PCH_SPLIT(dev)) {
                intel_helper_funcs.prepare = ironlake_crtc_prepare;
                intel_helper_funcs.commit = ironlake_crtc_commit;
        } else {
                intel_helper_funcs.prepare = i9xx_crtc_prepare;
                intel_helper_funcs.commit = i9xx_crtc_commit;
        }

        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

        intel_crtc->busy = false;

        setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
                    (unsigned long)intel_crtc);
}
6667
6668 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6669                                 struct drm_file *file)
6670 {
6671         drm_i915_private_t *dev_priv = dev->dev_private;
6672         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
6673         struct drm_mode_object *drmmode_obj;
6674         struct intel_crtc *crtc;
6675
6676         if (!dev_priv) {
6677                 DRM_ERROR("called with no initialization\n");
6678                 return -EINVAL;
6679         }
6680
6681         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6682                         DRM_MODE_OBJECT_CRTC);
6683
6684         if (!drmmode_obj) {
6685                 DRM_ERROR("no such CRTC id\n");
6686                 return -EINVAL;
6687         }
6688
6689         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
6690         pipe_from_crtc_id->pipe = crtc->pipe;
6691
6692         return 0;
6693 }
6694
6695 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6696 {
6697         struct intel_encoder *encoder;
6698         int index_mask = 0;
6699         int entry = 0;
6700
6701         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6702                 if (type_mask & encoder->clone_mask)
6703                         index_mask |= (1 << entry);
6704                 entry++;
6705         }
6706
6707         return index_mask;
6708 }
6709
6710 static bool has_edp_a(struct drm_device *dev)
6711 {
6712         struct drm_i915_private *dev_priv = dev->dev_private;
6713
6714         if (!IS_MOBILE(dev))
6715                 return false;
6716
6717         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6718                 return false;
6719
6720         if (IS_GEN5(dev) &&
6721             (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6722                 return false;
6723
6724         return true;
6725 }
6726
/*
 * Probe and register every display output (LVDS, eDP, CRT, SDVO, HDMI,
 * DP, DVO, TV) present on this platform.  Probe order matters: SDVO ports
 * multiplex with HDMI/DP on the same pins, so SDVO is tried first and the
 * others only on failure.  Finishes by computing the possible_crtcs /
 * possible_clones masks and disabling anything left on by the BIOS.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;
        bool has_lvds = false;

        if (IS_MOBILE(dev) && !IS_I830(dev))
                has_lvds = intel_lvds_init(dev);
        if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
                /* disable the panel fitter on everything but LVDS */
                I915_WRITE(PFIT_CONTROL, 0);
        }

        if (HAS_PCH_SPLIT(dev)) {
                /* Port D may carry eDP instead of regular DP; register eDP
                 * ports before the CRT/SDVO probing below.
                 */
                dpd_is_edp = intel_dpd_is_edp(dev);

                if (has_edp_a(dev))
                        intel_dp_init(dev, DP_A);

                if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                        intel_dp_init(dev, PCH_DP_D);
        }

        intel_crt_init(dev);

        if (HAS_PCH_SPLIT(dev)) {
                int found;

                if (I915_READ(HDMIB) & PORT_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev, PCH_SDVOB);
                        if (!found)
                                intel_hdmi_init(dev, HDMIB);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev, PCH_DP_B);
                }

                if (I915_READ(HDMIC) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMIC);

                if (I915_READ(HDMID) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMID);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_C);

                /* Port D as regular DP only if it wasn't claimed as eDP. */
                if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                        intel_dp_init(dev, PCH_DP_D);

        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
                bool found = false;

                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev, SDVOB);
                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev, SDVOB);
                        }

                        if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
                                DRM_DEBUG_KMS("probing DP_B\n");
                                intel_dp_init(dev, DP_B);
                        }
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev, SDVOC);
                }

                if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

                        if (SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev, SDVOC);
                        }
                        if (SUPPORTS_INTEGRATED_DP(dev)) {
                                DRM_DEBUG_KMS("probing DP_C\n");
                                intel_dp_init(dev, DP_C);
                        }
                }

                if (SUPPORTS_INTEGRATED_DP(dev) &&
                    (I915_READ(DP_D) & DP_DETECTED)) {
                        DRM_DEBUG_KMS("probing DP_D\n");
                        intel_dp_init(dev, DP_D);
                }
        } else if (IS_GEN2(dev))
                intel_dvo_init(dev);

        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);

        /* Now that all encoders exist, compute the cloning constraints. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
                encoder->base.possible_clones =
                        intel_encoder_clones(dev, encoder->clone_mask);
        }

        intel_panel_setup_backlight(dev);

        /* disable all the possible outputs/crtcs before entering KMS mode */
        drm_helper_disable_unused_functions(dev);
}
6835
/* drm_framebuffer_funcs.destroy: drop the backing GEM object reference
 * taken at creation and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

        drm_framebuffer_cleanup(fb);
        drm_gem_object_unreference_unlocked(&intel_fb->obj->base);

        kfree(intel_fb);
}
6845
/* drm_framebuffer_funcs.create_handle: hand userspace a GEM handle for
 * the fb's backing object (used e.g. for fb handover).
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
                                                struct drm_file *file,
                                                unsigned int *handle)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;

        return drm_gem_handle_create(file, &obj->base, handle);
}
6855
/* Framebuffer object callbacks registered with drm_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
};
6860
6861 int intel_framebuffer_init(struct drm_device *dev,
6862                            struct intel_framebuffer *intel_fb,
6863                            struct drm_mode_fb_cmd *mode_cmd,
6864                            struct drm_i915_gem_object *obj)
6865 {
6866         int ret;
6867
6868         if (obj->tiling_mode == I915_TILING_Y)
6869                 return -EINVAL;
6870
6871         if (mode_cmd->pitch & 63)
6872                 return -EINVAL;
6873
6874         switch (mode_cmd->bpp) {
6875         case 8:
6876         case 16:
6877         case 24:
6878         case 32:
6879                 break;
6880         default:
6881                 return -EINVAL;
6882         }
6883
6884         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6885         if (ret) {
6886                 DRM_ERROR("framebuffer init failed %d\n", ret);
6887                 return ret;
6888         }
6889
6890         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
6891         intel_fb->obj = obj;
6892         return 0;
6893 }
6894
/* Mode-config .fb_create: look up the user's GEM handle and wrap it in an
 * intel framebuffer.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
                              struct drm_mode_fb_cmd *mode_cmd)
{
        struct drm_i915_gem_object *obj;

        obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
        /* &obj->base == NULL iff the lookup returned NULL; this relies on
         * 'base' being the first member of drm_i915_gem_object.
         */
        if (&obj->base == NULL)
                return ERR_PTR(-ENOENT);

        return intel_framebuffer_create(dev, mode_cmd, obj);
}
6908
/* Top-level mode-config callbacks: user fb creation and fbdev hotplug. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .output_poll_changed = intel_fb_output_poll_changed,
};
6913
6914 static struct drm_i915_gem_object *
6915 intel_alloc_context_page(struct drm_device *dev)
6916 {
6917         struct drm_i915_gem_object *ctx;
6918         int ret;
6919
6920         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
6921
6922         ctx = i915_gem_alloc_object(dev, 4096);
6923         if (!ctx) {
6924                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
6925                 return NULL;
6926         }
6927
6928         ret = i915_gem_object_pin(ctx, 4096, true);
6929         if (ret) {
6930                 DRM_ERROR("failed to pin power context: %d\n", ret);
6931                 goto err_unref;
6932         }
6933
6934         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
6935         if (ret) {
6936                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
6937                 goto err_unpin;
6938         }
6939
6940         return ctx;
6941
6942 err_unpin:
6943         i915_gem_object_unpin(ctx);
6944 err_unref:
6945         drm_gem_object_unreference(&ctx->base);
6946         mutex_unlock(&dev->struct_mutex);
6947         return NULL;
6948 }
6949
/*
 * Request the Ironlake DRPS hardware switch to frequency index @val.
 * Returns false (request rejected) if a previous command is still
 * in flight, true once the new request has been submitted.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 rgvswctl;

        /* A previous frequency-change command may still be pending. */
        rgvswctl = I915_READ16(MEMSWCTL);
        if (rgvswctl & MEMCTL_CMD_STS) {
                DRM_DEBUG("gpu busy, RCS change rejected\n");
                return false; /* still busy with another command */
        }

        rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
                (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
        I915_WRITE16(MEMSWCTL, rgvswctl);
        POSTING_READ16(MEMSWCTL);

        /* Setting the status bit submits the command; the hardware clears
         * it again when done (polled in ironlake_enable_drps).
         */
        rgvswctl |= MEMCTL_CMD_STS;
        I915_WRITE16(MEMSWCTL, rgvswctl);

        return true;
}
6971
/*
 * Enable Ironlake DRPS (dynamic render P-state switching): program the
 * evaluation intervals and thresholds, derive fmax/fmin/fstart from the
 * mode-control fuse register, switch to software frequency control and
 * snapshot the counters used later for power estimation.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u8 fmax, fmin, fstart, vstart;

        /* Enable temp reporting */
        I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
        I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

        /* 100ms RC evaluation intervals */
        I915_WRITE(RCUPEI, 100000);
        I915_WRITE(RCDNEI, 100000);

        /* Set max/min thresholds to 90ms and 80ms respectively */
        I915_WRITE(RCBMAXAVG, 90000);
        I915_WRITE(RCBMINAVG, 80000);

        I915_WRITE(MEMIHYST, 1);

        /* Set up min, max, and cur for interrupt handling */
        fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
        fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;

        /* Starting voltage read back from the PXVFREQ table entry. */
        vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
                PXVFREQ_PX_SHIFT;

        dev_priv->fmax = fmax; /* IPS callback will increase this */
        dev_priv->fstart = fstart;

        dev_priv->max_delay = fstart;
        dev_priv->min_delay = fmin;
        dev_priv->cur_delay = fstart;

        DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
                         fmax, fmin, fstart);

        I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

        /*
         * Interrupts will be enabled in ironlake_irq_postinstall
         */

        I915_WRITE(VIDSTART, vstart);
        POSTING_READ(VIDSTART);

        /* Hand frequency control over to software. */
        rgvmodectl |= MEMMODE_SWMODE_EN;
        I915_WRITE(MEMMODECTL, rgvmodectl);

        if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
                DRM_ERROR("stuck trying to change perf mode\n");
        msleep(1);

        ironlake_set_drps(dev, fstart);

        /* Snapshot baseline counters for the power/energy accounting code.
         * 0x112e0/0x112e4/0x112e8/0x112f4 are hardware counter registers
         * without symbolic names here -- see the IPS/EMON code for usage.
         */
        dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
                I915_READ(0x112e0);
        dev_priv->last_time1 = jiffies_to_msecs(jiffies);
        dev_priv->last_count2 = I915_READ(0x112f4);
        getrawmonotonic(&dev_priv->last_time2);
}
7035
/*
 * Disable Ironlake DRPS: mask and ack the frequency-change interrupt,
 * return to the boot frequency and stop software frequency control.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);

        /* Ack interrupts, disable EFC interrupt */
        I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
        I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
        I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

        /* Go back to the starting frequency */
        ironlake_set_drps(dev, dev_priv->fstart);
        msleep(1);
        /* Writing the status bit back presumably cancels/retires the last
         * software request before control is relinquished -- mirrors the
         * submit sequence in ironlake_set_drps().
         */
        rgvswctl |= MEMCTL_CMD_STS;
        I915_WRITE(MEMSWCTL, rgvswctl);
        msleep(1);

}
7056
/*
 * Request a new gen6 RPS frequency: @val (10 bits) is shifted into the
 * frequency-request field of GEN6_RPNSWREQ at bit 25.
 */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 swreq;

        swreq = (val & 0x3ff) << 25;
        I915_WRITE(GEN6_RPNSWREQ, swreq);
}
7065
/*
 * Disable gen6 RPS: cancel the software frequency request, mask all PM
 * interrupts, clear the cached pm_iir under rps_lock (so a concurrent
 * work item sees no stale bits) and finally ack anything still latched
 * in the IIR.
 */
void gen6_disable_rps(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
        I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
        I915_WRITE(GEN6_PMIER, 0);

        spin_lock_irq(&dev_priv->rps_lock);
        dev_priv->pm_iir = 0;
        spin_unlock_irq(&dev_priv->rps_lock);

        I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
7080
/*
 * Decode a PXVFREQ register value into a frequency in kHz:
 * bits 21:16 divider, bits 13:12 post-divider exponent, bits 2:0
 * pre-divider.  Returns 0 for an unprogrammed entry (pre == 0).
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
        unsigned long divider = (vidfreq >> 16) & 0x3f;
        unsigned long post = (vidfreq >> 12) & 0x3;
        unsigned long pre = vidfreq & 0x7;

        if (pre == 0)
                return 0;

        return (divider * 133333) / ((1UL << post) * pre);
}
7095
/*
 * Program the Ironlake energy monitor (EMON): event energy weights,
 * per-P-state weights derived from the PXVFREQ table, and the magic
 * calibration registers, then enable PMON counting.  Also caches the
 * LCFUSE correction factor for the power accounting code.
 */
void intel_init_emon(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 lcfuse;
        u8 pxw[16];
        int i;

        /* Disable to program */
        I915_WRITE(ECR, 0);
        POSTING_READ(ECR);

        /* Program energy weights for various events */
        I915_WRITE(SDEW, 0x15040d00);
        I915_WRITE(CSIEW0, 0x007f0000);
        I915_WRITE(CSIEW1, 0x1e220004);
        I915_WRITE(CSIEW2, 0x04000004);

        for (i = 0; i < 5; i++)
                I915_WRITE(PEW + (i * 4), 0);
        for (i = 0; i < 3; i++)
                I915_WRITE(DEW + (i * 4), 0);

        /* Program P-state weights to account for frequency power adjustment */
        for (i = 0; i < 16; i++) {
                u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
                unsigned long freq = intel_pxfreq(pxvidfreq);
                unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
                        PXVFREQ_PX_SHIFT;
                unsigned long val;

                /* Weight ~ V^2 * f, scaled to a byte; the 127*127*900
                 * divisor is an empirical normalization constant.
                 */
                val = vid * vid;
                val *= (freq / 1000);
                val *= 255;
                val /= (127*127*900);
                if (val > 0xff)
                        DRM_ERROR("bad pxval: %ld\n", val);
                pxw[i] = val;
        }
        /* Render standby states get 0 weight */
        pxw[14] = 0;
        pxw[15] = 0;

        /* Pack the 16 byte weights into four 32-bit PXW registers. */
        for (i = 0; i < 4; i++) {
                u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
                        (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
                I915_WRITE(PXW + (i * 4), val);
        }

        /* Adjust magic regs to magic values (more experimental results) */
        I915_WRITE(OGW0, 0);
        I915_WRITE(OGW1, 0);
        I915_WRITE(EG0, 0x00007f00);
        I915_WRITE(EG1, 0x0000000e);
        I915_WRITE(EG2, 0x000e0000);
        I915_WRITE(EG3, 0x68000300);
        I915_WRITE(EG4, 0x42000000);
        I915_WRITE(EG5, 0x00140031);
        I915_WRITE(EG6, 0);
        I915_WRITE(EG7, 0);

        for (i = 0; i < 8; i++)
                I915_WRITE(PXWL + (i * 4), 0);

        /* Enable PMON + select events */
        I915_WRITE(ECR, 0x80000019);

        lcfuse = I915_READ(LCFUSE02);

        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
7166
/*
 * gen6_enable_rps - enable RPS (GPU frequency scaling) and RC6 on gen6+.
 * @dev_priv: i915 device private
 *
 * Programs the RC6 wake-rate/threshold registers, sets up the RP control
 * and evaluation-interval registers, negotiates frequencies with the PCU
 * via the GEN6_PCODE mailbox, records min/max/current frequency in
 * dev_priv, and finally unmasks the PM interrupts that drive runtime
 * up/down clocking.
 *
 * Takes struct_mutex and forcewake for the duration of the sequence.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Per-ring max-idle count before an RC state transition. */
	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* RC6 itself is opt-in via the i915_enable_rc6 module parameter. */
	if (i915_enable_rc6)
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_USE_NORMAL_FREQ |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Push a (zeroed) min-frequency table to the PCU via the mailbox. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	/* RP_STATE_CAP: min in bits 23:16, max in bits 7:0;
	 * GT_PERF_STATUS: current frequency in bits 15:8. */
	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	/* pm_iir must be quiescent before unmasking PM interrupts. */
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7290
/*
 * ironlake_init_clock_gating - gen5 (Ironlake) clock gating setup.
 *
 * Programs the gating-disable bits required for FBC and CxSR, the
 * spec-mandated memory self-refresh enable bits, clears the LP
 * watermark registers, and on mobile parts applies the additional
 * FBC workaround bits.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Clear the LP watermark registers. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
7359
/*
 * gen6_init_clock_gating - gen6 (Sandybridge) clock gating setup.
 *
 * Programs the spec-mandated gating-disable bits for memory
 * self-refresh and FBC, clears the LP watermark registers, and
 * disables trickle feed on every display plane.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear the LP watermark registers. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on each display plane. */
	for_each_pipe(pipe)
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
}
7401
/*
 * ivybridge_init_clock_gating - gen7 (Ivybridge) clock gating setup.
 *
 * Much lighter than the gen6 variant: keeps the VRH unit ungated,
 * clears the LP watermark registers, and disables plane trickle feed
 * on every pipe.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear the LP watermark registers. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	/* Disable trickle feed on each display plane. */
	for_each_pipe(pipe)
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
}
7421
7422 static void g4x_init_clock_gating(struct drm_device *dev)
7423 {
7424         struct drm_i915_private *dev_priv = dev->dev_private;
7425         uint32_t dspclk_gate;
7426
7427         I915_WRITE(RENCLK_GATE_D1, 0);
7428         I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7429                    GS_UNIT_CLOCK_GATE_DISABLE |
7430                    CL_UNIT_CLOCK_GATE_DISABLE);
7431         I915_WRITE(RAMCLK_GATE_D, 0);
7432         dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7433                 OVRUNIT_CLOCK_GATE_DISABLE |
7434                 OVCUNIT_CLOCK_GATE_DISABLE;
7435         if (IS_GM45(dev))
7436                 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7437         I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7438 }
7439
/*
 * crestline_init_clock_gating - gen4 mobile (965GM) clock gating setup.
 *
 * Keeps only the RCC unit ungated; all other gating-disable registers
 * (render D2, display, RAM, DEUC) are cleared.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is written with a 16-bit access. */
	I915_WRITE16(DEUC, 0);
}
7450
/*
 * broadwater_init_clock_gating - gen4 desktop (965G) clock gating setup.
 *
 * Keeps the RCZ/RCC/RCPB/ISC/FBC render units ungated and clears the
 * remaining render gating-disable bits.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7462
7463 static void gen3_init_clock_gating(struct drm_device *dev)
7464 {
7465         struct drm_i915_private *dev_priv = dev->dev_private;
7466         u32 dstate = I915_READ(D_STATE);
7467
7468         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7469                 DSTATE_DOT_CLOCK_GATING;
7470         I915_WRITE(D_STATE, dstate);
7471 }
7472
/* i85x family: set SV_CLOCK_GATE_DISABLE in RENCLK_GATE_D1. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7479
/* i830: set OVRUNIT_CLOCK_GATE_DISABLE in DSPCLK_GATE_D. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7486
/* Ibex Peak PCH clock gating workaround. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7498
/* Cougar Point PCH clock gating workarounds. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	/* CPT additionally sets the eDP PPS fix-disable chicken bit. */
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
}
7512
/*
 * ironlake_teardown_rc6 - release the RC6 context pages.
 *
 * Unpins and unreferences the render context and power context objects
 * (whichever were allocated) and NULLs the pointers so a later
 * ironlake_setup_rc6() can allocate them again.  Safe to call when
 * either or both contexts were never set up.  Callers in this file
 * hold struct_mutex around the unpin/unreference.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
7529
/*
 * ironlake_disable_rc6 - disarm RC6 and free its context pages.
 *
 * If the power context is armed (PWRCTXA non-zero), wake the GPU out of
 * render standby first, then clear PWRCTXA and restore RSTDBYCTL.
 * Always tears down the context pages afterwards.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		/* Wait (timeout 50) for render standby status to read "on". */
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
7549
7550 static int ironlake_setup_rc6(struct drm_device *dev)
7551 {
7552         struct drm_i915_private *dev_priv = dev->dev_private;
7553
7554         if (dev_priv->renderctx == NULL)
7555                 dev_priv->renderctx = intel_alloc_context_page(dev);
7556         if (!dev_priv->renderctx)
7557                 return -ENOMEM;
7558
7559         if (dev_priv->pwrctx == NULL)
7560                 dev_priv->pwrctx = intel_alloc_context_page(dev);
7561         if (!dev_priv->pwrctx) {
7562                 ironlake_teardown_rc6(dev);
7563                 return -ENOMEM;
7564         }
7565
7566         return 0;
7567 }
7568
7569 void ironlake_enable_rc6(struct drm_device *dev)
7570 {
7571         struct drm_i915_private *dev_priv = dev->dev_private;
7572         int ret;
7573
7574         /* rc6 disabled by default due to repeated reports of hanging during
7575          * boot and resume.
7576          */
7577         if (!i915_enable_rc6)
7578                 return;
7579
7580         mutex_lock(&dev->struct_mutex);
7581         ret = ironlake_setup_rc6(dev);
7582         if (ret) {
7583                 mutex_unlock(&dev->struct_mutex);
7584                 return;
7585         }
7586
7587         /*
7588          * GPU can automatically power down the render unit if given a page
7589          * to save state.
7590          */
7591         ret = BEGIN_LP_RING(6);
7592         if (ret) {
7593                 ironlake_teardown_rc6(dev);
7594                 mutex_unlock(&dev->struct_mutex);
7595                 return;
7596         }
7597
7598         OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
7599         OUT_RING(MI_SET_CONTEXT);
7600         OUT_RING(dev_priv->renderctx->gtt_offset |
7601                  MI_MM_SPACE_GTT |
7602                  MI_SAVE_EXT_STATE_EN |
7603                  MI_RESTORE_EXT_STATE_EN |
7604                  MI_RESTORE_INHIBIT);
7605         OUT_RING(MI_SUSPEND_FLUSH);
7606         OUT_RING(MI_NOOP);
7607         OUT_RING(MI_FLUSH);
7608         ADVANCE_LP_RING();
7609
7610         /*
7611          * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
7612          * does an implicit flush, combined with MI_FLUSH above, it should be
7613          * safe to assume that renderctx is valid
7614          */
7615         ret = intel_wait_ring_idle(LP_RING(dev_priv));
7616         if (ret) {
7617                 DRM_ERROR("failed to enable ironlake power power savings\n");
7618                 ironlake_teardown_rc6(dev);
7619                 mutex_unlock(&dev->struct_mutex);
7620                 return;
7621         }
7622
7623         I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
7624         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
7625         mutex_unlock(&dev->struct_mutex);
7626 }
7627
/*
 * intel_init_clock_gating - apply GPU and (optionally) PCH clock gating.
 *
 * Dispatches to the per-platform hooks chosen in intel_init_display();
 * the PCH hook is only installed on PCH-split platforms, hence the
 * NULL check.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
7637
7638 /* Set up chip specific display functions */
7639 static void intel_init_display(struct drm_device *dev)
7640 {
7641         struct drm_i915_private *dev_priv = dev->dev_private;
7642
7643         /* We always want a DPMS function */
7644         if (HAS_PCH_SPLIT(dev)) {
7645                 dev_priv->display.dpms = ironlake_crtc_dpms;
7646                 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
7647         } else {
7648                 dev_priv->display.dpms = i9xx_crtc_dpms;
7649                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
7650         }
7651
7652         if (I915_HAS_FBC(dev)) {
7653                 if (HAS_PCH_SPLIT(dev)) {
7654                         dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7655                         dev_priv->display.enable_fbc = ironlake_enable_fbc;
7656                         dev_priv->display.disable_fbc = ironlake_disable_fbc;
7657                 } else if (IS_GM45(dev)) {
7658                         dev_priv->display.fbc_enabled = g4x_fbc_enabled;
7659                         dev_priv->display.enable_fbc = g4x_enable_fbc;
7660                         dev_priv->display.disable_fbc = g4x_disable_fbc;
7661                 } else if (IS_CRESTLINE(dev)) {
7662                         dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
7663                         dev_priv->display.enable_fbc = i8xx_enable_fbc;
7664                         dev_priv->display.disable_fbc = i8xx_disable_fbc;
7665                 }
7666                 /* 855GM needs testing */
7667         }
7668
7669         /* Returns the core display clock speed */
7670         if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
7671                 dev_priv->display.get_display_clock_speed =
7672                         i945_get_display_clock_speed;
7673         else if (IS_I915G(dev))
7674                 dev_priv->display.get_display_clock_speed =
7675                         i915_get_display_clock_speed;
7676         else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
7677                 dev_priv->display.get_display_clock_speed =
7678                         i9xx_misc_get_display_clock_speed;
7679         else if (IS_I915GM(dev))
7680                 dev_priv->display.get_display_clock_speed =
7681                         i915gm_get_display_clock_speed;
7682         else if (IS_I865G(dev))
7683                 dev_priv->display.get_display_clock_speed =
7684                         i865_get_display_clock_speed;
7685         else if (IS_I85X(dev))
7686                 dev_priv->display.get_display_clock_speed =
7687                         i855_get_display_clock_speed;
7688         else /* 852, 830 */
7689                 dev_priv->display.get_display_clock_speed =
7690                         i830_get_display_clock_speed;
7691
7692         /* For FIFO watermark updates */
7693         if (HAS_PCH_SPLIT(dev)) {
7694                 if (HAS_PCH_IBX(dev))
7695                         dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
7696                 else if (HAS_PCH_CPT(dev))
7697                         dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
7698
7699                 if (IS_GEN5(dev)) {
7700                         if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
7701                                 dev_priv->display.update_wm = ironlake_update_wm;
7702                         else {
7703                                 DRM_DEBUG_KMS("Failed to get proper latency. "
7704                                               "Disable CxSR\n");
7705                                 dev_priv->display.update_wm = NULL;
7706                         }
7707                         dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
7708                         dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7709                 } else if (IS_GEN6(dev)) {
7710                         if (SNB_READ_WM0_LATENCY()) {
7711                                 dev_priv->display.update_wm = sandybridge_update_wm;
7712                         } else {
7713                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
7714                                               "Disable CxSR\n");
7715                                 dev_priv->display.update_wm = NULL;
7716                         }
7717                         dev_priv->display.fdi_link_train = gen6_fdi_link_train;
7718                         dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7719                 } else if (IS_IVYBRIDGE(dev)) {
7720                         /* FIXME: detect B0+ stepping and use auto training */
7721                         dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
7722                         if (SNB_READ_WM0_LATENCY()) {
7723                                 dev_priv->display.update_wm = sandybridge_update_wm;
7724                         } else {
7725                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
7726                                               "Disable CxSR\n");
7727                                 dev_priv->display.update_wm = NULL;
7728                         }
7729                         dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7730
7731                 } else
7732                         dev_priv->display.update_wm = NULL;
7733         } else if (IS_PINEVIEW(dev)) {
7734                 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7735                                             dev_priv->is_ddr3,
7736                                             dev_priv->fsb_freq,
7737                                             dev_priv->mem_freq)) {
7738                         DRM_INFO("failed to find known CxSR latency "
7739                                  "(found ddr%s fsb freq %d, mem freq %d), "
7740                                  "disabling CxSR\n",
7741                                  (dev_priv->is_ddr3 == 1) ? "3": "2",
7742                                  dev_priv->fsb_freq, dev_priv->mem_freq);
7743                         /* Disable CxSR and never update its watermark again */
7744                         pineview_disable_cxsr(dev);
7745                         dev_priv->display.update_wm = NULL;
7746                 } else
7747                         dev_priv->display.update_wm = pineview_update_wm;
7748                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7749         } else if (IS_G4X(dev)) {
7750                 dev_priv->display.update_wm = g4x_update_wm;
7751                 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7752         } else if (IS_GEN4(dev)) {
7753                 dev_priv->display.update_wm = i965_update_wm;
7754                 if (IS_CRESTLINE(dev))
7755                         dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7756                 else if (IS_BROADWATER(dev))
7757                         dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7758         } else if (IS_GEN3(dev)) {
7759                 dev_priv->display.update_wm = i9xx_update_wm;
7760                 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7761                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7762         } else if (IS_I865G(dev)) {
7763                 dev_priv->display.update_wm = i830_update_wm;
7764                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7765                 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7766         } else if (IS_I85X(dev)) {
7767                 dev_priv->display.update_wm = i9xx_update_wm;
7768                 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
7769                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7770         } else {
7771                 dev_priv->display.update_wm = i830_update_wm;
7772                 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7773                 if (IS_845G(dev))
7774                         dev_priv->display.get_fifo_size = i845_get_fifo_size;
7775                 else
7776                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
7777         }
7778
7779         /* Default just returns -ENODEV to indicate unsupported */
7780         dev_priv->display.queue_flip = intel_default_queue_flip;
7781
7782         switch (INTEL_INFO(dev)->gen) {
7783         case 2:
7784                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
7785                 break;
7786
7787         case 3:
7788                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
7789                 break;
7790
7791         case 4:
7792         case 5:
7793                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
7794                 break;
7795
7796         case 6:
7797                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
7798                 break;
7799         case 7:
7800                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
7801                 break;
7802         }
7803 }
7804
7805 /*
7806  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
7807  * resume, or other times.  This quirk makes sure that's the case for
7808  * affected systems.
7809  */
7810 static void quirk_pipea_force (struct drm_device *dev)
7811 {
7812         struct drm_i915_private *dev_priv = dev->dev_private;
7813
7814         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
7815         DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
7816 }
7817
7818 /*
7819  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
7820  */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Force LVDS spread-spectrum clocking off for this machine. */
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
7826
/* PCI-ID-matched quirk entry; PCI_ANY_ID wildcards a subsystem field. */
struct intel_quirk {
	int device;		/* PCI device ID to match */
	int subsystem_vendor;	/* subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on match */
};
7833
7834 struct intel_quirk intel_quirks[] = {
7835         /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
7836         { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
7837         /* HP Mini needs pipe A force quirk (LP: #322104) */
7838         { 0x27ae,0x103c, 0x361a, quirk_pipea_force },
7839
7840         /* Thinkpad R31 needs pipe A force quirk */
7841         { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
7842         /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
7843         { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
7844
7845         /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
7846         { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
7847         /* ThinkPad X40 needs pipe A force quirk */
7848
7849         /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
7850         { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
7851
7852         /* 855 & before need to leave pipe A & dpll A up */
7853         { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7854         { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7855
7856         /* Lenovo U160 cannot use SSC on LVDS */
7857         { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
7858 };
7859
7860 static void intel_init_quirks(struct drm_device *dev)
7861 {
7862         struct pci_dev *d = dev->pdev;
7863         int i;
7864
7865         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
7866                 struct intel_quirk *q = &intel_quirks[i];
7867
7868                 if (d->device == q->device &&
7869                     (d->subsystem_vendor == q->subsystem_vendor ||
7870                      q->subsystem_vendor == PCI_ANY_ID) &&
7871                     (d->subsystem_device == q->subsystem_device ||
7872                      q->subsystem_device == PCI_ANY_ID))
7873                         q->hook(dev);
7874         }
7875 }
7876
7877 /* Disable the VGA plane that we never use */
/*
 * i915_disable_vga - turn off the legacy VGA plane.
 *
 * Pokes the VGA sequencer through the legacy I/O ports (holding the
 * vgaarb legacy I/O resource while doing so), then disables the VGA
 * display via the platform's VGACNTRL register.
 */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives at a different MMIO offset on
	 * PCH-split platforms. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	/* Set bit 5 of sequencer register SR01 (presumably the VGA
	 * screen-off bit -- confirm against VGA programming docs). */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(1, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
7899
/*
 * intel_modeset_init - one-time KMS initialisation for the device.
 *
 * Sets up the DRM mode-config limits, applies PCI quirks, installs the
 * per-chip display function table, creates the CRTCs, disables the
 * legacy VGA plane, registers outputs, applies clock gating, and
 * enables the power-saving features (DRPS/EMON on Ironlake-M, RPS on
 * gen6/gen7).
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = (void *)&intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
	}

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev) || IS_GEN7(dev))
		gen6_enable_rps(dev_priv);

	/* GPU-idle work/timer (see intel_idle_update / intel_gpu_idle_timer
	 * -- presumably drives down-clocking when the GPU goes quiet). */
	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
}
7953
/* Modeset initialization that has to wait until GEM is up — presumably
 * because RC6 setup and the overlay both need GEM objects; confirm
 * against ironlake_enable_rc6()/intel_setup_overlay(). */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	intel_setup_overlay(dev);
}
7961
/* Tear down everything set up by intel_modeset_init()/
 * intel_modeset_gem_init(), in roughly reverse order.  The statement
 * order below is load-bearing: irqs must be gone before mode objects,
 * and per-crtc timers must be stopped before the crtcs are freed. */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	/* Stop connector polling before we start tearing things down. */
	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();


	/* Bump every active CRTC back to full pixel clock.
	 * NOTE(review): presumably so the power-saving teardown below sees
	 * the non-downclocked state — confirm against
	 * intel_increase_pllclock(). */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);

	/* Undo the power-saving features enabled at init time. */
	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev) || IS_GEN7(dev))
		gen6_disable_rps(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);

	/* Shut off idle work before the crtcs get freed. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		del_timer_sync(&intel_crtc->idle_timer);
	}
	del_timer_sync(&dev_priv->idle_timer);
	cancel_work_sync(&dev_priv->idle_work);

	drm_mode_config_cleanup(dev);
}
8011
8012 /*
8013  * Return which encoder is currently attached for connector.
8014  */
8015 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
8016 {
8017         return &intel_attached_encoder(connector)->base;
8018 }
8019
8020 void intel_connector_attach_encoder(struct intel_connector *connector,
8021                                     struct intel_encoder *encoder)
8022 {
8023         connector->encoder = encoder;
8024         drm_mode_connector_attach_encoder(&connector->base,
8025                                           &encoder->base);
8026 }
8027
8028 /*
8029  * set vga decode state - true == enable VGA decode
8030  */
8031 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
8032 {
8033         struct drm_i915_private *dev_priv = dev->dev_private;
8034         u16 gmch_ctrl;
8035
8036         pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
8037         if (state)
8038                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
8039         else
8040                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
8041         pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
8042         return 0;
8043 }
8044
8045 #ifdef CONFIG_DEBUG_FS
8046 #include <linux/seq_file.h>
8047
/* Snapshot of the display registers taken at error-capture time and
 * dumped through intel_display_print_error_state(). */
struct intel_display_error_state {
	/* Cursor plane registers (CURCNTR/CURPOS/CURBASE), one per pipe. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never filled in by the
				 * capture code below */
	} cursor[2];

	/* Pipe configuration and timing registers. */
	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	/* Primary (display) plane registers; surface/tile_offset are only
	 * captured on gen4+. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[2];
};
8078
8079 struct intel_display_error_state *
8080 intel_display_capture_error_state(struct drm_device *dev)
8081 {
8082         drm_i915_private_t *dev_priv = dev->dev_private;
8083         struct intel_display_error_state *error;
8084         int i;
8085
8086         error = kmalloc(sizeof(*error), GFP_ATOMIC);
8087         if (error == NULL)
8088                 return NULL;
8089
8090         for (i = 0; i < 2; i++) {
8091                 error->cursor[i].control = I915_READ(CURCNTR(i));
8092                 error->cursor[i].position = I915_READ(CURPOS(i));
8093                 error->cursor[i].base = I915_READ(CURBASE(i));
8094
8095                 error->plane[i].control = I915_READ(DSPCNTR(i));
8096                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
8097                 error->plane[i].size = I915_READ(DSPSIZE(i));
8098                 error->plane[i].pos= I915_READ(DSPPOS(i));
8099                 error->plane[i].addr = I915_READ(DSPADDR(i));
8100                 if (INTEL_INFO(dev)->gen >= 4) {
8101                         error->plane[i].surface = I915_READ(DSPSURF(i));
8102                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
8103                 }
8104
8105                 error->pipe[i].conf = I915_READ(PIPECONF(i));
8106                 error->pipe[i].source = I915_READ(PIPESRC(i));
8107                 error->pipe[i].htotal = I915_READ(HTOTAL(i));
8108                 error->pipe[i].hblank = I915_READ(HBLANK(i));
8109                 error->pipe[i].hsync = I915_READ(HSYNC(i));
8110                 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
8111                 error->pipe[i].vblank = I915_READ(VBLANK(i));
8112                 error->pipe[i].vsync = I915_READ(VSYNC(i));
8113         }
8114
8115         return error;
8116 }
8117
/* Pretty-print a previously captured display error state into the
 * debugfs seq_file.  Output order mirrors the capture order: pipe,
 * plane, then cursor registers for each of the two pipes. */
void
intel_display_print_error_state(struct seq_file *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	for (i = 0; i < 2; i++) {
		seq_printf(m, "Pipe [%d]:\n", i);
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		seq_printf(m, "Plane [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		/* These two registers are only captured on gen4+. */
		if (INTEL_INFO(dev)->gen >= 4) {
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		seq_printf(m, "Cursor [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}
8153 #endif