/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.deimr &= ~mask;
                return;
        }

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.deimr |= mask;
                return;
        }

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
                                                interrupt_mask);
                return;
        }

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}
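
/*
 * Illustrative only: the two wrappers above differ only in what they pass
 * as enabled_irq_mask.  E.g.
 *
 *      ilk_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
 *      ...
 *      ilk_disable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
 *
 * both call ilk_update_gt_irq() with the same interrupt_mask; enabling
 * passes the bit itself while disabling passes 0, which leaves the bit set
 * (masked off) in GTIMR.  Callers must hold dev_priv->irq_lock, as the
 * assert_spin_locked() above documents.
 */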

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
                                                     interrupt_mask);
                return;
        }

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
                POSTING_READ(GEN6_PMIMR);
        }
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}
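
/*
 * Note the small difference from ilk_update_gt_irq() above:
 * snb_update_pm_irq() compares the new value against the cached
 * dev_priv->pm_irq_mask and skips the MMIO write plus its serialising
 * posting read when nothing actually changed, which matters on the hot
 * RPS interrupt path (see gen6_rps_irq_handler() below).
 */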

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        enum pipe pipe;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->cpu_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->pch_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;

        if (enable)
                ironlake_enable_display_irq(dev_priv, bit);
        else
                ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        if (enable) {
                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

                if (!ivb_can_enable_err_int(dev))
                        return;

                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
                bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

                /* Change the state _after_ we've read out the current one. */
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

                if (!was_enabled &&
                    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
                        DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
                                      pipe_name(pipe));
                }
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         uint32_t interrupt_mask,
                                         uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled &&
            (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
                                                 interrupt_mask);
                return;
        }

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), 0)
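
/*
 * These two macros mirror the ilk_enable_gt_irq()/ilk_disable_gt_irq()
 * pattern above: enabling passes the bits as both arguments, disabling
 * passes 0 as the enabled mask so the bits end up set (masked off) in
 * SDEIMR.
 */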

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
                       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

        if (enable)
                ibx_enable_display_interrupt(dev_priv, bit);
        else
                ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(SERR_INT,
                           SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

                if (!cpt_can_enable_serr_int(dev))
                        return;

                ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
        } else {
                uint32_t tmp = I915_READ(SERR_INT);
                bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

                /* Change the state _after_ we've read out the current one. */
                ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

                if (!was_enabled &&
                    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
                        DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
                                      transcoder_name(pch_transcoder));
                }
        }
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, because there's just one interrupt mask/enable bit
 * for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->cpu_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->cpu_fifo_underrun_disabled = !enable;

        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}
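
/*
 * Illustrative use of the return value (not code from this file): a caller
 * that needs underrun reporting quiet around some critical step can save
 * and restore the previous state:
 *
 *      bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *      ... step that may cause harmless underruns ...
 *      intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */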

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, because there's just one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        /*
         * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
         * has only one pch transcoder A that all pipes can use. To avoid racy
         * pch transcoder -> pipe lookups from interrupt code simply store the
         * underrun statistics in crtc A. Since we never expose this anywhere
         * nor use it outside of the fifo underrun code here using the "wrong"
         * crtc on LPT won't cause issues.
         */

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->pch_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->pch_fifo_underrun_disabled = !enable;

        if (HAS_PCH_IBX(dev))
                ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == mask)
                return;

        /* Enable the interrupt, clear any pending status */
        pipestat |= mask | (mask >> 16);
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == 0)
                return;

        pipestat &= ~mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, pixel, vbl_start;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct intel_crtc *intel_crtc =
                        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                const struct drm_display_mode *mode =
                        &intel_crtc->config.adjusted_mode;

                vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
        } else {
                enum transcoder cpu_transcoder =
                        intel_pipe_to_cpu_transcoder(dev_priv, pipe);
                u32 htotal;

                htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
                vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

                vbl_start *= htotal;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return ((high1 << 8) | low) + (pixel >= vbl_start);
}
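
/*
 * To spell out the cooked counter above: the hardware frame counter
 * increments at the start of the active region, while the vblank counter
 * is expected to increment at vblank start, which comes earlier.  Between
 * vblank start and the end of the frame the hardware value is therefore
 * one behind, and the (pixel >= vbl_start) term corrects for that.
 */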

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t status;

        if (IS_VALLEYVIEW(dev)) {
                status = pipe == PIPE_A ?
                        I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
                        I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

                return I915_READ(VLV_ISR) & status;
        } else if (IS_GEN2(dev)) {
                status = pipe == PIPE_A ?
                        I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
                        I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

                return I915_READ16(ISR) & status;
        } else if (INTEL_INFO(dev)->gen < 5) {
                status = pipe == PIPE_A ?
                        I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
                        I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

                return I915_READ(ISR) & status;
        } else if (INTEL_INFO(dev)->gen < 7) {
                status = pipe == PIPE_A ?
                        DE_PIPEA_VBLANK :
                        DE_PIPEB_VBLANK;

                return I915_READ(DEISR) & status;
        } else {
                switch (pipe) {
                default:
                case PIPE_A:
                        status = DE_PIPEA_VBLANK_IVB;
                        break;
                case PIPE_B:
                        status = DE_PIPEB_VBLANK_IVB;
                        break;
                case PIPE_C:
                        status = DE_PIPEC_VBLANK_IVB;
                        break;
                }

                return I915_READ(DEISR) & status;
        }
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
        int position;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;

        if (!intel_crtc->active) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                if (IS_GEN2(dev))
                        position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
                else
                        position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

                /*
                 * The scanline counter increments at the leading edge
                 * of hsync, ie. it completely misses the active portion
                 * of the line. Fix up the counter at both edges of vblank
                 * to get a more accurate picture whether we're in vblank
                 * or not.
                 */
                in_vbl = intel_pipe_in_vblank(dev, pipe);
                if ((in_vbl && position == vbl_start - 1) ||
                    (!in_vbl && position == vbl_end - 1))
                        position = (position + 1) % vtotal;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;
        }

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}
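
/*
 * Worked example for the sign convention above, with made-up 1024x768-ish
 * timings (vtotal = 806, vbl_start = 768, vbl_end = 806): a raw scanline
 * of 800 yields 800 - 806 = -6, i.e. six lines before vblank ends, while
 * a raw scanline of 100 yields 100 + (806 - 806) = 100 lines since vblank
 * ended.
 */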

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
                                struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        if (old_status == connector->status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->status));

        return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
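
/*
 * I915_REENABLE_HOTPLUG_DELAY is in milliseconds: two minutes after HPD
 * was storm-disabled on a pin, the hotplug_reenable_timer armed below
 * gives hotplug detection another chance.
 */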

static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
                return;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                drm_get_connector_name(connector));
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      drm_get_connector_name(connector), intel_encoder->hpd_pin);
                }
        }
        /*
         * If there were no outputs to poll, polling was disabled, so make
         * sure it's enabled now that we're disabling HPD on some connectors.
         */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_timer(&dev_priv->hotplug_reenable_timer,
                          jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);

        return;
}

static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring);

        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir;
        int new_delay, adj;

        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
        snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        adj = dev_priv->rps.last_adj;
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else
                        adj = 1;
                new_delay = dev_priv->rps.cur_delay + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
                else
                        new_delay = dev_priv->rps.min_delay;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
                new_delay = dev_priv->rps.cur_delay + adj;
        } else { /* unknown event */
                new_delay = dev_priv->rps.cur_delay;
        }

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (new_delay < (int)dev_priv->rps.min_delay)
                new_delay = dev_priv->rps.min_delay;
        if (new_delay > (int)dev_priv->rps.max_delay)
                new_delay = dev_priv->rps.max_delay;
        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

        if (IS_VALLEYVIEW(dev_priv->dev))
                valleyview_set_rps(dev_priv->dev, new_delay);
        else
                gen6_set_rps(dev_priv->dev, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        unsigned long flags;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1 + (slice * 0x200);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!HAS_L3_DPF(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{

        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
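
/*
 * Storm detection, roughly: hpd_last_jiffies marks the start of a burst;
 * interrupts arriving within HPD_STORM_DETECT_PERIOD (1000 ms) of it bump
 * hpd_cnt, and once the count exceeds HPD_STORM_THRESHOLD (5) the pin is
 * marked HPD_MARK_DISABLED and handed over to polling (see
 * i915_hotplug_work_func() above).
 */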

static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                         u32 hotplug_trigger,
                                         const u32 *hpd)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        bool storm_detected = false;

        if (!hotplug_trigger)
                return;

        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {

                WARN(((hpd[i] & hotplug_trigger) &&
                      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
                     "Received HPD interrupt although disabled\n");

                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                dev_priv->hpd_event_bits |= (1 << i);
                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
                                      dev_priv->hpd_stats[i].hpd_cnt);
                }
        }

        if (storm_detected)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev_priv->wq work
         * queue, otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                                         uint32_t crc0, uint32_t crc1,
                                         uint32_t crc2, uint32_t crc3,
                                         uint32_t crc4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
        int head, tail;

        spin_lock(&pipe_crc->lock);

        if (!pipe_crc->entries) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("spurious interrupt\n");
                return;
        }

        head = pipe_crc->head;
        tail = pipe_crc->tail;

        if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("CRC buffer overflowing\n");
                return;
        }

        entry = &pipe_crc->entries[head];

        entry->frame = dev->driver->get_vblank_counter(dev, pipe);
        entry->crc[0] = crc0;
        entry->crc[1] = crc1;
        entry->crc[2] = crc2;
        entry->crc[3] = crc3;
        entry->crc[4] = crc4;

        head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
        pipe_crc->head = head;

        spin_unlock(&pipe_crc->lock);

        wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                             uint32_t crc0, uint32_t crc1,
                             uint32_t crc2, uint32_t crc3,
                             uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t res1, res2;

        if (INTEL_INFO(dev)->gen >= 3)
                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if (pm_iir & GEN6_PM_RPS_EVENTS) {
                spin_lock(&dev_priv->irq_lock);
                dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
                snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
                spin_unlock(&dev_priv->irq_lock);

                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }

        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
                        DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
                        i915_handle_error(dev_priv->dev, false);
                }
        }
}
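
/*
 * This pairs with gen6_pm_rps_work() above: the handler masks the RPS
 * bits in GEN6_PMIMR via snb_disable_pm_irq() before queuing the work,
 * and the work item unmasks them again with snb_enable_pm_irq() once it
 * has latched dev_priv->rps.pm_iir.
 */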
1312
1313 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1314 {
1315         struct drm_device *dev = (struct drm_device *) arg;
1316         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1317         u32 iir, gt_iir, pm_iir;
1318         irqreturn_t ret = IRQ_NONE;
1319         unsigned long irqflags;
1320         int pipe;
1321         u32 pipe_stats[I915_MAX_PIPES];
1322
1323         atomic_inc(&dev_priv->irq_received);
1324
1325         while (true) {
1326                 iir = I915_READ(VLV_IIR);
1327                 gt_iir = I915_READ(GTIIR);
1328                 pm_iir = I915_READ(GEN6_PMIIR);
1329
1330                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1331                         goto out;
1332
1333                 ret = IRQ_HANDLED;
1334
1335                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1336
1337                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1338                 for_each_pipe(pipe) {
1339                         int reg = PIPESTAT(pipe);
1340                         pipe_stats[pipe] = I915_READ(reg);
1341
1342                         /*
1343                          * Clear the PIPE*STAT regs before the IIR
1344                          */
1345                         if (pipe_stats[pipe] & 0x8000ffff) {
1346                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1347                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
1348                                                          pipe_name(pipe));
1349                                 I915_WRITE(reg, pipe_stats[pipe]);
1350                         }
1351                 }
1352                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1353
1354                 for_each_pipe(pipe) {
1355                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1356                                 drm_handle_vblank(dev, pipe);
1357
1358                         if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1359                                 intel_prepare_page_flip(dev, pipe);
1360                                 intel_finish_page_flip(dev, pipe);
1361                         }
1362
1363                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1364                                 i9xx_pipe_crc_irq_handler(dev, pipe);
1365                 }
1366
1367                 /* Consume port.  Then clear IIR or we'll miss events */
1368                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1369                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1370                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1371
1372                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1373                                          hotplug_status);
1374
1375                         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1376
1377                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1378                         I915_READ(PORT_HOTPLUG_STAT);
1379                 }
1380
1381                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1382                         gmbus_irq_handler(dev);
1383
1384                 if (pm_iir)
1385                         gen6_rps_irq_handler(dev_priv, pm_iir);
1386
1387                 I915_WRITE(GTIIR, gt_iir);
1388                 I915_WRITE(GEN6_PMIIR, pm_iir);
1389                 I915_WRITE(VLV_IIR, iir);
1390         }
1391
1392 out:
1393         return ret;
1394 }
1395
1396 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1397 {
1398         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1399         int pipe;
1400         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1401
1402         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1403
1404         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1405                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1406                                SDE_AUDIO_POWER_SHIFT);
1407                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1408                                  port_name(port));
1409         }
1410
1411         if (pch_iir & SDE_AUX_MASK)
1412                 dp_aux_irq_handler(dev);
1413
1414         if (pch_iir & SDE_GMBUS)
1415                 gmbus_irq_handler(dev);
1416
1417         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1418                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1419
1420         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1421                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1422
1423         if (pch_iir & SDE_POISON)
1424                 DRM_ERROR("PCH poison interrupt\n");
1425
1426         if (pch_iir & SDE_FDI_MASK)
1427                 for_each_pipe(pipe)
1428                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1429                                          pipe_name(pipe),
1430                                          I915_READ(FDI_RX_IIR(pipe)));
1431
1432         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1433                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1434
1435         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1436                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1437
1438         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1439                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1440                                                           false))
1441                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1442
1443         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1444                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1445                                                           false))
1446                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1447 }
1448
1449 static void ivb_err_int_handler(struct drm_device *dev)
1450 {
1451         struct drm_i915_private *dev_priv = dev->dev_private;
1452         u32 err_int = I915_READ(GEN7_ERR_INT);
1453         enum pipe pipe;
1454
1455         if (err_int & ERR_INT_POISON)
1456                 DRM_ERROR("Poison interrupt\n");
1457
1458         for_each_pipe(pipe) {
1459                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1460                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1461                                                                   false))
1462                                 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1463                                                  pipe_name(pipe));
1464                 }
1465
1466                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1467                         if (IS_IVYBRIDGE(dev))
1468                                 ivb_pipe_crc_irq_handler(dev, pipe);
1469                         else
1470                                 hsw_pipe_crc_irq_handler(dev, pipe);
1471                 }
1472         }
1473
1474         I915_WRITE(GEN7_ERR_INT, err_int);
1475 }
1476
1477 static void cpt_serr_int_handler(struct drm_device *dev)
1478 {
1479         struct drm_i915_private *dev_priv = dev->dev_private;
1480         u32 serr_int = I915_READ(SERR_INT);
1481
1482         if (serr_int & SERR_INT_POISON)
1483                 DRM_ERROR("PCH poison interrupt\n");
1484
1485         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1486                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1487                                                           false))
1488                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1489
1490         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1491                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1492                                                           false))
1493                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1494
1495         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1496                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1497                                                           false))
1498                         DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1499
1500         I915_WRITE(SERR_INT, serr_int);
1501 }
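
/*
 * intel_set_pch_fifo_underrun_reporting() returns the previous
 * reporting state, so each check above logs an underrun only on the
 * enabled -> disabled transition and a stuck underrun bit cannot flood
 * the log. Inside cpt_serr_int_handler() the same one-shot pattern
 * could be written more compactly, as a sketch:
 */
#if 0	/* illustrative sketch only */
	if ((serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) &&
	    intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false))
		DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
#endif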
1502
1503 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1504 {
1505         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1506         int pipe;
1507         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1508
1509         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1510
1511         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1512                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1513                                SDE_AUDIO_POWER_SHIFT_CPT);
1514                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1515                                  port_name(port));
1516         }
1517
1518         if (pch_iir & SDE_AUX_MASK_CPT)
1519                 dp_aux_irq_handler(dev);
1520
1521         if (pch_iir & SDE_GMBUS_CPT)
1522                 gmbus_irq_handler(dev);
1523
1524         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1525                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1526
1527         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1528                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1529
1530         if (pch_iir & SDE_FDI_MASK_CPT)
1531                 for_each_pipe(pipe)
1532                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1533                                          pipe_name(pipe),
1534                                          I915_READ(FDI_RX_IIR(pipe)));
1535
1536         if (pch_iir & SDE_ERROR_CPT)
1537                 cpt_serr_int_handler(dev);
1538 }
1539
1540 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1541 {
1542         struct drm_i915_private *dev_priv = dev->dev_private;
1543
1544         if (de_iir & DE_AUX_CHANNEL_A)
1545                 dp_aux_irq_handler(dev);
1546
1547         if (de_iir & DE_GSE)
1548                 intel_opregion_asle_intr(dev);
1549
1550         if (de_iir & DE_PIPEA_VBLANK)
1551                 drm_handle_vblank(dev, 0);
1552
1553         if (de_iir & DE_PIPEB_VBLANK)
1554                 drm_handle_vblank(dev, 1);
1555
1556         if (de_iir & DE_POISON)
1557                 DRM_ERROR("Poison interrupt\n");
1558
1559         if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1560                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1561                         DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1562
1563         if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1564                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1565                         DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1566
1567         if (de_iir & DE_PIPEA_CRC_DONE)
1568                 i9xx_pipe_crc_irq_handler(dev, PIPE_A);
1569
1570         if (de_iir & DE_PIPEB_CRC_DONE)
1571                 i9xx_pipe_crc_irq_handler(dev, PIPE_B);
1572
1573         if (de_iir & DE_PLANEA_FLIP_DONE) {
1574                 intel_prepare_page_flip(dev, 0);
1575                 intel_finish_page_flip_plane(dev, 0);
1576         }
1577
1578         if (de_iir & DE_PLANEB_FLIP_DONE) {
1579                 intel_prepare_page_flip(dev, 1);
1580                 intel_finish_page_flip_plane(dev, 1);
1581         }
1582
1583         /* check event from PCH */
1584         if (de_iir & DE_PCH_EVENT) {
1585                 u32 pch_iir = I915_READ(SDEIIR);
1586
1587                 if (HAS_PCH_CPT(dev))
1588                         cpt_irq_handler(dev, pch_iir);
1589                 else
1590                         ibx_irq_handler(dev, pch_iir);
1591
1592                 /* should clear PCH hotplug event before clearing CPU irq */
1593                 I915_WRITE(SDEIIR, pch_iir);
1594         }
1595
1596         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1597                 ironlake_rps_change_irq_handler(dev);
1598 }
1599
1600 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1601 {
1602         struct drm_i915_private *dev_priv = dev->dev_private;
1603         int i;
1604
1605         if (de_iir & DE_ERR_INT_IVB)
1606                 ivb_err_int_handler(dev);
1607
1608         if (de_iir & DE_AUX_CHANNEL_A_IVB)
1609                 dp_aux_irq_handler(dev);
1610
1611         if (de_iir & DE_GSE_IVB)
1612                 intel_opregion_asle_intr(dev);
1613
1614         for (i = 0; i < 3; i++) {
1615                 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1616                         drm_handle_vblank(dev, i);
1617                 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1618                         intel_prepare_page_flip(dev, i);
1619                         intel_finish_page_flip_plane(dev, i);
1620                 }
1621         }
1622
1623         /* check event from PCH */
1624         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1625                 u32 pch_iir = I915_READ(SDEIIR);
1626
1627                 cpt_irq_handler(dev, pch_iir);
1628
1629                 /* clear PCH hotplug event before clearing CPU irq */
1630                 I915_WRITE(SDEIIR, pch_iir);
1631         }
1632 }
1633
1634 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1635 {
1636         struct drm_device *dev = (struct drm_device *) arg;
1637         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1638         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1639         irqreturn_t ret = IRQ_NONE;
1640
1641         atomic_inc(&dev_priv->irq_received);
1642
1643         /* We get interrupts on unclaimed registers, so check for this before we
1644          * do any I915_{READ,WRITE}. */
1645         intel_uncore_check_errors(dev);
1646
1647         /* disable master interrupt before clearing iir  */
1648         de_ier = I915_READ(DEIER);
1649         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1650         POSTING_READ(DEIER);
1651
1652         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1653          * interrupts will be stored on its back queue, and then we'll be
1654          * able to process them after we restore SDEIER (as soon as we restore
1655          * it, we'll get an interrupt if SDEIIR still has something to process
1656          * due to its back queue). */
1657         if (!HAS_PCH_NOP(dev)) {
1658                 sde_ier = I915_READ(SDEIER);
1659                 I915_WRITE(SDEIER, 0);
1660                 POSTING_READ(SDEIER);
1661         }
1662
1663         gt_iir = I915_READ(GTIIR);
1664         if (gt_iir) {
1665                 if (INTEL_INFO(dev)->gen >= 6)
1666                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1667                 else
1668                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1669                 I915_WRITE(GTIIR, gt_iir);
1670                 ret = IRQ_HANDLED;
1671         }
1672
1673         de_iir = I915_READ(DEIIR);
1674         if (de_iir) {
1675                 if (INTEL_INFO(dev)->gen >= 7)
1676                         ivb_display_irq_handler(dev, de_iir);
1677                 else
1678                         ilk_display_irq_handler(dev, de_iir);
1679                 I915_WRITE(DEIIR, de_iir);
1680                 ret = IRQ_HANDLED;
1681         }
1682
1683         if (INTEL_INFO(dev)->gen >= 6) {
1684                 u32 pm_iir = I915_READ(GEN6_PMIIR);
1685                 if (pm_iir) {
1686                         gen6_rps_irq_handler(dev_priv, pm_iir);
1687                         I915_WRITE(GEN6_PMIIR, pm_iir);
1688                         ret = IRQ_HANDLED;
1689                 }
1690         }
1691
1692         I915_WRITE(DEIER, de_ier);
1693         POSTING_READ(DEIER);
1694         if (!HAS_PCH_NOP(dev)) {
1695                 I915_WRITE(SDEIER, sde_ier);
1696                 POSTING_READ(SDEIER);
1697         }
1698
1699         return ret;
1700 }
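
/*
 * The handler above follows the usual master-disable idiom: clear
 * DE_MASTER_IRQ_CONTROL (and zero SDEIER) so nothing new can be
 * signalled while the latched IIR bits are processed, then restore the
 * enables at the end, at which point anything that arrived in between
 * re-triggers the handler. Stripped down to the DEIER half, with
 * handle_all_iirs() as a hypothetical stand-in for the body above:
 */
#if 0	/* illustrative sketch only */
static irqreturn_t masked_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_ier = I915_READ(DEIER);
	irqreturn_t ret;

	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);			/* flush the disable */

	ret = handle_all_iirs(dev);		/* hypothetical stand-in */

	I915_WRITE(DEIER, de_ier);		/* re-arm; pending bits re-fire */
	POSTING_READ(DEIER);
	return ret;
}
#endif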
1701
1702 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1703                                bool reset_completed)
1704 {
1705         struct intel_ring_buffer *ring;
1706         int i;
1707
1708         /*
1709          * Notify all waiters for GPU completion events that reset state has
1710          * been changed, and that they need to restart their wait after
1711          * checking for potential errors (and bail out to drop locks if there is
1712          * a gpu reset pending so that i915_error_work_func can acquire them).
1713          */
1714
1715         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1716         for_each_ring(ring, dev_priv, i)
1717                 wake_up_all(&ring->irq_queue);
1718
1719         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1720         wake_up_all(&dev_priv->pending_flip_queue);
1721
1722         /*
1723          * Signal tasks blocked in i915_gem_wait_for_error that the pending
1724          * reset state is cleared.
1725          */
1726         if (reset_completed)
1727                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1728 }
1729
1730 /**
1731  * i915_error_work_func - do process context error handling work
1732  * @work: work struct
1733  *
1734  * Fire an error uevent so userspace can see that a hang or error
1735  * was detected.
1736  */
1737 static void i915_error_work_func(struct work_struct *work)
1738 {
1739         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1740                                                     work);
1741         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1742                                                     gpu_error);
1743         struct drm_device *dev = dev_priv->dev;
1744         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1745         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1746         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1747         int ret;
1748
1749         kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
1750
1751         /*
1752          * Note that there's only one work item which does gpu resets, so we
1753          * need not worry about concurrent gpu resets potentially incrementing
1754          * error->reset_counter twice. We only need to take care of another
1755          * racing irq/hangcheck declaring the gpu dead for a second time. A
1756          * quick check for that is good enough: schedule_work ensures the
1757          * correct ordering between hang detection and this work item, and since
1758          * the reset in-progress bit is only ever set by code outside of this
1759          * work we don't need to worry about any other races.
1760          */
1761         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1762                 DRM_DEBUG_DRIVER("resetting chip\n");
1763                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
1764                                    reset_event);
1765
1766                 /*
1767                  * All state reset _must_ be completed before we update the
1768                  * reset counter, for otherwise waiters might miss the reset
1769                  * pending state and not properly drop locks, resulting in
1770                  * deadlocks with the reset work.
1771                  */
1772                 ret = i915_reset(dev);
1773
1774                 intel_display_handle_reset(dev);
1775
1776                 if (ret == 0) {
1777                         /*
1778                          * After all the gem state is reset, increment the reset
1779                          * counter and wake up everyone waiting for the reset to
1780                          * complete.
1781                          *
1782                          * Since unlock operations are a one-sided barrier only,
1783                          * we need to insert a barrier here to order any
1784                          * seqno updates before the counter
1785                          * increment.
1786                          */
1787                         smp_mb__before_atomic_inc();
1788                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1789
1790                         kobject_uevent_env(&dev->primary->kdev->kobj,
1791                                            KOBJ_CHANGE, reset_done_event);
1792                 } else {
1793                         atomic_set(&error->reset_counter, I915_WEDGED);
1794                 }
1795
1796                 /*
1797                  * Note: The wake_up also serves as a memory barrier so that
1798                  * waiters see the updated value of the reset counter atomic_t.
1799                  */
1800                 i915_error_wake_up(dev_priv, true);
1801         }
1802 }
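
/*
 * For reference, the reset_counter the woken waiters re-read encodes
 * three states in one atomic_t: bit 0 (I915_RESET_IN_PROGRESS_FLAG)
 * marks a pending reset, the I915_WEDGED value marks a terminally
 * failed one, and an even counter means no reset is in flight. That is
 * why the bare atomic_inc above both clears the in-progress bit and
 * bumps the reset generation. A sketch of the checks built on this
 * encoding (mirroring the i915_drv.h helpers):
 */
#if 0	/* illustrative sketch only */
static inline bool sketch_reset_in_progress(struct i915_gpu_error *error)
{
	/* odd counter value == reset pending */
	return atomic_read(&error->reset_counter) & I915_RESET_IN_PROGRESS_FLAG;
}

static inline bool sketch_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}
#endif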
1803
1804 static void i915_report_and_clear_eir(struct drm_device *dev)
1805 {
1806         struct drm_i915_private *dev_priv = dev->dev_private;
1807         uint32_t instdone[I915_NUM_INSTDONE_REG];
1808         u32 eir = I915_READ(EIR);
1809         int pipe, i;
1810
1811         if (!eir)
1812                 return;
1813
1814         pr_err("render error detected, EIR: 0x%08x\n", eir);
1815
1816         i915_get_extra_instdone(dev, instdone);
1817
1818         if (IS_G4X(dev)) {
1819                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1820                         u32 ipeir = I915_READ(IPEIR_I965);
1821
1822                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1823                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1824                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1825                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1826                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1827                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1828                         I915_WRITE(IPEIR_I965, ipeir);
1829                         POSTING_READ(IPEIR_I965);
1830                 }
1831                 if (eir & GM45_ERROR_PAGE_TABLE) {
1832                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1833                         pr_err("page table error\n");
1834                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1835                         I915_WRITE(PGTBL_ER, pgtbl_err);
1836                         POSTING_READ(PGTBL_ER);
1837                 }
1838         }
1839
1840         if (!IS_GEN2(dev)) {
1841                 if (eir & I915_ERROR_PAGE_TABLE) {
1842                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1843                         pr_err("page table error\n");
1844                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1845                         I915_WRITE(PGTBL_ER, pgtbl_err);
1846                         POSTING_READ(PGTBL_ER);
1847                 }
1848         }
1849
1850         if (eir & I915_ERROR_MEMORY_REFRESH) {
1851                 pr_err("memory refresh error:\n");
1852                 for_each_pipe(pipe)
1853                         pr_err("pipe %c stat: 0x%08x\n",
1854                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1855                 /* pipestat has already been acked */
1856         }
1857         if (eir & I915_ERROR_INSTRUCTION) {
1858                 pr_err("instruction error\n");
1859                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1860                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1861                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1862                 if (INTEL_INFO(dev)->gen < 4) {
1863                         u32 ipeir = I915_READ(IPEIR);
1864
1865                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1866                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1867                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1868                         I915_WRITE(IPEIR, ipeir);
1869                         POSTING_READ(IPEIR);
1870                 } else {
1871                         u32 ipeir = I915_READ(IPEIR_I965);
1872
1873                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1874                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1875                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1876                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1877                         I915_WRITE(IPEIR_I965, ipeir);
1878                         POSTING_READ(IPEIR_I965);
1879                 }
1880         }
1881
1882         I915_WRITE(EIR, eir);
1883         POSTING_READ(EIR);
1884         eir = I915_READ(EIR);
1885         if (eir) {
1886                 /*
1887                  * some errors might have become stuck,
1888                  * mask them.
1889                  */
1890                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1891                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1892                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1893         }
1894 }
1895
1896 /**
1897  * i915_handle_error - handle an error interrupt
1898  * @dev: drm device
 * @wedged: if true, mark the GPU as hung and schedule a reset
1899  *
1900  * Do some basic checking of register state at error interrupt time and
1901  * dump it to the syslog.  Also call i915_capture_error_state() to make
1902  * sure we get a record and make it available in debugfs.  Fire a uevent
1903  * so userspace knows something bad happened (should trigger collection
1904  * of a ring dump etc.).
1905  */
1906 void i915_handle_error(struct drm_device *dev, bool wedged)
1907 {
1908         struct drm_i915_private *dev_priv = dev->dev_private;
1909
1910         i915_capture_error_state(dev);
1911         i915_report_and_clear_eir(dev);
1912
1913         if (wedged) {
1914                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1915                                 &dev_priv->gpu_error.reset_counter);
1916
1917                 /*
1918                  * Wakeup waiting processes so that the reset work function
1919                  * i915_error_work_func doesn't deadlock trying to grab various
1920                  * locks. By bumping the reset counter first, the woken
1921                  * processes will see a reset in progress and back off,
1922                  * releasing their locks and then wait for the reset completion.
1923                  * We must do this for _all_ gpu waiters that might hold locks
1924                  * that the reset work needs to acquire.
1925                  *
1926                  * Note: The wake_up serves as the required memory barrier to
1927                  * ensure that the waiters see the updated value of the reset
1928                  * counter atomic_t.
1929                  */
1930                 i915_error_wake_up(dev_priv, false);
1931         }
1932
1933         /*
1934          * Our reset work can grab modeset locks (since it needs to reset the
1935          * state of outstanding pageflips). Hence it must not be run on our own
1936          * dev_priv->wq work queue, because otherwise the flush_work in the pageflip
1937          * code will deadlock.
1938          */
1939         schedule_work(&dev_priv->gpu_error.work);
1940 }
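
/*
 * The waiters woken by i915_error_wake_up() are expected to re-check
 * the reset state and bail out with their locks dropped instead of
 * going back to sleep; that is what lets the work item scheduled above
 * make progress. A hedged sketch of such a waiter-side check, with
 * check_wedge() standing in for the driver's real helper:
 */
#if 0	/* illustrative sketch only */
static int check_wedge(struct i915_gpu_error *error)
{
	if (i915_reset_in_progress(error)) {
		if (i915_terminally_wedged(error))
			return -EIO;	/* reset failed, give up */
		return -EAGAIN;		/* drop locks, retry after reset */
	}
	return 0;
}
#endif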
1941
1942 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1943 {
1944         drm_i915_private_t *dev_priv = dev->dev_private;
1945         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1946         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1947         struct drm_i915_gem_object *obj;
1948         struct intel_unpin_work *work;
1949         unsigned long flags;
1950         bool stall_detected;
1951
1952         /* Ignore early vblank irqs */
1953         if (intel_crtc == NULL)
1954                 return;
1955
1956         spin_lock_irqsave(&dev->event_lock, flags);
1957         work = intel_crtc->unpin_work;
1958
1959         if (work == NULL ||
1960             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1961             !work->enable_stall_check) {
1962                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1963                 spin_unlock_irqrestore(&dev->event_lock, flags);
1964                 return;
1965         }
1966
1967         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1968         obj = work->pending_flip_obj;
1969         if (INTEL_INFO(dev)->gen >= 4) {
1970                 int dspsurf = DSPSURF(intel_crtc->plane);
1971                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1972                                         i915_gem_obj_ggtt_offset(obj);
1973         } else {
1974                 int dspaddr = DSPADDR(intel_crtc->plane);
1975                 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
1976                                                         crtc->y * crtc->fb->pitches[0] +
1977                                                         crtc->x * crtc->fb->bits_per_pixel/8);
1978         }
1979
1980         spin_unlock_irqrestore(&dev->event_lock, flags);
1981
1982         if (stall_detected) {
1983                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1984                 intel_prepare_page_flip(dev, intel_crtc->plane);
1985         }
1986 }
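
/*
 * The gen < 4 branch above reconstructs the scanout address by hand:
 * DSPADDR holds a linear byte offset, so the expected value is the
 * object's GGTT base plus y rows of pitch plus x pixels of cpp. A
 * worked example, assuming a hypothetical XRGB plane with a 4096-byte
 * pitch panned to (x = 8, y = 2):
 */
#if 0	/* illustrative sketch only */
	u32 base = i915_gem_obj_ggtt_offset(obj);	/* say 0x100000 */
	u32 addr = base + 2 * 4096 + 8 * 4;		/* 0x100000 + 0x2020 */
#endif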
1987
1988 /* Called from drm generic code, passed 'crtc' which
1989  * we use as a pipe index
1990  */
1991 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1992 {
1993         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1994         unsigned long irqflags;
1995
1996         if (!i915_pipe_enabled(dev, pipe))
1997                 return -EINVAL;
1998
1999         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2000         if (INTEL_INFO(dev)->gen >= 4)
2001                 i915_enable_pipestat(dev_priv, pipe,
2002                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2003         else
2004                 i915_enable_pipestat(dev_priv, pipe,
2005                                      PIPE_VBLANK_INTERRUPT_ENABLE);
2006
2007         /* maintain vblank delivery even in deep C-states */
2008         if (dev_priv->info->gen == 3)
2009                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2010         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2011
2012         return 0;
2013 }
2014
2015 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2016 {
2017         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2018         unsigned long irqflags;
2019         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2020                                                      DE_PIPE_VBLANK_ILK(pipe);
2021
2022         if (!i915_pipe_enabled(dev, pipe))
2023                 return -EINVAL;
2024
2025         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2026         ironlake_enable_display_irq(dev_priv, bit);
2027         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2028
2029         return 0;
2030 }
2031
2032 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2033 {
2034         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2035         unsigned long irqflags;
2036         u32 imr;
2037
2038         if (!i915_pipe_enabled(dev, pipe))
2039                 return -EINVAL;
2040
2041         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2042         imr = I915_READ(VLV_IMR);
2043         if (pipe == 0)
2044                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2045         else
2046                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2047         I915_WRITE(VLV_IMR, imr);
2048         i915_enable_pipestat(dev_priv, pipe,
2049                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
2050         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2051
2052         return 0;
2053 }
2054
2055 /* Called from drm generic code, passed 'crtc' which
2056  * we use as a pipe index
2057  */
2058 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2059 {
2060         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2061         unsigned long irqflags;
2062
2063         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2064         if (dev_priv->info->gen == 3)
2065                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2066
2067         i915_disable_pipestat(dev_priv, pipe,
2068                               PIPE_VBLANK_INTERRUPT_ENABLE |
2069                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2070         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2071 }
2072
2073 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2074 {
2075         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2076         unsigned long irqflags;
2077         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2078                                                      DE_PIPE_VBLANK_ILK(pipe);
2079
2080         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2081         ironlake_disable_display_irq(dev_priv, bit);
2082         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2083 }
2084
2085 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2086 {
2087         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2088         unsigned long irqflags;
2089         u32 imr;
2090
2091         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2092         i915_disable_pipestat(dev_priv, pipe,
2093                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2094         imr = I915_READ(VLV_IMR);
2095         if (pipe == 0)
2096                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2097         else
2098                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2099         I915_WRITE(VLV_IMR, imr);
2100         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2101 }
2102
2103 static u32
2104 ring_last_seqno(struct intel_ring_buffer *ring)
2105 {
2106         return list_entry(ring->request_list.prev,
2107                           struct drm_i915_gem_request, list)->seqno;
2108 }
2109
2110 static bool
2111 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2112 {
2113         return (list_empty(&ring->request_list) ||
2114                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2115 }
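
/*
 * i915_seqno_passed() compares seqnos with signed 32-bit arithmetic,
 * roughly (s32)(a - b) >= 0, so the test above stays correct across
 * u32 wraparound as long as the two values are within 2^31 of each
 * other. A quick worked example of why a plain '>=' would be wrong:
 */
#if 0	/* illustrative sketch only */
	u32 a = 0x00000002, b = 0xfffffffe;	/* a is 4 requests after b */
	/* a >= b is false, but (s32)(a - b) == 4, so a has passed b */
#endif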
2116
2117 static struct intel_ring_buffer *
2118 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2119 {
2120         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2121         u32 cmd, ipehr, acthd, acthd_min;
2122
2123         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2124         if ((ipehr & ~(0x3 << 16)) !=
2125             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2126                 return NULL;
2127
2128         /* ACTHD is likely pointing to the dword after the actual command,
2129          * so scan backwards until we find the MBOX.
2130          */
2131         acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2132         acthd_min = max((int)acthd - 3 * 4, 0);
2133         do {
2134                 cmd = ioread32(ring->virtual_start + acthd);
2135                 if (cmd == ipehr)
2136                         break;
2137
2138                 acthd -= 4;
2139                 if (acthd < acthd_min)
2140                         return NULL;
2141         } while (1);
2142
2143         *seqno = ioread32(ring->virtual_start + acthd + 4) + 1;
2144         return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2145 }
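
/*
 * The return statement above decodes which ring the MBOX wait targets
 * from the register-select bits of the MI_SEMAPHORE_MBOX dword (the
 * two bits masked off by ~(0x3 << 16) in the match at the top). With
 * three rings, adding 1 or 2 to the waiter's id modulo 3 picks one of
 * the two possible signallers; using the function's locals:
 */
#if 0	/* illustrative sketch only */
	/* waiter RCS (id 0): +1 -> VCS, +2 -> BCS
	 * waiter VCS (id 1): +1 -> BCS, +2 -> RCS
	 * waiter BCS (id 2): +1 -> RCS, +2 -> VCS */
	int signaller_id = (ring->id + (((ipehr >> 17) & 1) + 1)) % 3;
#endif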
2146
2147 static int semaphore_passed(struct intel_ring_buffer *ring)
2148 {
2149         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2150         struct intel_ring_buffer *signaller;
2151         u32 seqno, ctl;
2152
2153         ring->hangcheck.deadlock = true;
2154
2155         signaller = semaphore_waits_for(ring, &seqno);
2156         if (signaller == NULL || signaller->hangcheck.deadlock)
2157                 return -1;
2158
2159         /* cursory check for an unkickable deadlock */
2160         ctl = I915_READ_CTL(signaller);
2161         if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2162                 return -1;
2163
2164         return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2165 }
2166
2167 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2168 {
2169         struct intel_ring_buffer *ring;
2170         int i;
2171
2172         for_each_ring(ring, dev_priv, i)
2173                 ring->hangcheck.deadlock = false;
2174 }
2175
2176 static enum intel_ring_hangcheck_action
2177 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2178 {
2179         struct drm_device *dev = ring->dev;
2180         struct drm_i915_private *dev_priv = dev->dev_private;
2181         u32 tmp;
2182
2183         if (ring->hangcheck.acthd != acthd)
2184                 return HANGCHECK_ACTIVE;
2185
2186         if (IS_GEN2(dev))
2187                 return HANGCHECK_HUNG;
2188
2189         /* Is the chip hanging on a WAIT_FOR_EVENT?
2190          * If so we can simply poke the RB_WAIT bit
2191          * and break the hang. This should work on
2192          * all but second-generation chipsets.
2193          */
2194         tmp = I915_READ_CTL(ring);
2195         if (tmp & RING_WAIT) {
2196                 DRM_ERROR("Kicking stuck wait on %s\n",
2197                           ring->name);
2198                 i915_handle_error(dev, false);
2199                 I915_WRITE_CTL(ring, tmp);
2200                 return HANGCHECK_KICK;
2201         }
2202
2203         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2204                 switch (semaphore_passed(ring)) {
2205                 default:
2206                         return HANGCHECK_HUNG;
2207                 case 1:
2208                         DRM_ERROR("Kicking stuck semaphore on %s\n",
2209                                   ring->name);
2210                         i915_handle_error(dev, false);
2211                         I915_WRITE_CTL(ring, tmp);
2212                         return HANGCHECK_KICK;
2213                 case 0:
2214                         return HANGCHECK_WAIT;
2215                 }
2216         }
2217
2218         return HANGCHECK_HUNG;
2219 }
2220
2221 /**
2222  * This is called when the chip hasn't reported back with completed
2223  * batchbuffers in a long time. We keep track of seqno progress per ring,
2224  * and if there is no progress the hangcheck score for that ring is
2225  * increased. Further, acthd is inspected to see if the ring is stuck. If
2226  * it is, we kick the ring. If we see no progress on three subsequent
2227  * calls we assume the chip is wedged and try to fix it by resetting it.
2228  */
2229 static void i915_hangcheck_elapsed(unsigned long data)
2230 {
2231         struct drm_device *dev = (struct drm_device *)data;
2232         drm_i915_private_t *dev_priv = dev->dev_private;
2233         struct intel_ring_buffer *ring;
2234         int i;
2235         int busy_count = 0, rings_hung = 0;
2236         bool stuck[I915_NUM_RINGS] = { 0 };
2237 #define BUSY 1
2238 #define KICK 5
2239 #define HUNG 20
2240 #define FIRE 30
2241
2242         if (!i915_enable_hangcheck)
2243                 return;
2244
2245         for_each_ring(ring, dev_priv, i) {
2246                 u32 seqno, acthd;
2247                 bool busy = true;
2248
2249                 semaphore_clear_deadlocks(dev_priv);
2250
2251                 seqno = ring->get_seqno(ring, false);
2252                 acthd = intel_ring_get_active_head(ring);
2253
2254                 if (ring->hangcheck.seqno == seqno) {
2255                         if (ring_idle(ring, seqno)) {
2256                                 ring->hangcheck.action = HANGCHECK_IDLE;
2257
2258                                 if (waitqueue_active(&ring->irq_queue)) {
2259                                         /* Issue a wake-up to catch stuck h/w. */
2260                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2261                                                 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2262                                                           ring->name);
2263                                                 wake_up_all(&ring->irq_queue);
2264                                         }
2265                                         /* Safeguard against driver failure */
2266                                         ring->hangcheck.score += BUSY;
2267                                 } else
2268                                         busy = false;
2269                         } else {
2270                                 /* We always increment the hangcheck score
2271                                  * if the ring is busy and still processing
2272                                  * the same request, so that no single request
2273                                  * can run indefinitely (such as a chain of
2274                                  * batches). The only time we do not increment
2275                                  * the hangcheck score on this ring is if this
2276                                  * ring is in a legitimate wait for another
2277                                  * ring. In that case the waiting ring is a
2278                                  * victim and we want to be sure we catch the
2279                                  * right culprit. Then every time we do kick
2280                                  * the ring, add a small increment to the
2281                                  * score so that we can catch a batch that is
2282                                  * being repeatedly kicked and so responsible
2283                                  * for stalling the machine.
2284                                  */
2285                                 ring->hangcheck.action = ring_stuck(ring,
2286                                                                     acthd);
2287
2288                                 switch (ring->hangcheck.action) {
2289                                 case HANGCHECK_IDLE:
2290                                 case HANGCHECK_WAIT:
2291                                         break;
2292                                 case HANGCHECK_ACTIVE:
2293                                         ring->hangcheck.score += BUSY;
2294                                         break;
2295                                 case HANGCHECK_KICK:
2296                                         ring->hangcheck.score += KICK;
2297                                         break;
2298                                 case HANGCHECK_HUNG:
2299                                         ring->hangcheck.score += HUNG;
2300                                         stuck[i] = true;
2301                                         break;
2302                                 }
2303                         }
2304                 } else {
2305                         ring->hangcheck.action = HANGCHECK_ACTIVE;
2306
2307                         /* Gradually reduce the count so that we catch DoS
2308                          * attempts across multiple batches.
2309                          */
2310                         if (ring->hangcheck.score > 0)
2311                                 ring->hangcheck.score--;
2312                 }
2313
2314                 ring->hangcheck.seqno = seqno;
2315                 ring->hangcheck.acthd = acthd;
2316                 busy_count += busy;
2317         }
2318
2319         for_each_ring(ring, dev_priv, i) {
2320                 if (ring->hangcheck.score > FIRE) {
2321                         DRM_INFO("%s on %s\n",
2322                                  stuck[i] ? "stuck" : "no progress",
2323                                  ring->name);
2324                         rings_hung++;
2325                 }
2326         }
2327
2328         if (rings_hung)
2329                 return i915_handle_error(dev, true);
2330
2331         if (busy_count)
2332                 /* Reset the timer in case the chip hangs without another
2333                  * request being added */
2334                 i915_queue_hangcheck(dev);
2335 }
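
/*
 * The BUSY/KICK/HUNG/FIRE weights set how quickly a ring can cross the
 * rings_hung threshold (score > FIRE, i.e. > 30): a merely active ring
 * accrues BUSY (1) per tick and needs 31 ticks, a repeatedly kicked
 * one accrues KICK (5) and needs 7, and a hard hang accrues HUNG (20)
 * and fires on its second tick. Assuming the usual 1500ms hangcheck
 * period, that is roughly 46.5s, 10.5s and 3s respectively:
 */
#if 0	/* illustrative sketch only */
	int active_ticks = 30 / 1 + 1;		/* 31 ticks ~ 46.5s */
	int kicked_ticks = 30 / 5 + 1;		/*  7 ticks ~ 10.5s */
	int hung_ticks   = 30 / 20 + 1;		/*  2 ticks ~  3.0s */
#endif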
2336
2337 void i915_queue_hangcheck(struct drm_device *dev)
2338 {
2339         struct drm_i915_private *dev_priv = dev->dev_private;
2340         if (!i915_enable_hangcheck)
2341                 return;
2342
2343         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2344                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2345 }
2346
2347 static void ibx_irq_preinstall(struct drm_device *dev)
2348 {
2349         struct drm_i915_private *dev_priv = dev->dev_private;
2350
2351         if (HAS_PCH_NOP(dev))
2352                 return;
2353
2354         /* south display irq */
2355         I915_WRITE(SDEIMR, 0xffffffff);
2356         /*
2357          * SDEIER is also touched by the interrupt handler to work around missed
2358          * PCH interrupts. Hence we can't update it after the interrupt handler
2359          * is enabled - instead we unconditionally enable all PCH interrupt
2360          * sources here, but then only unmask them as needed with SDEIMR.
2361          */
2362         I915_WRITE(SDEIER, 0xffffffff);
2363         POSTING_READ(SDEIER);
2364 }
2365
2366 static void gen5_gt_irq_preinstall(struct drm_device *dev)
2367 {
2368         struct drm_i915_private *dev_priv = dev->dev_private;
2369
2370         /* and GT */
2371         I915_WRITE(GTIMR, 0xffffffff);
2372         I915_WRITE(GTIER, 0x0);
2373         POSTING_READ(GTIER);
2374
2375         if (INTEL_INFO(dev)->gen >= 6) {
2376                 /* and PM */
2377                 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2378                 I915_WRITE(GEN6_PMIER, 0x0);
2379                 POSTING_READ(GEN6_PMIER);
2380         }
2381 }
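
/*
 * The preinstall helpers all follow the same recipe for an i915
 * interrupt register triplet: IMR all-ones masks every source, IER
 * zero stops delivery to the CPU, and the POSTING_READ flushes the
 * writes before the IRQ line is hooked up. The postinstall side (e.g.
 * gen5_gt_irq_postinstall() below) then undoes this in the safe order:
 */
#if 0	/* illustrative sketch only */
	I915_WRITE(GTIIR, I915_READ(GTIIR));	/* ack anything latched */
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask); /* unmask wanted sources */
	I915_WRITE(GTIER, gt_irqs);		/* enable delivery */
	POSTING_READ(GTIER);
#endif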
2382
2383 /* drm_dma.h hooks
2384  */
2385 static void ironlake_irq_preinstall(struct drm_device *dev)
2386 {
2387         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2388
2389         atomic_set(&dev_priv->irq_received, 0);
2390
2391         I915_WRITE(HWSTAM, 0xeffe);
2392
2393         I915_WRITE(DEIMR, 0xffffffff);
2394         I915_WRITE(DEIER, 0x0);
2395         POSTING_READ(DEIER);
2396
2397         gen5_gt_irq_preinstall(dev);
2398
2399         ibx_irq_preinstall(dev);
2400 }
2401
2402 static void valleyview_irq_preinstall(struct drm_device *dev)
2403 {
2404         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2405         int pipe;
2406
2407         atomic_set(&dev_priv->irq_received, 0);
2408
2409         /* VLV magic */
2410         I915_WRITE(VLV_IMR, 0);
2411         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2412         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2413         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2414
2415         /* and GT */
2416         I915_WRITE(GTIIR, I915_READ(GTIIR));
2417         I915_WRITE(GTIIR, I915_READ(GTIIR));
2418
2419         gen5_gt_irq_preinstall(dev);
2420
2421         I915_WRITE(DPINVGTT, 0xff);
2422
2423         I915_WRITE(PORT_HOTPLUG_EN, 0);
2424         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2425         for_each_pipe(pipe)
2426                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2427         I915_WRITE(VLV_IIR, 0xffffffff);
2428         I915_WRITE(VLV_IMR, 0xffffffff);
2429         I915_WRITE(VLV_IER, 0x0);
2430         POSTING_READ(VLV_IER);
2431 }
2432
2433 static void ibx_hpd_irq_setup(struct drm_device *dev)
2434 {
2435         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2436         struct drm_mode_config *mode_config = &dev->mode_config;
2437         struct intel_encoder *intel_encoder;
2438         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2439
2440         if (HAS_PCH_IBX(dev)) {
2441                 hotplug_irqs = SDE_HOTPLUG_MASK;
2442                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2443                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2444                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
2445         } else {
2446                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2447                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2448                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2449                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
2450         }
2451
2452         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2453
2454         /*
2455          * Enable digital hotplug on the PCH, and configure the DP short pulse
2456          * duration to 2ms (which is the minimum in the DisplayPort spec)
2457          *
2458          * This register is the same on all known PCH chips.
2459          */
2460         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2461         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2462         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2463         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2464         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2465         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2466 }
2467
2468 static void ibx_irq_postinstall(struct drm_device *dev)
2469 {
2470         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2471         u32 mask;
2472
2473         if (HAS_PCH_NOP(dev))
2474                 return;
2475
2476         if (HAS_PCH_IBX(dev)) {
2477                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2478                        SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2479         } else {
2480                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2481
2482                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2483         }
2484
2485         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2486         I915_WRITE(SDEIMR, ~mask);
2487 }
2488
2489 static void gen5_gt_irq_postinstall(struct drm_device *dev)
2490 {
2491         struct drm_i915_private *dev_priv = dev->dev_private;
2492         u32 pm_irqs, gt_irqs;
2493
2494         pm_irqs = gt_irqs = 0;
2495
2496         dev_priv->gt_irq_mask = ~0;
2497         if (HAS_L3_DPF(dev)) {
2498                 /* L3 parity interrupt is always unmasked. */
2499                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2500                 gt_irqs |= GT_PARITY_ERROR(dev);
2501         }
2502
2503         gt_irqs |= GT_RENDER_USER_INTERRUPT;
2504         if (IS_GEN5(dev)) {
2505                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2506                            ILK_BSD_USER_INTERRUPT;
2507         } else {
2508                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2509         }
2510
2511         I915_WRITE(GTIIR, I915_READ(GTIIR));
2512         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2513         I915_WRITE(GTIER, gt_irqs);
2514         POSTING_READ(GTIER);
2515
2516         if (INTEL_INFO(dev)->gen >= 6) {
2517                 pm_irqs |= GEN6_PM_RPS_EVENTS;
2518
2519                 if (HAS_VEBOX(dev))
2520                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2521
2522                 dev_priv->pm_irq_mask = 0xffffffff;
2523                 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2524                 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2525                 I915_WRITE(GEN6_PMIER, pm_irqs);
2526                 POSTING_READ(GEN6_PMIER);
2527         }
2528 }
2529
2530 static int ironlake_irq_postinstall(struct drm_device *dev)
2531 {
2532         unsigned long irqflags;
2533         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2534         u32 display_mask, extra_mask;
2535
2536         if (INTEL_INFO(dev)->gen >= 7) {
2537                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2538                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2539                                 DE_PLANEB_FLIP_DONE_IVB |
2540                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2541                                 DE_ERR_INT_IVB);
2542                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2543                               DE_PIPEA_VBLANK_IVB);
2544
2545                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2546         } else {
2547                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2548                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2549                                 DE_AUX_CHANNEL_A |
2550                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2551                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2552                                 DE_POISON);
2553                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2554         }
2555
2556         dev_priv->irq_mask = ~display_mask;
2557
2558         /* should always be able to generate irqs */
2559         I915_WRITE(DEIIR, I915_READ(DEIIR));
2560         I915_WRITE(DEIMR, dev_priv->irq_mask);
2561         I915_WRITE(DEIER, display_mask | extra_mask);
2562         POSTING_READ(DEIER);
2563
2564         gen5_gt_irq_postinstall(dev);
2565
2566         ibx_irq_postinstall(dev);
2567
2568         if (IS_IRONLAKE_M(dev)) {
2569                 /* Enable PCU event interrupts
2570                  *
2571                  * spinlocking not required here for correctness since interrupt
2572                  * setup is guaranteed to run in single-threaded context. But we
2573                  * need it to make the assert_spin_locked happy. */
2574                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2575                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2576                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2577         }
2578
2579         return 0;
2580 }
2581
2582 static int valleyview_irq_postinstall(struct drm_device *dev)
2583 {
2584         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2585         u32 enable_mask;
2586         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2587                 PIPE_CRC_DONE_ENABLE;
2588         unsigned long irqflags;
2589
2590         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2591         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2592                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2593                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2594                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2595
2596         /*
2597          * Leave vblank interrupts masked initially; enable/disable will
2598          * toggle them based on usage.
2599          */
2600         dev_priv->irq_mask = (~enable_mask) |
2601                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2602                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2603
2604         I915_WRITE(PORT_HOTPLUG_EN, 0);
2605         POSTING_READ(PORT_HOTPLUG_EN);
2606
2607         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2608         I915_WRITE(VLV_IER, enable_mask);
2609         I915_WRITE(VLV_IIR, 0xffffffff);
2610         I915_WRITE(PIPESTAT(0), 0xffff);
2611         I915_WRITE(PIPESTAT(1), 0xffff);
2612         POSTING_READ(VLV_IER);
2613
2614         /* Interrupt setup is already guaranteed to be single-threaded; this is
2615          * just to make the assert_spin_locked check happy. */
2616         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2617         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2618         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2619         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2620         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2621
2622         I915_WRITE(VLV_IIR, 0xffffffff);
2623         I915_WRITE(VLV_IIR, 0xffffffff);
2624
2625         gen5_gt_irq_postinstall(dev);
2626
2627         /* ack & enable invalid PTE error interrupts */
2628 #if 0 /* FIXME: add support to irq handler for checking these bits */
2629         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2630         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2631 #endif
2632
2633         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2634
2635         return 0;
2636 }
2637
2638 static void valleyview_irq_uninstall(struct drm_device *dev)
2639 {
2640         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2641         int pipe;
2642
2643         if (!dev_priv)
2644                 return;
2645
2646         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2647
2648         for_each_pipe(pipe)
2649                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2650
2651         I915_WRITE(HWSTAM, 0xffffffff);
2652         I915_WRITE(PORT_HOTPLUG_EN, 0);
2653         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2654         for_each_pipe(pipe)
2655                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2656         I915_WRITE(VLV_IIR, 0xffffffff);
2657         I915_WRITE(VLV_IMR, 0xffffffff);
2658         I915_WRITE(VLV_IER, 0x0);
2659         POSTING_READ(VLV_IER);
2660 }
2661
2662 static void ironlake_irq_uninstall(struct drm_device *dev)
2663 {
2664         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2665
2666         if (!dev_priv)
2667                 return;
2668
2669         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2670
2671         I915_WRITE(HWSTAM, 0xffffffff);
2672
2673         I915_WRITE(DEIMR, 0xffffffff);
2674         I915_WRITE(DEIER, 0x0);
2675         I915_WRITE(DEIIR, I915_READ(DEIIR));
2676         if (IS_GEN7(dev))
2677                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2678
2679         I915_WRITE(GTIMR, 0xffffffff);
2680         I915_WRITE(GTIER, 0x0);
2681         I915_WRITE(GTIIR, I915_READ(GTIIR));
2682
2683         if (HAS_PCH_NOP(dev))
2684                 return;
2685
2686         I915_WRITE(SDEIMR, 0xffffffff);
2687         I915_WRITE(SDEIER, 0x0);
2688         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2689         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2690                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2691 }
2692
2693 static void i8xx_irq_preinstall(struct drm_device * dev)
2694 {
2695         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2696         int pipe;
2697
2698         atomic_set(&dev_priv->irq_received, 0);
2699
2700         for_each_pipe(pipe)
2701                 I915_WRITE(PIPESTAT(pipe), 0);
2702         I915_WRITE16(IMR, 0xffff);
2703         I915_WRITE16(IER, 0x0);
2704         POSTING_READ16(IER);
2705 }
2706
2707 static int i8xx_irq_postinstall(struct drm_device *dev)
2708 {
2709         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2710         unsigned long irqflags;
2711
2712         I915_WRITE16(EMR,
2713                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2714
2715         /* Unmask the interrupts that we always want on. */
2716         dev_priv->irq_mask =
2717                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2718                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2719                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2720                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2721                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2722         I915_WRITE16(IMR, dev_priv->irq_mask);
2723
2724         I915_WRITE16(IER,
2725                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2726                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2727                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2728                      I915_USER_INTERRUPT);
2729         POSTING_READ16(IER);
2730
2731         /* Interrupt setup is already guaranteed to be single-threaded; this is
2732          * just to make the assert_spin_locked check happy. */
2733         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2734         i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE);
2735         i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE);
2736         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2737
2738         return 0;
2739 }
2740
2741 /*
2742  * Returns true when a page flip has completed.
2743  */
2744 static bool i8xx_handle_vblank(struct drm_device *dev,
2745                                int pipe, u16 iir)
2746 {
2747         drm_i915_private_t *dev_priv = dev->dev_private;
2748         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2749
2750         if (!drm_handle_vblank(dev, pipe))
2751                 return false;
2752
2753         if ((iir & flip_pending) == 0)
2754                 return false;
2755
2756         intel_prepare_page_flip(dev, pipe);
2757
2758         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2759          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2760          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2761          * the flip is completed (no longer pending). Since this doesn't raise
2762          * an interrupt per se, we watch for the change at vblank.
2763          */
2764         if (I915_READ16(ISR) & flip_pending)
2765                 return false;
2766
2767         intel_finish_page_flip(dev, pipe);
2768
2769         return true;
2770 }
2771
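     /*
      * Gen2 interrupt handler: loop until no unmasked IIR bits remain.
      * The plane flip-pending bits are handled via the vblank logic in
      * i8xx_handle_vblank(), and the pipestat registers are snapshotted
      * and cleared under irq_lock before IIR is acked so that no pipe
      * events are lost.
      */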
2772 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2773 {
2774         struct drm_device *dev = (struct drm_device *) arg;
2775         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2776         u16 iir, new_iir;
2777         u32 pipe_stats[2];
2778         unsigned long irqflags;
2779         int pipe;
2780         u16 flip_mask =
2781                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2782                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2783
2784         atomic_inc(&dev_priv->irq_received);
2785
2786         iir = I915_READ16(IIR);
2787         if (iir == 0)
2788                 return IRQ_NONE;
2789
2790         while (iir & ~flip_mask) {
2791                 /* Can't rely on pipestat interrupt bit in iir as it might
2792                  * have been cleared after the pipestat interrupt was received.
2793                  * It doesn't set the bit in iir again, but it still produces
2794                  * interrupts (for non-MSI).
2795                  */
2796                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2797                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2798                         i915_handle_error(dev, false);
2799
2800                 for_each_pipe(pipe) {
2801                         int reg = PIPESTAT(pipe);
2802                         pipe_stats[pipe] = I915_READ(reg);
2803
2804                         /*
2805                          * Clear the PIPE*STAT regs before the IIR
2806                          */
2807                         if (pipe_stats[pipe] & 0x8000ffff) {
2808                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2809                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2810                                                          pipe_name(pipe));
2811                                 I915_WRITE(reg, pipe_stats[pipe]);
2812                         }
2813                 }
2814                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2815
2816                 I915_WRITE16(IIR, iir & ~flip_mask);
2817                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2818
2819                 i915_update_dri1_breadcrumb(dev);
2820
2821                 if (iir & I915_USER_INTERRUPT)
2822                         notify_ring(dev, &dev_priv->ring[RCS]);
2823
2824                 for_each_pipe(pipe) {
2825                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2826                             i8xx_handle_vblank(dev, pipe, iir))
2827                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2828
2829                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2830                                 i9xx_pipe_crc_irq_handler(dev, pipe);
2831                 }
2832
2833                 iir = new_iir;
2834         }
2835
2836         return IRQ_HANDLED;
2837 }
2838
2839 static void i8xx_irq_uninstall(struct drm_device * dev)
2840 {
2841         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2842         int pipe;
2843
2844         for_each_pipe(pipe) {
2845                 /* Clear enable bits; then clear status bits */
2846                 I915_WRITE(PIPESTAT(pipe), 0);
2847                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2848         }
2849         I915_WRITE16(IMR, 0xffff);
2850         I915_WRITE16(IER, 0x0);
2851         I915_WRITE16(IIR, I915_READ16(IIR));
2852 }
2853
2854 static void i915_irq_preinstall(struct drm_device * dev)
2855 {
2856         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2857         int pipe;
2858
2859         atomic_set(&dev_priv->irq_received, 0);
2860
2861         if (I915_HAS_HOTPLUG(dev)) {
2862                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2863                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2864         }
2865
2866         I915_WRITE16(HWSTAM, 0xeffe);
2867         for_each_pipe(pipe)
2868                 I915_WRITE(PIPESTAT(pipe), 0);
2869         I915_WRITE(IMR, 0xffffffff);
2870         I915_WRITE(IER, 0x0);
2871         POSTING_READ(IER);
2872 }
2873
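     /*
      * Gen3 postinstall: like gen2, but additionally enables ASLE
      * (opregion) events and, on parts with hotplug support, the display
      * port interrupt; the individual hotplug detect bits are programmed
      * later via i915_hpd_irq_setup().
      */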
2874 static int i915_irq_postinstall(struct drm_device *dev)
2875 {
2876         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2877         u32 enable_mask;
2878         unsigned long irqflags;
2879
2880         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2881
2882         /* Unmask the interrupts that we always want on. */
2883         dev_priv->irq_mask =
2884                 ~(I915_ASLE_INTERRUPT |
2885                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2886                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2887                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2888                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2889                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2890
2891         enable_mask =
2892                 I915_ASLE_INTERRUPT |
2893                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2894                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2895                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2896                 I915_USER_INTERRUPT;
2897
2898         if (I915_HAS_HOTPLUG(dev)) {
2899                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2900                 POSTING_READ(PORT_HOTPLUG_EN);
2901
2902                 /* Enable in IER... */
2903                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2904                 /* and unmask in IMR */
2905                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2906         }
2907
2908         I915_WRITE(IMR, dev_priv->irq_mask);
2909         I915_WRITE(IER, enable_mask);
2910         POSTING_READ(IER);
2911
2912         i915_enable_asle_pipestat(dev);
2913
2914         /* Interrupt setup is already guaranteed to be single-threaded; this is
2915          * just to make the assert_spin_locked check happy. */
2916         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2917         i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE);
2918         i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE);
2919         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2920
2921         return 0;
2922 }
2923
2924 /*
2925  * Returns true when a page flip has completed.
2926  */
2927 static bool i915_handle_vblank(struct drm_device *dev,
2928                                int plane, int pipe, u32 iir)
2929 {
2930         drm_i915_private_t *dev_priv = dev->dev_private;
2931         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2932
2933         if (!drm_handle_vblank(dev, pipe))
2934                 return false;
2935
2936         if ((iir & flip_pending) == 0)
2937                 return false;
2938
2939         intel_prepare_page_flip(dev, plane);
2940
2941         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2942          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2943          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2944          * the flip is completed (no longer pending). Since this doesn't raise
2945          * an interrupt per se, we watch for the change at vblank.
2946          */
2947         if (I915_READ(ISR) & flip_pending)
2948                 return false;
2949
2950         intel_finish_page_flip(dev, pipe);
2951
2952         return true;
2953 }
2954
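     /*
      * Gen3 interrupt handler.  Note the plane/pipe swap for vblank
      * handling: on mobile gen3 parts plane A is scanned out on pipe B
      * (and vice versa), hence the IS_MOBILE() check below when mapping a
      * pipe to its plane.
      */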
2955 static irqreturn_t i915_irq_handler(int irq, void *arg)
2956 {
2957         struct drm_device *dev = (struct drm_device *) arg;
2958         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2959         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2960         unsigned long irqflags;
2961         u32 flip_mask =
2962                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2963                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2964         int pipe, ret = IRQ_NONE;
2965
2966         atomic_inc(&dev_priv->irq_received);
2967
2968         iir = I915_READ(IIR);
2969         do {
2970                 bool irq_received = (iir & ~flip_mask) != 0;
2971                 bool blc_event = false;
2972
2973                 /* Can't rely on pipestat interrupt bit in iir as it might
2974                  * have been cleared after the pipestat interrupt was received.
2975                  * It doesn't set the bit in iir again, but it still produces
2976                  * interrupts (for non-MSI).
2977                  */
2978                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2979                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2980                         i915_handle_error(dev, false);
2981
2982                 for_each_pipe(pipe) {
2983                         int reg = PIPESTAT(pipe);
2984                         pipe_stats[pipe] = I915_READ(reg);
2985
2986                         /* Clear the PIPE*STAT regs before the IIR */
2987                         if (pipe_stats[pipe] & 0x8000ffff) {
2988                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2989                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2990                                                          pipe_name(pipe));
2991                                 I915_WRITE(reg, pipe_stats[pipe]);
2992                                 irq_received = true;
2993                         }
2994                 }
2995                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2996
2997                 if (!irq_received)
2998                         break;
2999
3000                 /* Consume port.  Then clear IIR or we'll miss events */
3001                 if ((I915_HAS_HOTPLUG(dev)) &&
3002                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3003                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3004                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3005
3006                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3007                                          hotplug_status);
3008
3009                         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3010
3011                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3012                         POSTING_READ(PORT_HOTPLUG_STAT);
3013                 }
3014
3015                 I915_WRITE(IIR, iir & ~flip_mask);
3016                 new_iir = I915_READ(IIR); /* Flush posted writes */
3017
3018                 if (iir & I915_USER_INTERRUPT)
3019                         notify_ring(dev, &dev_priv->ring[RCS]);
3020
3021                 for_each_pipe(pipe) {
3022                         int plane = pipe;
3023                         if (IS_MOBILE(dev))
3024                                 plane = !plane;
3025
3026                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3027                             i915_handle_vblank(dev, plane, pipe, iir))
3028                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3029
3030                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3031                                 blc_event = true;
3032
3033                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3034                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3035                 }
3036
3037                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3038                         intel_opregion_asle_intr(dev);
3039
3040                 /* With MSI, interrupts are only generated when iir
3041                  * transitions from zero to nonzero.  If another bit got
3042                  * set while we were handling the existing iir bits, then
3043                  * we would never get another interrupt.
3044                  *
3045                  * This is fine on non-MSI as well, as if we hit this path
3046                  * we avoid exiting the interrupt handler only to generate
3047                  * another one.
3048                  *
3049                  * Note that for MSI this could cause a stray interrupt report
3050                  * if an interrupt landed in the time between writing IIR and
3051                  * the posting read.  This should be rare enough to never
3052                  * trigger the 99% of 100,000 interrupts test for disabling
3053                  * stray interrupts.
3054                  */
3055                 ret = IRQ_HANDLED;
3056                 iir = new_iir;
3057         } while (iir & ~flip_mask);
3058
3059         i915_update_dri1_breadcrumb(dev);
3060
3061         return ret;
3062 }
3063
3064 static void i915_irq_uninstall(struct drm_device * dev)
3065 {
3066         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3067         int pipe;
3068
3069         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3070
3071         if (I915_HAS_HOTPLUG(dev)) {
3072                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3073                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3074         }
3075
3076         I915_WRITE16(HWSTAM, 0xffff);
3077         for_each_pipe(pipe) {
3078                 /* Clear enable bits; then clear status bits */
3079                 I915_WRITE(PIPESTAT(pipe), 0);
3080                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3081         }
3082         I915_WRITE(IMR, 0xffffffff);
3083         I915_WRITE(IER, 0x0);
3084
3085         I915_WRITE(IIR, I915_READ(IIR));
3086 }
3087
3088 static void i965_irq_preinstall(struct drm_device * dev)
3089 {
3090         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3091         int pipe;
3092
3093         atomic_set(&dev_priv->irq_received, 0);
3094
3095         I915_WRITE(PORT_HOTPLUG_EN, 0);
3096         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3097
3098         I915_WRITE(HWSTAM, 0xeffe);
3099         for_each_pipe(pipe)
3100                 I915_WRITE(PIPESTAT(pipe), 0);
3101         I915_WRITE(IMR, 0xffffffff);
3102         I915_WRITE(IER, 0x0);
3103         POSTING_READ(IER);
3104 }
3105
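     /*
      * Gen4 postinstall: the plane flip-pending bits are excluded from
      * IER (flip completion is detected via the vblank path), GMBUS and
      * pipe CRC pipestat events are enabled, and G4X parts get extended
      * error detection in EMR plus the BSD ring user interrupt.
      */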
3106 static int i965_irq_postinstall(struct drm_device *dev)
3107 {
3108         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3109         u32 enable_mask;
3110         u32 error_mask;
3111         unsigned long irqflags;
3112
3113         /* Unmask the interrupts that we always want on. */
3114         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3115                                I915_DISPLAY_PORT_INTERRUPT |
3116                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3117                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3118                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3119                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3120                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3121
3122         enable_mask = ~dev_priv->irq_mask;
3123         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3124                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3125         enable_mask |= I915_USER_INTERRUPT;
3126
3127         if (IS_G4X(dev))
3128                 enable_mask |= I915_BSD_USER_INTERRUPT;
3129
3130         /* Interrupt setup is already guaranteed to be single-threaded; this is
3131          * just to make the assert_spin_locked check happy. */
3132         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3133         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3134         i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE);
3135         i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE);
3136         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3137
3138         /*
3139          * Enable some error detection, note the instruction error mask
3140          * bit is reserved, so we leave it masked.
3141          */
3142         if (IS_G4X(dev)) {
3143                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3144                                GM45_ERROR_MEM_PRIV |
3145                                GM45_ERROR_CP_PRIV |
3146                                I915_ERROR_MEMORY_REFRESH);
3147         } else {
3148                 error_mask = ~(I915_ERROR_PAGE_TABLE |
3149                                I915_ERROR_MEMORY_REFRESH);
3150         }
3151         I915_WRITE(EMR, error_mask);
3152
3153         I915_WRITE(IMR, dev_priv->irq_mask);
3154         I915_WRITE(IER, enable_mask);
3155         POSTING_READ(IER);
3156
3157         I915_WRITE(PORT_HOTPLUG_EN, 0);
3158         POSTING_READ(PORT_HOTPLUG_EN);
3159
3160         i915_enable_asle_pipestat(dev);
3161
3162         return 0;
3163 }
3164
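     /*
      * Program PORT_HOTPLUG_EN from the per-pin hpd_stats state.  Must be
      * called with dev_priv->irq_lock held, as asserted below; it runs
      * both at init time and when the hotplug reenable timer restores
      * previously disabled pins.
      */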
3165 static void i915_hpd_irq_setup(struct drm_device *dev)
3166 {
3167         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3168         struct drm_mode_config *mode_config = &dev->mode_config;
3169         struct intel_encoder *intel_encoder;
3170         u32 hotplug_en;
3171
3172         assert_spin_locked(&dev_priv->irq_lock);
3173
3174         if (I915_HAS_HOTPLUG(dev)) {
3175                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3176                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3177                 /* Note that HDMI and DP share hotplug bits, and that the
3178                  * enable bits are the same for all generations. */
3179                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3180                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3181                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3182                 /* Programming the CRT detection parameters tends
3183                  * to generate a spurious hotplug event about three
3184                  * seconds later.  So just do it once.
3185                  */
3186                 if (IS_G4X(dev))
3187                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3188                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3189                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3190
3191                 /* Ignore TV since it's buggy */
3192                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3193         }
3194 }
3195
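     /*
      * Gen4 interrupt handler: largely the gen3 loop, but without the
      * plane/pipe swap and with G4X-specific hotplug status bits, the BSD
      * ring user interrupt and GMBUS events on pipe A.
      */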
3196 static irqreturn_t i965_irq_handler(int irq, void *arg)
3197 {
3198         struct drm_device *dev = (struct drm_device *) arg;
3199         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3200         u32 iir, new_iir;
3201         u32 pipe_stats[I915_MAX_PIPES];
3202         unsigned long irqflags;
3203         bool irq_received;
3204         int ret = IRQ_NONE, pipe;
3205         u32 flip_mask =
3206                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3207                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3208
3209         atomic_inc(&dev_priv->irq_received);
3210
3211         iir = I915_READ(IIR);
3212
3213         for (;;) {
3214                 bool blc_event = false;
3215
3216                 irq_received = (iir & ~flip_mask) != 0;
3217
3218                 /* Can't rely on pipestat interrupt bit in iir as it might
3219                  * have been cleared after the pipestat interrupt was received.
3220                  * It doesn't set the bit in iir again, but it still produces
3221                  * interrupts (for non-MSI).
3222                  */
3223                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3224                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3225                         i915_handle_error(dev, false);
3226
3227                 for_each_pipe(pipe) {
3228                         int reg = PIPESTAT(pipe);
3229                         pipe_stats[pipe] = I915_READ(reg);
3230
3231                         /*
3232                          * Clear the PIPE*STAT regs before the IIR
3233                          */
3234                         if (pipe_stats[pipe] & 0x8000ffff) {
3235                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3236                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3237                                                          pipe_name(pipe));
3238                                 I915_WRITE(reg, pipe_stats[pipe]);
3239                                 irq_received = true;
3240                         }
3241                 }
3242                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3243
3244                 if (!irq_received)
3245                         break;
3246
3247                 ret = IRQ_HANDLED;
3248
3249                 /* Consume port.  Then clear IIR or we'll miss events */
3250                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3251                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3252                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3253                                                                   HOTPLUG_INT_STATUS_G4X :
3254                                                                   HOTPLUG_INT_STATUS_I915);
3255
3256                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3257                                          hotplug_status);
3258
3259                         intel_hpd_irq_handler(dev, hotplug_trigger,
3260                                               IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3261
3262                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3263                         POSTING_READ(PORT_HOTPLUG_STAT);
3264                 }
3265
3266                 I915_WRITE(IIR, iir & ~flip_mask);
3267                 new_iir = I915_READ(IIR); /* Flush posted writes */
3268
3269                 if (iir & I915_USER_INTERRUPT)
3270                         notify_ring(dev, &dev_priv->ring[RCS]);
3271                 if (iir & I915_BSD_USER_INTERRUPT)
3272                         notify_ring(dev, &dev_priv->ring[VCS]);
3273
3274                 for_each_pipe(pipe) {
3275                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3276                             i915_handle_vblank(dev, pipe, pipe, iir))
3277                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3278
3279                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3280                                 blc_event = true;
3281
3282                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3283                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3284                 }
3285
3287                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3288                         intel_opregion_asle_intr(dev);
3289
3290                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3291                         gmbus_irq_handler(dev);
3292
3293                 /* With MSI, interrupts are only generated when iir
3294                  * transitions from zero to nonzero.  If another bit got
3295                  * set while we were handling the existing iir bits, then
3296                  * we would never get another interrupt.
3297                  *
3298                  * This is fine on non-MSI as well, as if we hit this path
3299                  * we avoid exiting the interrupt handler only to generate
3300                  * another one.
3301                  *
3302                  * Note that for MSI this could cause a stray interrupt report
3303                  * if an interrupt landed in the time between writing IIR and
3304                  * the posting read.  This should be rare enough to never
3305                  * trigger the 99% of 100,000 interrupts test for disabling
3306                  * stray interrupts.
3307                  */
3308                 iir = new_iir;
3309         }
3310
3311         i915_update_dri1_breadcrumb(dev);
3312
3313         return ret;
3314 }
3315
3316 static void i965_irq_uninstall(struct drm_device * dev)
3317 {
3318         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3319         int pipe;
3320
3321         if (!dev_priv)
3322                 return;
3323
3324         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3325
3326         I915_WRITE(PORT_HOTPLUG_EN, 0);
3327         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3328
3329         I915_WRITE(HWSTAM, 0xffffffff);
3330         for_each_pipe(pipe)
3331                 I915_WRITE(PIPESTAT(pipe), 0);
3332         I915_WRITE(IMR, 0xffffffff);
3333         I915_WRITE(IER, 0x0);
3334
3335         for_each_pipe(pipe)
3336                 I915_WRITE(PIPESTAT(pipe),
3337                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3338         I915_WRITE(IIR, I915_READ(IIR));
3339 }
3340
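     /*
      * Timer callback that re-enables hotplug detection on any pin that
      * was marked HPD_DISABLED (e.g. after an interrupt storm): it
      * restores the connectors' polling mode and reprograms the hardware
      * through the platform hpd_irq_setup hook, all under irq_lock.
      */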
3341 static void i915_reenable_hotplug_timer_func(unsigned long data)
3342 {
3343         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3344         struct drm_device *dev = dev_priv->dev;
3345         struct drm_mode_config *mode_config = &dev->mode_config;
3346         unsigned long irqflags;
3347         int i;
3348
3349         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3350         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3351                 struct drm_connector *connector;
3352
3353                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3354                         continue;
3355
3356                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3357
3358                 list_for_each_entry(connector, &mode_config->connector_list, head) {
3359                         struct intel_connector *intel_connector = to_intel_connector(connector);
3360
3361                         if (intel_connector->encoder->hpd_pin == i) {
3362                                 if (connector->polled != intel_connector->polled)
3363                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3364                                                          drm_get_connector_name(connector));
3365                                 connector->polled = intel_connector->polled;
3366                                 if (!connector->polled)
3367                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3368                         }
3369                 }
3370         }
3371         if (dev_priv->display.hpd_irq_setup)
3372                 dev_priv->display.hpd_irq_setup(dev);
3373         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3374 }
3375
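     /*
      * One-time IRQ setup: initialize the deferred work items and timers,
      * select the vblank counter implementation, and pick the
      * per-platform irq handler/preinstall/postinstall/uninstall and
      * vblank hooks based on GPU generation and PCH presence.
      */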
3376 void intel_irq_init(struct drm_device *dev)
3377 {
3378         struct drm_i915_private *dev_priv = dev->dev_private;
3379
3380         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3381         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3382         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3383         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3384
3385         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3386                     i915_hangcheck_elapsed,
3387                     (unsigned long) dev);
3388         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3389                     (unsigned long) dev_priv);
3390
3391         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3392
3393         if (IS_GEN2(dev)) {
3394                 dev->max_vblank_count = 0;
3395                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3396         } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3397                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3398                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3399         } else {
3400                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3401                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3402         }
3403
3404         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
3405                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3406                 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3407         }
3408
3409         if (IS_VALLEYVIEW(dev)) {
3410                 dev->driver->irq_handler = valleyview_irq_handler;
3411                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3412                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3413                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3414                 dev->driver->enable_vblank = valleyview_enable_vblank;
3415                 dev->driver->disable_vblank = valleyview_disable_vblank;
3416                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3417         } else if (HAS_PCH_SPLIT(dev)) {
3418                 dev->driver->irq_handler = ironlake_irq_handler;
3419                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3420                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3421                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3422                 dev->driver->enable_vblank = ironlake_enable_vblank;
3423                 dev->driver->disable_vblank = ironlake_disable_vblank;
3424                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3425         } else {
3426                 if (INTEL_INFO(dev)->gen == 2) {
3427                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
3428                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
3429                         dev->driver->irq_handler = i8xx_irq_handler;
3430                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
3431                 } else if (INTEL_INFO(dev)->gen == 3) {
3432                         dev->driver->irq_preinstall = i915_irq_preinstall;
3433                         dev->driver->irq_postinstall = i915_irq_postinstall;
3434                         dev->driver->irq_uninstall = i915_irq_uninstall;
3435                         dev->driver->irq_handler = i915_irq_handler;
3436                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3437                 } else {
3438                         dev->driver->irq_preinstall = i965_irq_preinstall;
3439                         dev->driver->irq_postinstall = i965_irq_postinstall;
3440                         dev->driver->irq_uninstall = i965_irq_uninstall;
3441                         dev->driver->irq_handler = i965_irq_handler;
3442                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3443                 }
3444                 dev->driver->enable_vblank = i915_enable_vblank;
3445                 dev->driver->disable_vblank = i915_disable_vblank;
3446         }
3447 }
3448
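     /*
      * (Re-)initialize hotplug state: mark every HPD pin enabled, flag
      * connectors that have an HPD pin as hotplug-capable
      * (DRM_CONNECTOR_POLL_HPD) unless they request explicit polling, and
      * program the hardware via the platform hpd_irq_setup hook.
      */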
3449 void intel_hpd_init(struct drm_device *dev)
3450 {
3451         struct drm_i915_private *dev_priv = dev->dev_private;
3452         struct drm_mode_config *mode_config = &dev->mode_config;
3453         struct drm_connector *connector;
3454         unsigned long irqflags;
3455         int i;
3456
3457         for (i = 1; i < HPD_NUM_PINS; i++) {
3458                 dev_priv->hpd_stats[i].hpd_cnt = 0;
3459                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3460         }
3461         list_for_each_entry(connector, &mode_config->connector_list, head) {
3462                 struct intel_connector *intel_connector = to_intel_connector(connector);
3463                 connector->polled = intel_connector->polled;
3464                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3465                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3466         }
3467
3468         /* Interrupt setup is already guaranteed to be single-threaded; this is
3469          * just to make the assert_spin_locked checks happy. */
3470         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3471         if (dev_priv->display.hpd_irq_setup)
3472                 dev_priv->display.hpd_irq_setup(dev);
3473         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3474 }
3475
3476 /* Disable interrupts so we can allow Package C8+. */
3477 void hsw_pc8_disable_interrupts(struct drm_device *dev)
3478 {
3479         struct drm_i915_private *dev_priv = dev->dev_private;
3480         unsigned long irqflags;
3481
3482         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3483
3484         dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3485         dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3486         dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3487         dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3488         dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3489
3490         ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3491         ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3492         ilk_disable_gt_irq(dev_priv, 0xffffffff);
3493         snb_disable_pm_irq(dev_priv, 0xffffffff);
3494
3495         dev_priv->pc8.irqs_disabled = true;
3496
3497         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3498 }
3499
3500 /* Restore interrupts so we can recover from Package C8+. */
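     /*
      * On entry the IMRs are expected to still hold the values written by
      * hsw_pc8_disable_interrupts(); the WARNs below catch anyone who
      * touched the interrupt masks while PC8 interrupts were disabled,
      * before the saved masks are layered back on.
      */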
3501 void hsw_pc8_restore_interrupts(struct drm_device *dev)
3502 {
3503         struct drm_i915_private *dev_priv = dev->dev_private;
3504         unsigned long irqflags;
3505         uint32_t val, expected;
3506
3507         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3508
3509         val = I915_READ(DEIMR);
3510         expected = ~DE_PCH_EVENT_IVB;
3511         WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3512
3513         val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3514         expected = ~SDE_HOTPLUG_MASK_CPT;
3515         WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3516              val, expected);
3517
3518         val = I915_READ(GTIMR);
3519         expected = 0xffffffff;
3520         WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3521
3522         val = I915_READ(GEN6_PMIMR);
3523         expected = 0xffffffff;
3524         WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3525              expected);
3526
3527         dev_priv->pc8.irqs_disabled = false;
3528
3529         ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3530         ibx_enable_display_interrupt(dev_priv,
3531                                      ~dev_priv->pc8.regsave.sdeimr &
3532                                      ~SDE_HOTPLUG_MASK_CPT);
3533         ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3534         snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3535         I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3536
3537         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3538 }