/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
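/*
 * Note on the double IIR clear in both reset macros above: per the
 * "Be paranoid" comment, IIR can latch a second, queued event while a
 * bit is still set, so a single write-to-clear (even with a posting
 * read) could leave one event pending; clearing twice drains both.
 */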
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
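/*
 * DEIMR follows the usual IMR convention used throughout this file: a
 * set bit masks (disables) the corresponding interrupt. That is why
 * enabling clears bits in irq_mask and disabling sets them.
 */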
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
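/*
 * The update helper above uses the common two-mask idiom in this file:
 * interrupt_mask selects which bits change, enabled_irq_mask says which
 * of those end up unmasked. For example,
 * ilk_update_gt_irq(dev_priv, 0x5, 0x1) unmasks bit 0 and masks bit 2
 * while leaving every other GTIMR bit untouched.
 */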
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
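/*
 * The two wrapper macros above follow the same enable/disable pattern
 * as the GT and PM helpers: enabling passes the bits as both the update
 * mask and the enabled mask, disabling passes 0 for the enabled mask.
 */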
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
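/*
 * Illustrative usage sketch (not taken from this file): since the
 * function returns the previous reporting state, a caller can disable
 * reporting around an operation known to underrun and then restore
 * whatever state was in effect before:
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... reconfigure the pipe ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */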
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return old;
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
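/*
 * PIPESTAT status bits are write-one-to-clear, which is why the single
 * register write above can both set the enable bits and ack any stale
 * status at the same time.
 */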
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
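/*
 * The special-casing above exists because the VLV sprite flip-done
 * enable bits do not sit exactly 16 bits above their status bits, so
 * the default "status_mask << 16" mapping has to be patched up by hand.
 */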
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
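/*
 * The "cooked" counter above works because the hardware frame counter
 * only increments at the start of vertical active: if the pixel counter
 * shows the pipe is already past vbl_start (i.e. inside vblank), the
 * returned value is bumped by one so the count is attributed to the
 * upcoming frame rather than the one that just finished.
 */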
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if hpd_pulse returns true, fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
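/*
 * The delay is in milliseconds: 2*60*1000 = two minutes before
 * hotplug_reenable_work re-enables an HPD pin that storm detection has
 * switched over to polling.
 */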
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
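/*
 * Summary of the heuristic above: an up decision is evaluated on every
 * EI interrupt against VLV_RP_UP_EI_THRESHOLD, while a down decision is
 * only evaluated once every VLV_INT_COUNT_FOR_DOWN_EI interrupts, so
 * the frequency ramps up quickly but decays more conservatively.
 */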
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
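/*
 * With the values above, more than 5 hotplug interrupts on one pin
 * inside a 1000ms window are treated as an interrupt storm, and the
 * storm handling below switches that pin over to polling.
 */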
static int ilk_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int g4x_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (IS_G4X(dev)) {
				dig_shift = g4x_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = ilk_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);
			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
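/*
 * The head/tail arithmetic above relies on INTEL_PIPE_CRC_ENTRIES_NR
 * being a power of two: both CIRC_SPACE() and the
 * "(head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1)" wrap assume a
 * power-of-two ring size.
 */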
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
2016 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2018 struct drm_i915_private *dev_priv = dev->dev_private;
2019 u32 pipe_stats[I915_MAX_PIPES] = { };
2022 spin_lock(&dev_priv->irq_lock);
2023 for_each_pipe(pipe) {
2025 u32 mask, iir_bit = 0;
2028 * PIPESTAT bits get signalled even when the interrupt is
2029 * disabled with the mask bits, and some of the status bits do
2030 * not generate interrupts at all (like the underrun bit). Hence
2031 * we need to be careful that we only handle what we want to
2035 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
2036 mask |= PIPE_FIFO_UNDERRUN_STATUS;
2040 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2043 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2046 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2050 mask |= dev_priv->pipestat_irq_mask[pipe];
2055 reg = PIPESTAT(pipe);
2056 mask |= PIPESTAT_INT_ENABLE_MASK;
2057 pipe_stats[pipe] = I915_READ(reg) & mask;
2060 * Clear the PIPE*STAT regs before the IIR
2062 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
2063 PIPESTAT_INT_STATUS_MASK))
2064 I915_WRITE(reg, pipe_stats[pipe]);
2066 spin_unlock(&dev_priv->irq_lock);
2068 for_each_pipe(pipe) {
2069 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2070 intel_pipe_handle_vblank(dev, pipe);
2072 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2073 intel_prepare_page_flip(dev, pipe);
2074 intel_finish_page_flip(dev, pipe);
2077 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2078 i9xx_pipe_crc_irq_handler(dev, pipe);
2080 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
2081 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2082 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2085 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2086 gmbus_irq_handler(dev);
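/*
 * A note on the PIPESTAT layout assumed above: each PIPESTAT register
 * packs interrupt enable bits in its high half and the corresponding
 * sticky status bits in its low half, and status bits are cleared by
 * writing them back as ones, which is what the loop does before the
 * caller acks VLV_IIR itself.
 */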
2089 static void i9xx_hpd_irq_handler(struct drm_device *dev)
2091 struct drm_i915_private *dev_priv = dev->dev_private;
2092 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2094 if (hotplug_status) {
2095 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2096 		/*
2097 		 * Make sure hotplug status is cleared before we clear IIR, or else we
2098 		 * may miss hotplug events.
2099 		 */
2100 		POSTING_READ(PORT_HOTPLUG_STAT);
2102 		if (IS_G4X(dev)) {
2103 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2105 			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
2106 		} else {
2107 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2109 			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
2110 		}
2112 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2113 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2114 dp_aux_irq_handler(dev);
2118 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2120 struct drm_device *dev = arg;
2121 struct drm_i915_private *dev_priv = dev->dev_private;
2122 u32 iir, gt_iir, pm_iir;
2123 irqreturn_t ret = IRQ_NONE;
2125 	while (true) {
2126 		/* Find, clear, then process each source of interrupt */
2128 gt_iir = I915_READ(GTIIR);
2129 		if (gt_iir)
2130 			I915_WRITE(GTIIR, gt_iir);
2132 pm_iir = I915_READ(GEN6_PMIIR);
2133 		if (pm_iir)
2134 			I915_WRITE(GEN6_PMIIR, pm_iir);
2136 iir = I915_READ(VLV_IIR);
2137 		if (iir) {
2138 			/* Consume port before clearing IIR or we'll miss events */
2139 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2140 i9xx_hpd_irq_handler(dev);
2141 			I915_WRITE(VLV_IIR, iir);
2142 		}
2144 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2145 			goto out;
2147 		ret = IRQ_HANDLED;
2149 		if (gt_iir)
2150 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2151 		if (pm_iir)
2152 			gen6_rps_irq_handler(dev_priv, pm_iir);
2153 /* Call regardless, as some status bits might not be
2154 * signalled in iir */
2155 		valleyview_pipestat_irq_handler(dev, iir);
2156 	}
2158 out:
2159 	return ret;
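/*
 * VLV has no single master interrupt ack in this path: GTIIR,
 * GEN6_PMIIR and VLV_IIR are each acked by writing the sampled value
 * back, and the loop above keeps iterating until all three sample as
 * zero, at which point the line should be quiescent.
 */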
2162 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2164 struct drm_device *dev = arg;
2165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u32 master_ctl, iir;
2167 irqreturn_t ret = IRQ_NONE;
2169 	for (;;) {
2170 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2171 iir = I915_READ(VLV_IIR);
2173 		if (master_ctl == 0 && iir == 0)
2174 			break;
2178 I915_WRITE(GEN8_MASTER_IRQ, 0);
2180 /* Find, clear, then process each source of interrupt */
2182 		if (iir) {
2183 			/* Consume port before clearing IIR or we'll miss events */
2184 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2185 i9xx_hpd_irq_handler(dev);
2186 			I915_WRITE(VLV_IIR, iir);
2187 		}
2189 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2191 /* Call regardless, as some status bits might not be
2192 * signalled in iir */
2193 valleyview_pipestat_irq_handler(dev, iir);
2195 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2196 		POSTING_READ(GEN8_MASTER_IRQ);
2198 		ret = IRQ_HANDLED;
2199 	}
2200 	return ret;
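/*
 * Same idea as the gen8 handler below: the master control bit gates
 * everything, so dropping it first means the leaf IIR acks above
 * cannot race a new interrupt, and re-enabling it last re-arms the
 * whole tree in a single write.
 */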
2202 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2204 struct drm_i915_private *dev_priv = dev->dev_private;
2206 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2207 u32 dig_hotplug_reg;
2209 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2210 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2212 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2214 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2215 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2216 SDE_AUDIO_POWER_SHIFT);
2217 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2218 				 port);
2221 if (pch_iir & SDE_AUX_MASK)
2222 dp_aux_irq_handler(dev);
2224 if (pch_iir & SDE_GMBUS)
2225 gmbus_irq_handler(dev);
2227 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2228 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2230 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2231 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2233 if (pch_iir & SDE_POISON)
2234 DRM_ERROR("PCH poison interrupt\n");
2236 	if (pch_iir & SDE_FDI_MASK)
2237 		for_each_pipe(pipe)
2238 			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2239 					 pipe_name(pipe),
2240 					 I915_READ(FDI_RX_IIR(pipe)));
2242 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2243 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2245 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2246 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2248 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2249 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2250 							  false))
2251 			DRM_ERROR("PCH transcoder A FIFO underrun\n");
2253 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2254 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2255 							  false))
2256 			DRM_ERROR("PCH transcoder B FIFO underrun\n");
2259 static void ivb_err_int_handler(struct drm_device *dev)
2261 struct drm_i915_private *dev_priv = dev->dev_private;
2262 u32 err_int = I915_READ(GEN7_ERR_INT);
2265 if (err_int & ERR_INT_POISON)
2266 DRM_ERROR("Poison interrupt\n");
2268 for_each_pipe(pipe) {
2269 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2270 			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2271 								  false))
2272 				DRM_ERROR("Pipe %c FIFO underrun\n",
2273 					  pipe_name(pipe));
2276 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2277 if (IS_IVYBRIDGE(dev))
2278 ivb_pipe_crc_irq_handler(dev, pipe);
2279 			else
2280 				hsw_pipe_crc_irq_handler(dev, pipe);
2284 I915_WRITE(GEN7_ERR_INT, err_int);
2287 static void cpt_serr_int_handler(struct drm_device *dev)
2289 struct drm_i915_private *dev_priv = dev->dev_private;
2290 u32 serr_int = I915_READ(SERR_INT);
2292 if (serr_int & SERR_INT_POISON)
2293 DRM_ERROR("PCH poison interrupt\n");
2295 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2296 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2297 							  false))
2298 			DRM_ERROR("PCH transcoder A FIFO underrun\n");
2300 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2301 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2302 							  false))
2303 			DRM_ERROR("PCH transcoder B FIFO underrun\n");
2305 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2306 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2307 							  false))
2308 			DRM_ERROR("PCH transcoder C FIFO underrun\n");
2310 I915_WRITE(SERR_INT, serr_int);
2313 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2315 struct drm_i915_private *dev_priv = dev->dev_private;
2317 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2318 u32 dig_hotplug_reg;
2320 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2321 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2323 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2325 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2326 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2327 SDE_AUDIO_POWER_SHIFT_CPT);
2328 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2329 				 port_name(port));
2332 if (pch_iir & SDE_AUX_MASK_CPT)
2333 dp_aux_irq_handler(dev);
2335 if (pch_iir & SDE_GMBUS_CPT)
2336 gmbus_irq_handler(dev);
2338 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2339 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2341 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2342 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2344 	if (pch_iir & SDE_FDI_MASK_CPT)
2345 		for_each_pipe(pipe)
2346 			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2347 					 pipe_name(pipe),
2348 					 I915_READ(FDI_RX_IIR(pipe)));
2350 if (pch_iir & SDE_ERROR_CPT)
2351 cpt_serr_int_handler(dev);
2354 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2356 struct drm_i915_private *dev_priv = dev->dev_private;
2359 if (de_iir & DE_AUX_CHANNEL_A)
2360 dp_aux_irq_handler(dev);
2362 if (de_iir & DE_GSE)
2363 intel_opregion_asle_intr(dev);
2365 if (de_iir & DE_POISON)
2366 DRM_ERROR("Poison interrupt\n");
2368 for_each_pipe(pipe) {
2369 if (de_iir & DE_PIPE_VBLANK(pipe))
2370 intel_pipe_handle_vblank(dev, pipe);
2372 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2373 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2374 				DRM_ERROR("Pipe %c FIFO underrun\n",
2375 					  pipe_name(pipe));
2377 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2378 i9xx_pipe_crc_irq_handler(dev, pipe);
2380 /* plane/pipes map 1:1 on ilk+ */
2381 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2382 intel_prepare_page_flip(dev, pipe);
2383 intel_finish_page_flip_plane(dev, pipe);
2387 /* check event from PCH */
2388 if (de_iir & DE_PCH_EVENT) {
2389 u32 pch_iir = I915_READ(SDEIIR);
2391 if (HAS_PCH_CPT(dev))
2392 cpt_irq_handler(dev, pch_iir);
2393 		else
2394 			ibx_irq_handler(dev, pch_iir);
2396 /* should clear PCH hotplug event before clear CPU irq */
2397 I915_WRITE(SDEIIR, pch_iir);
2400 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2401 ironlake_rps_change_irq_handler(dev);
2404 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2406 struct drm_i915_private *dev_priv = dev->dev_private;
2409 if (de_iir & DE_ERR_INT_IVB)
2410 ivb_err_int_handler(dev);
2412 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2413 dp_aux_irq_handler(dev);
2415 if (de_iir & DE_GSE_IVB)
2416 intel_opregion_asle_intr(dev);
2418 for_each_pipe(pipe) {
2419 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2420 intel_pipe_handle_vblank(dev, pipe);
2422 /* plane/pipes map 1:1 on ilk+ */
2423 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2424 intel_prepare_page_flip(dev, pipe);
2425 intel_finish_page_flip_plane(dev, pipe);
2429 /* check event from PCH */
2430 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2431 u32 pch_iir = I915_READ(SDEIIR);
2433 cpt_irq_handler(dev, pch_iir);
2435 /* clear PCH hotplug event before clear CPU irq */
2436 I915_WRITE(SDEIIR, pch_iir);
2441 * To handle irqs with the minimum potential races with fresh interrupts, we:
2442 * 1 - Disable Master Interrupt Control.
2443 * 2 - Find the source(s) of the interrupt.
2444 * 3 - Clear the Interrupt Identity bits (IIR).
2445 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2446 * 5 - Re-enable Master Interrupt Control.
2448 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2450 struct drm_device *dev = arg;
2451 struct drm_i915_private *dev_priv = dev->dev_private;
2452 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2453 irqreturn_t ret = IRQ_NONE;
2455 /* We get interrupts on unclaimed registers, so check for this before we
2456 * do any I915_{READ,WRITE}. */
2457 intel_uncore_check_errors(dev);
2459 /* disable master interrupt before clearing iir */
2460 de_ier = I915_READ(DEIER);
2461 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2462 POSTING_READ(DEIER);
2464 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2465 	 * interrupts will be stored on its back queue, and then we'll be
2466 * able to process them after we restore SDEIER (as soon as we restore
2467 * it, we'll get an interrupt if SDEIIR still has something to process
2468 * due to its back queue). */
2469 if (!HAS_PCH_NOP(dev)) {
2470 sde_ier = I915_READ(SDEIER);
2471 I915_WRITE(SDEIER, 0);
2472 POSTING_READ(SDEIER);
2475 /* Find, clear, then process each source of interrupt */
2477 gt_iir = I915_READ(GTIIR);
2478 	if (gt_iir) {
2479 		I915_WRITE(GTIIR, gt_iir);
2481 if (INTEL_INFO(dev)->gen >= 6)
2482 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2483 		else
2484 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2485 		ret = IRQ_HANDLED;
2486 	}
2487 de_iir = I915_READ(DEIIR);
2488 	if (de_iir) {
2489 		I915_WRITE(DEIIR, de_iir);
2491 if (INTEL_INFO(dev)->gen >= 7)
2492 ivb_display_irq_handler(dev, de_iir);
2493 		else
2494 			ilk_display_irq_handler(dev, de_iir);
2495 		ret = IRQ_HANDLED;
2496 	}
2497 if (INTEL_INFO(dev)->gen >= 6) {
2498 u32 pm_iir = I915_READ(GEN6_PMIIR);
2499 		if (pm_iir) {
2500 			I915_WRITE(GEN6_PMIIR, pm_iir);
2501 			ret = IRQ_HANDLED;
2502 			gen6_rps_irq_handler(dev_priv, pm_iir);
2503 		}
2506 I915_WRITE(DEIER, de_ier);
2507 POSTING_READ(DEIER);
2508 if (!HAS_PCH_NOP(dev)) {
2509 I915_WRITE(SDEIER, sde_ier);
2510 POSTING_READ(SDEIER);
2516 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2518 struct drm_device *dev = arg;
2519 struct drm_i915_private *dev_priv = dev->dev_private;
2520 	u32 master_ctl;
2521 	irqreturn_t ret = IRQ_NONE;
2522 	uint32_t tmp = 0;
2523 	enum pipe pipe;
2525 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2526 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2527 	if (!master_ctl)
2528 		return IRQ_NONE;
2530 	I915_WRITE(GEN8_MASTER_IRQ, 0);
2531 POSTING_READ(GEN8_MASTER_IRQ);
2533 /* Find, clear, then process each source of interrupt */
2535 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2537 if (master_ctl & GEN8_DE_MISC_IRQ) {
2538 tmp = I915_READ(GEN8_DE_MISC_IIR);
2539 		if (tmp) {
2540 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2541 			ret = IRQ_HANDLED;
2542 if (tmp & GEN8_DE_MISC_GSE)
2543 intel_opregion_asle_intr(dev);
2545 DRM_ERROR("Unexpected DE Misc interrupt\n");
2548 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2551 if (master_ctl & GEN8_DE_PORT_IRQ) {
2552 tmp = I915_READ(GEN8_DE_PORT_IIR);
2553 		if (tmp) {
2554 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2555 			ret = IRQ_HANDLED;
2556 if (tmp & GEN8_AUX_CHANNEL_A)
2557 dp_aux_irq_handler(dev);
2559 DRM_ERROR("Unexpected DE Port interrupt\n");
2562 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2565 for_each_pipe(pipe) {
2568 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2569 			continue;
2571 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2572 		if (pipe_iir) {
2573 			ret = IRQ_HANDLED;
2574 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2575 if (pipe_iir & GEN8_PIPE_VBLANK)
2576 intel_pipe_handle_vblank(dev, pipe);
2578 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2579 intel_prepare_page_flip(dev, pipe);
2580 intel_finish_page_flip_plane(dev, pipe);
2583 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2584 hsw_pipe_crc_irq_handler(dev, pipe);
2586 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2587 			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2588 								  false))
2589 				DRM_ERROR("Pipe %c FIFO underrun\n",
2590 					  pipe_name(pipe));
2593 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2594 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2596 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2599 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2602 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2604 * FIXME(BDW): Assume for now that the new interrupt handling
2605 * scheme also closed the SDE interrupt handling race we've seen
2606 * on older pch-split platforms. But this needs testing.
2608 u32 pch_iir = I915_READ(SDEIIR);
2609 		if (pch_iir) {
2610 			I915_WRITE(SDEIIR, pch_iir);
2611 			ret = IRQ_HANDLED;
2612 cpt_irq_handler(dev, pch_iir);
2614 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2618 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2619 POSTING_READ(GEN8_MASTER_IRQ);
2624 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2625 bool reset_completed)
2627 struct intel_engine_cs *ring;
2631 * Notify all waiters for GPU completion events that reset state has
2632 * been changed, and that they need to restart their wait after
2633 * checking for potential errors (and bail out to drop locks if there is
2634 * a gpu reset pending so that i915_error_work_func can acquire them).
2637 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2638 for_each_ring(ring, dev_priv, i)
2639 wake_up_all(&ring->irq_queue);
2641 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2642 wake_up_all(&dev_priv->pending_flip_queue);
2645 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2646 * reset state is cleared.
2648 if (reset_completed)
2649 wake_up_all(&dev_priv->gpu_error.reset_queue);
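/*
 * The split above is deliberate: seqno and pageflip waiters must be
 * kicked as soon as a reset is *pending* so that they back off and
 * drop their locks, while reset_queue waiters only care about the
 * reset having *completed*, hence the reset_completed gate.
 */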
2652 /**
2653  * i915_error_work_func - do process context error handling work
2654 * @work: work struct
2656  * Fire an error uevent so userspace can see that a hang or error
2657  * occurred.
2658  */
2659 static void i915_error_work_func(struct work_struct *work)
2661 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2663 struct drm_i915_private *dev_priv =
2664 container_of(error, struct drm_i915_private, gpu_error);
2665 struct drm_device *dev = dev_priv->dev;
2666 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2667 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2668 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2671 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2674 * Note that there's only one work item which does gpu resets, so we
2675 * need not worry about concurrent gpu resets potentially incrementing
2676 * error->reset_counter twice. We only need to take care of another
2677 * racing irq/hangcheck declaring the gpu dead for a second time. A
2678 * quick check for that is good enough: schedule_work ensures the
2679 * correct ordering between hang detection and this work item, and since
2680 * the reset in-progress bit is only ever set by code outside of this
2681 * work we don't need to worry about any other races.
2683 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2684 DRM_DEBUG_DRIVER("resetting chip\n");
2685 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2689 * In most cases it's guaranteed that we get here with an RPM
2690 * reference held, for example because there is a pending GPU
2691 * request that won't finish until the reset is done. This
2692 * isn't the case at least when we get here by doing a
2693 		 * simulated reset via debugfs, so get an RPM reference.
2695 intel_runtime_pm_get(dev_priv);
2697 * All state reset _must_ be completed before we update the
2698 * reset counter, for otherwise waiters might miss the reset
2699 * pending state and not properly drop locks, resulting in
2700 * deadlocks with the reset work.
2702 ret = i915_reset(dev);
2704 intel_display_handle_reset(dev);
2706 intel_runtime_pm_put(dev_priv);
2708 		if (ret == 0) {
2709 			/*
2710 			 * After all the gem state is reset, increment the reset
2711 			 * counter and wake up everyone waiting for the reset to
2712 			 * complete.
2714 * Since unlock operations are a one-sided barrier only,
2715 * we need to insert a barrier here to order any seqno
2716 			 * updates before
2717 			 * the counter increment.
2718 			 */
2719 smp_mb__before_atomic();
2720 atomic_inc(&dev_priv->gpu_error.reset_counter);
2722 kobject_uevent_env(&dev->primary->kdev->kobj,
2723 KOBJ_CHANGE, reset_done_event);
2724 		} else {
2725 			atomic_set_mask(I915_WEDGED, &error->reset_counter);
2726 		}
2728 		/*
2729 		 * Note: The wake_up also serves as a memory barrier so that
2730 		 * waiters see the updated value of the reset counter atomic_t.
2731 		 */
2732 i915_error_wake_up(dev_priv, true);
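/*
 * Rough sketch of the reset_counter encoding as used by this code:
 * the low bit (I915_RESET_IN_PROGRESS_FLAG) is set by
 * i915_handle_error() to mark a reset as pending, the atomic_inc()
 * above both bumps the completion count and clears that bit again,
 * and I915_WEDGED is set instead when the reset itself failed, which
 * permanently fails all waiters.
 */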
2736 static void i915_report_and_clear_eir(struct drm_device *dev)
2738 struct drm_i915_private *dev_priv = dev->dev_private;
2739 uint32_t instdone[I915_NUM_INSTDONE_REG];
2740 u32 eir = I915_READ(EIR);
2746 pr_err("render error detected, EIR: 0x%08x\n", eir);
2748 i915_get_extra_instdone(dev, instdone);
2750 	if (IS_G4X(dev)) {
2751 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2752 u32 ipeir = I915_READ(IPEIR_I965);
2754 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2755 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2756 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2757 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2758 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2759 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2760 I915_WRITE(IPEIR_I965, ipeir);
2761 POSTING_READ(IPEIR_I965);
2763 if (eir & GM45_ERROR_PAGE_TABLE) {
2764 u32 pgtbl_err = I915_READ(PGTBL_ER);
2765 pr_err("page table error\n");
2766 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2767 I915_WRITE(PGTBL_ER, pgtbl_err);
2768 POSTING_READ(PGTBL_ER);
2772 if (!IS_GEN2(dev)) {
2773 if (eir & I915_ERROR_PAGE_TABLE) {
2774 u32 pgtbl_err = I915_READ(PGTBL_ER);
2775 pr_err("page table error\n");
2776 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2777 I915_WRITE(PGTBL_ER, pgtbl_err);
2778 POSTING_READ(PGTBL_ER);
2782 if (eir & I915_ERROR_MEMORY_REFRESH) {
2783 pr_err("memory refresh error:\n");
2785 pr_err("pipe %c stat: 0x%08x\n",
2786 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2787 /* pipestat has already been acked */
2789 if (eir & I915_ERROR_INSTRUCTION) {
2790 pr_err("instruction error\n");
2791 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2792 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2793 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2794 if (INTEL_INFO(dev)->gen < 4) {
2795 u32 ipeir = I915_READ(IPEIR);
2797 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2798 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2799 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2800 I915_WRITE(IPEIR, ipeir);
2801 POSTING_READ(IPEIR);
2802 		} else {
2803 			u32 ipeir = I915_READ(IPEIR_I965);
2805 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2806 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2807 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2808 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2809 I915_WRITE(IPEIR_I965, ipeir);
2810 POSTING_READ(IPEIR_I965);
2814 	I915_WRITE(EIR, eir);
2815 	POSTING_READ(EIR);
2816 eir = I915_READ(EIR);
2817 	if (eir) {
2818 		/*
2819 		 * some errors might have become stuck,
2820 		 * mask them.
2821 		 */
2822 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2823 I915_WRITE(EMR, I915_READ(EMR) | eir);
2824 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2829 * i915_handle_error - handle an error interrupt
2832  * Do some basic checking of register state at error interrupt time and
2833 * dump it to the syslog. Also call i915_capture_error_state() to make
2834 * sure we get a record and make it available in debugfs. Fire a uevent
2835 * so userspace knows something bad happened (should trigger collection
2836 * of a ring dump etc.).
2838 void i915_handle_error(struct drm_device *dev, bool wedged,
2839 const char *fmt, ...)
2841 struct drm_i915_private *dev_priv = dev->dev_private;
2842 	va_list args;
2843 	char error_msg[80];
2845 	va_start(args, fmt);
2846 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2847 	va_end(args);
2849 i915_capture_error_state(dev, wedged, error_msg);
2850 i915_report_and_clear_eir(dev);
2852 	if (wedged) {
2853 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2854 &dev_priv->gpu_error.reset_counter);
2857 * Wakeup waiting processes so that the reset work function
2858 * i915_error_work_func doesn't deadlock trying to grab various
2859 * locks. By bumping the reset counter first, the woken
2860 * processes will see a reset in progress and back off,
2861 * releasing their locks and then wait for the reset completion.
2862 * We must do this for _all_ gpu waiters that might hold locks
2863 * that the reset work needs to acquire.
2865 * Note: The wake_up serves as the required memory barrier to
2866 * ensure that the waiters see the updated value of the reset
2869 i915_error_wake_up(dev_priv, false);
2873 * Our reset work can grab modeset locks (since it needs to reset the
2874 	 * state of outstanding pageflips). Hence it must not be run on our own
2875 	 * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
2876 * code will deadlock.
2878 schedule_work(&dev_priv->gpu_error.work);
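/*
 * Typical usage, for reference: the IRQ handlers in this file report
 * recoverable problems with wedged == false, e.g.
 *
 *	i915_handle_error(dev, false,
 *			  "Command parser error, iir 0x%08x", iir);
 *
 * while hangcheck passes wedged == true to actually request a reset.
 */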
2881 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2883 struct drm_i915_private *dev_priv = dev->dev_private;
2884 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2885 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2886 struct drm_i915_gem_object *obj;
2887 struct intel_unpin_work *work;
2888 unsigned long flags;
2889 bool stall_detected;
2891 /* Ignore early vblank irqs */
2892 	if (intel_crtc == NULL)
2893 		return;
2895 spin_lock_irqsave(&dev->event_lock, flags);
2896 work = intel_crtc->unpin_work;
2899 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2900 !work->enable_stall_check) {
2901 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2902 spin_unlock_irqrestore(&dev->event_lock, flags);
2906 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2907 obj = work->pending_flip_obj;
2908 if (INTEL_INFO(dev)->gen >= 4) {
2909 int dspsurf = DSPSURF(intel_crtc->plane);
2910 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2911 i915_gem_obj_ggtt_offset(obj);
2913 int dspaddr = DSPADDR(intel_crtc->plane);
2914 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2915 crtc->y * crtc->primary->fb->pitches[0] +
2916 crtc->x * crtc->primary->fb->bits_per_pixel/8);
2919 spin_unlock_irqrestore(&dev->event_lock, flags);
2921 if (stall_detected) {
2922 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2923 intel_prepare_page_flip(dev, intel_crtc->plane);
2927 /* Called from drm generic code, passed 'crtc' which
2928 * we use as a pipe index
2930 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2932 struct drm_i915_private *dev_priv = dev->dev_private;
2933 unsigned long irqflags;
2935 	if (!i915_pipe_enabled(dev, pipe))
2936 		return -EINVAL;
2938 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2939 if (INTEL_INFO(dev)->gen >= 4)
2940 i915_enable_pipestat(dev_priv, pipe,
2941 PIPE_START_VBLANK_INTERRUPT_STATUS);
2942 	else
2943 		i915_enable_pipestat(dev_priv, pipe,
2944 PIPE_VBLANK_INTERRUPT_STATUS);
2945 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2950 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2952 struct drm_i915_private *dev_priv = dev->dev_private;
2953 unsigned long irqflags;
2954 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2955 DE_PIPE_VBLANK(pipe);
2957 	if (!i915_pipe_enabled(dev, pipe))
2958 		return -EINVAL;
2960 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2961 ironlake_enable_display_irq(dev_priv, bit);
2962 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2967 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2969 struct drm_i915_private *dev_priv = dev->dev_private;
2970 unsigned long irqflags;
2972 	if (!i915_pipe_enabled(dev, pipe))
2973 		return -EINVAL;
2975 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2976 i915_enable_pipestat(dev_priv, pipe,
2977 PIPE_START_VBLANK_INTERRUPT_STATUS);
2978 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2983 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2985 struct drm_i915_private *dev_priv = dev->dev_private;
2986 unsigned long irqflags;
2988 	if (!i915_pipe_enabled(dev, pipe))
2989 		return -EINVAL;
2991 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2992 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2993 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2994 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2995 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2999 /* Called from drm generic code, passed 'crtc' which
3000 * we use as a pipe index
3002 static void i915_disable_vblank(struct drm_device *dev, int pipe)
3004 struct drm_i915_private *dev_priv = dev->dev_private;
3005 unsigned long irqflags;
3007 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3008 i915_disable_pipestat(dev_priv, pipe,
3009 PIPE_VBLANK_INTERRUPT_STATUS |
3010 PIPE_START_VBLANK_INTERRUPT_STATUS);
3011 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3014 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
3016 struct drm_i915_private *dev_priv = dev->dev_private;
3017 unsigned long irqflags;
3018 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
3019 DE_PIPE_VBLANK(pipe);
3021 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3022 ironlake_disable_display_irq(dev_priv, bit);
3023 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3026 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3028 struct drm_i915_private *dev_priv = dev->dev_private;
3029 unsigned long irqflags;
3031 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3032 i915_disable_pipestat(dev_priv, pipe,
3033 PIPE_START_VBLANK_INTERRUPT_STATUS);
3034 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3037 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3039 struct drm_i915_private *dev_priv = dev->dev_private;
3040 unsigned long irqflags;
3042 	if (!i915_pipe_enabled(dev, pipe))
3043 		return;
3045 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3046 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3047 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3048 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3049 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
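/*
 * Three vblank schemes are in play in the enable/disable hooks above:
 * gen2-4 and vlv toggle PIPESTAT enable bits, ilk-hsw toggle per-pipe
 * bits in DEIMR, and gen8 has a dedicated per-pipe IMR register
 * (GEN8_DE_PIPE_IMR).
 */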
3053 ring_last_seqno(struct intel_engine_cs *ring)
3055 return list_entry(ring->request_list.prev,
3056 struct drm_i915_gem_request, list)->seqno;
3060 ring_idle(struct intel_engine_cs *ring, u32 seqno)
3062 return (list_empty(&ring->request_list) ||
3063 i915_seqno_passed(seqno, ring_last_seqno(ring)));
3067 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
3069 if (INTEL_INFO(dev)->gen >= 8) {
3070 return (ipehr >> 23) == 0x1c;
3072 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
3073 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
3074 MI_SEMAPHORE_REGISTER);
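/*
 * The ">> 23" above checks bits 31:23 of IPEHR in one go: the MI
 * client field (bits 31:29) must be zero and the opcode field (bits
 * 28:23) must be 0x1c, which on gen8+ is MI_SEMAPHORE_WAIT.
 */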
3078 static struct intel_engine_cs *
3079 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3081 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3082 struct intel_engine_cs *signaller;
3085 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3086 for_each_ring(signaller, dev_priv, i) {
3087 			if (ring == signaller)
3088 				continue;
3090 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
3091 				return signaller;
3094 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3096 for_each_ring(signaller, dev_priv, i) {
3097 			if (ring == signaller)
3098 				continue;
3100 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
3101 				return signaller;
3105 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
3106 		  ring->id, ipehr, offset);
3108 	return NULL;
3111 static struct intel_engine_cs *
3112 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3114 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3115 	u32 cmd, ipehr, head;
3116 	u64 offset = 0;
3117 	int i, backwards;
3119 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3120 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3121 		return NULL;
3124 * HEAD is likely pointing to the dword after the actual command,
3125 * so scan backwards until we find the MBOX. But limit it to just 3
3126 * or 4 dwords depending on the semaphore wait command size.
3127 * Note that we don't care about ACTHD here since that might
3128 	 * point at a batch, and semaphores are always emitted into the
3129 * ringbuffer itself.
3131 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3132 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3134 for (i = backwards; i; --i) {
3136 * Be paranoid and presume the hw has gone off into the wild -
3137 * our ring is smaller than what the hardware (and hence
3138 * HEAD_ADDR) allows. Also handles wrap-around.
3140 head &= ring->buffer->size - 1;
3142 /* This here seems to blow up */
3143 		cmd = ioread32(ring->buffer->virtual_start + head);
3144 		if (cmd == ipehr)
3145 			break;
3147 		head -= 4;
3148 	}
3150 	if (!i)
3151 		return NULL;
3153 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3154 if (INTEL_INFO(ring->dev)->gen >= 8) {
3155 offset = ioread32(ring->buffer->virtual_start + head + 12);
3156 		offset <<= 32;
3157 		offset |= ioread32(ring->buffer->virtual_start + head + 8);
3158 	}
3159 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
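/*
 * The scan above assumes the in-ring layout of the semaphore wait
 * command: the command dword sits at 'head', the wait seqno at
 * head + 4, and on gen8+ a 64-bit semaphore address in the following
 * two dwords, which is what gets matched against signal_ggtt[] in
 * semaphore_wait_to_signaller_ring().
 */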
3162 static int semaphore_passed(struct intel_engine_cs *ring)
3164 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3165 struct intel_engine_cs *signaller;
3166 	u32 seqno;
3168 	ring->hangcheck.deadlock++;
3170 signaller = semaphore_waits_for(ring, &seqno);
3171 	if (signaller == NULL)
3172 		return -1;
3174 /* Prevent pathological recursion due to driver bugs */
3175 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3176 		return -1;
3178 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3179 		return 1;
3181 /* cursory check for an unkickable deadlock */
3182 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3183 semaphore_passed(signaller) < 0)
3189 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3191 struct intel_engine_cs *ring;
3194 for_each_ring(ring, dev_priv, i)
3195 ring->hangcheck.deadlock = 0;
3198 static enum intel_ring_hangcheck_action
3199 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3201 struct drm_device *dev = ring->dev;
3202 	struct drm_i915_private *dev_priv = dev->dev_private;
3203 	u32 tmp;
3205 if (acthd != ring->hangcheck.acthd) {
3206 if (acthd > ring->hangcheck.max_acthd) {
3207 ring->hangcheck.max_acthd = acthd;
3208 return HANGCHECK_ACTIVE;
3211 return HANGCHECK_ACTIVE_LOOP;
3214 	if (IS_GEN2(dev))
3215 		return HANGCHECK_HUNG;
3217 /* Is the chip hanging on a WAIT_FOR_EVENT?
3218 * If so we can simply poke the RB_WAIT bit
3219 * and break the hang. This should work on
3220 * all but the second generation chipsets.
3222 tmp = I915_READ_CTL(ring);
3223 if (tmp & RING_WAIT) {
3224 i915_handle_error(dev, false,
3225 "Kicking stuck wait on %s",
3227 I915_WRITE_CTL(ring, tmp);
3228 return HANGCHECK_KICK;
3231 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3232 		switch (semaphore_passed(ring)) {
3233 		default:
3234 			return HANGCHECK_HUNG;
3235 		case 1:
3236 			i915_handle_error(dev, false,
3237 					  "Kicking stuck semaphore on %s",
3238 					  ring->name);
3239 			I915_WRITE_CTL(ring, tmp);
3240 			return HANGCHECK_KICK;
3241 		case 0:
3242 			return HANGCHECK_WAIT;
3243 		}
3246 return HANGCHECK_HUNG;
3250 * This is called when the chip hasn't reported back with completed
3251  * batchbuffers in a long time. We keep track of per-ring seqno progress and
3252  * if there is no progress, the hangcheck score for that ring is increased.
3253  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
3254  * we kick the ring. If we see no progress on three subsequent calls
3255  * we assume the chip is wedged and try to fix it by resetting the chip.
3257 static void i915_hangcheck_elapsed(unsigned long data)
3259 struct drm_device *dev = (struct drm_device *)data;
3260 struct drm_i915_private *dev_priv = dev->dev_private;
3261 struct intel_engine_cs *ring;
3262 	int i;
3263 	int busy_count = 0, rings_hung = 0;
3264 bool stuck[I915_NUM_RINGS] = { 0 };
3269 	if (!i915.enable_hangcheck)
3270 		return;
3272 for_each_ring(ring, dev_priv, i) {
3277 semaphore_clear_deadlocks(dev_priv);
3279 seqno = ring->get_seqno(ring, false);
3280 acthd = intel_ring_get_active_head(ring);
3282 if (ring->hangcheck.seqno == seqno) {
3283 if (ring_idle(ring, seqno)) {
3284 ring->hangcheck.action = HANGCHECK_IDLE;
3286 if (waitqueue_active(&ring->irq_queue)) {
3287 /* Issue a wake-up to catch stuck h/w. */
3288 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3289 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3290 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3293 DRM_INFO("Fake missed irq on %s\n",
3295 wake_up_all(&ring->irq_queue);
3297 /* Safeguard against driver failure */
3298 ring->hangcheck.score += BUSY;
3302 /* We always increment the hangcheck score
3303 * if the ring is busy and still processing
3304 * the same request, so that no single request
3305 * can run indefinitely (such as a chain of
3306 * batches). The only time we do not increment
3307 			 * the hangcheck score on this ring is when this
3308 * ring is in a legitimate wait for another
3309 * ring. In that case the waiting ring is a
3310 * victim and we want to be sure we catch the
3311 * right culprit. Then every time we do kick
3312 * the ring, add a small increment to the
3313 * score so that we can catch a batch that is
3314 * being repeatedly kicked and so responsible
3315 * for stalling the machine.
3317 ring->hangcheck.action = ring_stuck(ring,
3320 switch (ring->hangcheck.action) {
3321 case HANGCHECK_IDLE:
3322 case HANGCHECK_WAIT:
3323 			case HANGCHECK_ACTIVE:
3324 				break;
3325 			case HANGCHECK_ACTIVE_LOOP:
3326 				ring->hangcheck.score += BUSY;
3327 				break;
3328 			case HANGCHECK_KICK:
3329 				ring->hangcheck.score += KICK;
3330 				break;
3331 			case HANGCHECK_HUNG:
3332 				ring->hangcheck.score += HUNG;
3333 				stuck[i] = true;
3334 				break;
3335 			}
3338 ring->hangcheck.action = HANGCHECK_ACTIVE;
3340 /* Gradually reduce the count so that we catch DoS
3341 * attempts across multiple batches.
3343 if (ring->hangcheck.score > 0)
3344 ring->hangcheck.score--;
3346 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3349 ring->hangcheck.seqno = seqno;
3350 ring->hangcheck.acthd = acthd;
3354 for_each_ring(ring, dev_priv, i) {
3355 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3356 DRM_INFO("%s on %s\n",
3357 stuck[i] ? "stuck" : "no progress",
3364 return i915_handle_error(dev, true, "Ring hung");
3367 	/* Reset timer in case the chip hangs without another request
3368 	 * being added */
3369 i915_queue_hangcheck(dev);
3372 void i915_queue_hangcheck(struct drm_device *dev)
3374 struct drm_i915_private *dev_priv = dev->dev_private;
3375 	if (!i915.enable_hangcheck)
3376 		return;
3378 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3379 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
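/*
 * Note the timer is one-shot: each hangcheck pass re-arms itself via
 * i915_queue_hangcheck(), so ring progress is only sampled roughly
 * once per DRM_I915_HANGCHECK_JIFFIES rather than continuously.
 */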
3382 static void ibx_irq_reset(struct drm_device *dev)
3384 struct drm_i915_private *dev_priv = dev->dev_private;
3386 	if (HAS_PCH_NOP(dev))
3387 		return;
3389 GEN5_IRQ_RESET(SDE);
3391 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3392 I915_WRITE(SERR_INT, 0xffffffff);
3396 * SDEIER is also touched by the interrupt handler to work around missed PCH
3397 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3398 * instead we unconditionally enable all PCH interrupt sources here, but then
3399 * only unmask them as needed with SDEIMR.
3401 * This function needs to be called before interrupts are enabled.
3403 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3405 struct drm_i915_private *dev_priv = dev->dev_private;
3407 	if (HAS_PCH_NOP(dev))
3408 		return;
3410 WARN_ON(I915_READ(SDEIER) != 0);
3411 I915_WRITE(SDEIER, 0xffffffff);
3412 POSTING_READ(SDEIER);
3415 static void gen5_gt_irq_reset(struct drm_device *dev)
3417 struct drm_i915_private *dev_priv = dev->dev_private;
3419 	GEN5_IRQ_RESET(GT);
3420 	if (INTEL_INFO(dev)->gen >= 6)
3421 GEN5_IRQ_RESET(GEN6_PM);
3426 static void ironlake_irq_reset(struct drm_device *dev)
3428 struct drm_i915_private *dev_priv = dev->dev_private;
3430 I915_WRITE(HWSTAM, 0xffffffff);
3432 	GEN5_IRQ_RESET(DE);
3433 	if (IS_GEN7(dev))
3434 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3436 	gen5_gt_irq_reset(dev);
3438 	ibx_irq_reset(dev);
3441 static void valleyview_irq_preinstall(struct drm_device *dev)
3443 struct drm_i915_private *dev_priv = dev->dev_private;
3447 I915_WRITE(VLV_IMR, 0);
3448 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3449 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3450 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3453 I915_WRITE(GTIIR, I915_READ(GTIIR));
3454 I915_WRITE(GTIIR, I915_READ(GTIIR));
3456 gen5_gt_irq_reset(dev);
3458 I915_WRITE(DPINVGTT, 0xff);
3460 I915_WRITE(PORT_HOTPLUG_EN, 0);
3461 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3462 	for_each_pipe(pipe)
3463 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3464 I915_WRITE(VLV_IIR, 0xffffffff);
3465 I915_WRITE(VLV_IMR, 0xffffffff);
3466 I915_WRITE(VLV_IER, 0x0);
3467 POSTING_READ(VLV_IER);
3470 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3472 GEN8_IRQ_RESET_NDX(GT, 0);
3473 GEN8_IRQ_RESET_NDX(GT, 1);
3474 GEN8_IRQ_RESET_NDX(GT, 2);
3475 GEN8_IRQ_RESET_NDX(GT, 3);
3478 static void gen8_irq_reset(struct drm_device *dev)
3480 struct drm_i915_private *dev_priv = dev->dev_private;
3483 I915_WRITE(GEN8_MASTER_IRQ, 0);
3484 POSTING_READ(GEN8_MASTER_IRQ);
3486 gen8_gt_irq_reset(dev_priv);
3488 	for_each_pipe(pipe)
3489 		if (intel_display_power_enabled(dev_priv,
3490 POWER_DOMAIN_PIPE(pipe)))
3491 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3493 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3494 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3495 GEN5_IRQ_RESET(GEN8_PCU_);
3500 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3502 unsigned long irqflags;
3504 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3505 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3506 ~dev_priv->de_irq_mask[PIPE_B]);
3507 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3508 ~dev_priv->de_irq_mask[PIPE_C]);
3509 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3512 static void cherryview_irq_preinstall(struct drm_device *dev)
3514 struct drm_i915_private *dev_priv = dev->dev_private;
3517 I915_WRITE(GEN8_MASTER_IRQ, 0);
3518 POSTING_READ(GEN8_MASTER_IRQ);
3520 gen8_gt_irq_reset(dev_priv);
3522 GEN5_IRQ_RESET(GEN8_PCU_);
3524 POSTING_READ(GEN8_PCU_IIR);
3526 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3528 I915_WRITE(PORT_HOTPLUG_EN, 0);
3529 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3531 	for_each_pipe(pipe)
3532 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3534 I915_WRITE(VLV_IMR, 0xffffffff);
3535 I915_WRITE(VLV_IER, 0x0);
3536 I915_WRITE(VLV_IIR, 0xffffffff);
3537 POSTING_READ(VLV_IIR);
3540 static void ibx_hpd_irq_setup(struct drm_device *dev)
3542 struct drm_i915_private *dev_priv = dev->dev_private;
3543 struct intel_encoder *intel_encoder;
3544 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3546 if (HAS_PCH_IBX(dev)) {
3547 hotplug_irqs = SDE_HOTPLUG_MASK;
3548 for_each_intel_encoder(dev, intel_encoder)
3549 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3550 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3552 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3553 for_each_intel_encoder(dev, intel_encoder)
3554 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3555 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3558 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3561 * Enable digital hotplug on the PCH, and configure the DP short pulse
3562 * duration to 2ms (which is the minimum in the Display Port spec)
3564 * This register is the same on all known PCH chips.
3566 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3567 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3568 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3569 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3570 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3571 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3574 static void ibx_irq_postinstall(struct drm_device *dev)
3576 struct drm_i915_private *dev_priv = dev->dev_private;
3579 if (HAS_PCH_NOP(dev))
3582 if (HAS_PCH_IBX(dev))
3583 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3585 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3587 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3588 I915_WRITE(SDEIMR, ~mask);
3591 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3593 struct drm_i915_private *dev_priv = dev->dev_private;
3594 u32 pm_irqs, gt_irqs;
3596 pm_irqs = gt_irqs = 0;
3598 dev_priv->gt_irq_mask = ~0;
3599 if (HAS_L3_DPF(dev)) {
3600 /* L3 parity interrupt is always unmasked. */
3601 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3602 gt_irqs |= GT_PARITY_ERROR(dev);
3605 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3607 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3608 ILK_BSD_USER_INTERRUPT;
3610 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3613 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3615 if (INTEL_INFO(dev)->gen >= 6) {
3616 pm_irqs |= dev_priv->pm_rps_events;
3619 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3621 dev_priv->pm_irq_mask = 0xffffffff;
3622 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3626 static int ironlake_irq_postinstall(struct drm_device *dev)
3628 unsigned long irqflags;
3629 struct drm_i915_private *dev_priv = dev->dev_private;
3630 u32 display_mask, extra_mask;
3632 if (INTEL_INFO(dev)->gen >= 7) {
3633 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3634 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3635 DE_PLANEB_FLIP_DONE_IVB |
3636 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3637 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3638 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3640 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3641 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3643 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3645 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3646 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3649 dev_priv->irq_mask = ~display_mask;
3651 I915_WRITE(HWSTAM, 0xeffe);
3653 ibx_irq_pre_postinstall(dev);
3655 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3657 gen5_gt_irq_postinstall(dev);
3659 ibx_irq_postinstall(dev);
3661 if (IS_IRONLAKE_M(dev)) {
3662 /* Enable PCU event interrupts
3664 * spinlocking not required here for correctness since interrupt
3665 * setup is guaranteed to run in single-threaded context. But we
3666 * need it to make the assert_spin_locked happy. */
3667 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3668 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3669 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3675 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3680 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3681 PIPE_FIFO_UNDERRUN_STATUS;
3683 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3684 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3685 POSTING_READ(PIPESTAT(PIPE_A));
3687 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3688 PIPE_CRC_DONE_INTERRUPT_STATUS;
3690 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3691 PIPE_GMBUS_INTERRUPT_STATUS);
3692 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3694 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3695 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3696 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3697 dev_priv->irq_mask &= ~iir_mask;
3699 I915_WRITE(VLV_IIR, iir_mask);
3700 I915_WRITE(VLV_IIR, iir_mask);
3701 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3702 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3703 POSTING_READ(VLV_IER);
3706 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3711 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3712 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3713 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3715 dev_priv->irq_mask |= iir_mask;
3716 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3717 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3718 I915_WRITE(VLV_IIR, iir_mask);
3719 I915_WRITE(VLV_IIR, iir_mask);
3720 POSTING_READ(VLV_IIR);
3722 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3723 PIPE_CRC_DONE_INTERRUPT_STATUS;
3725 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3726 PIPE_GMBUS_INTERRUPT_STATUS);
3727 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3729 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3730 PIPE_FIFO_UNDERRUN_STATUS;
3731 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3732 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3733 POSTING_READ(PIPESTAT(PIPE_A));
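/*
 * Uninstall deliberately mirrors install in reverse: the port/pipe
 * bits are masked and acked in VLV_IER/VLV_IMR/VLV_IIR first, then
 * the pipestat enables are torn down.  The doubled VLV_IIR write in
 * both paths guards against the hardware latching a second event
 * behind the one just cleared.
 */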
3736 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3738 assert_spin_locked(&dev_priv->irq_lock);
3740 	if (dev_priv->display_irqs_enabled)
3741 		return;
3743 dev_priv->display_irqs_enabled = true;
3745 if (dev_priv->dev->irq_enabled)
3746 valleyview_display_irqs_install(dev_priv);
3749 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3751 assert_spin_locked(&dev_priv->irq_lock);
3753 	if (!dev_priv->display_irqs_enabled)
3754 		return;
3756 dev_priv->display_irqs_enabled = false;
3758 if (dev_priv->dev->irq_enabled)
3759 valleyview_display_irqs_uninstall(dev_priv);
3762 static int valleyview_irq_postinstall(struct drm_device *dev)
3764 struct drm_i915_private *dev_priv = dev->dev_private;
3765 unsigned long irqflags;
3767 dev_priv->irq_mask = ~0;
3769 I915_WRITE(PORT_HOTPLUG_EN, 0);
3770 POSTING_READ(PORT_HOTPLUG_EN);
3772 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3773 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3774 I915_WRITE(VLV_IIR, 0xffffffff);
3775 POSTING_READ(VLV_IER);
3777 /* Interrupt setup is already guaranteed to be single-threaded, this is
3778 * just to make the assert_spin_locked check happy. */
3779 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3780 if (dev_priv->display_irqs_enabled)
3781 valleyview_display_irqs_install(dev_priv);
3782 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3784 I915_WRITE(VLV_IIR, 0xffffffff);
3785 I915_WRITE(VLV_IIR, 0xffffffff);
3787 gen5_gt_irq_postinstall(dev);
3789 /* ack & enable invalid PTE error interrupts */
3790 #if 0 /* FIXME: add support to irq handler for checking these bits */
3791 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3792 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3795 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3800 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3804 /* These are interrupts we'll toggle with the ring mask register */
3805 uint32_t gt_interrupts[] = {
3806 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3807 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3808 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3809 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3810 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3811 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3812 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3813 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3814 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3816 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3817 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3820 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3821 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3823 dev_priv->pm_irq_mask = 0xffffffff;
3826 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3828 struct drm_device *dev = dev_priv->dev;
3829 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3830 GEN8_PIPE_CDCLK_CRC_DONE |
3831 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3832 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3833 GEN8_PIPE_FIFO_UNDERRUN;
3835 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3836 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3837 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3839 	for_each_pipe(pipe)
3840 		if (intel_display_power_enabled(dev_priv,
3841 POWER_DOMAIN_PIPE(pipe)))
3842 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3843 dev_priv->de_irq_mask[pipe],
3846 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3849 static int gen8_irq_postinstall(struct drm_device *dev)
3851 struct drm_i915_private *dev_priv = dev->dev_private;
3853 ibx_irq_pre_postinstall(dev);
3855 gen8_gt_irq_postinstall(dev_priv);
3856 gen8_de_irq_postinstall(dev_priv);
3858 ibx_irq_postinstall(dev);
3860 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3861 POSTING_READ(GEN8_MASTER_IRQ);
3866 static int cherryview_irq_postinstall(struct drm_device *dev)
3868 struct drm_i915_private *dev_priv = dev->dev_private;
3869 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3870 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3871 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3872 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3873 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3874 PIPE_CRC_DONE_INTERRUPT_STATUS;
3875 unsigned long irqflags;
3879 * Leave vblank interrupts masked initially. enable/disable will
3880 * toggle them based on usage.
3882 dev_priv->irq_mask = ~enable_mask;
3884 	for_each_pipe(pipe)
3885 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3887 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3888 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3889 	for_each_pipe(pipe)
3890 		i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3891 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3893 I915_WRITE(VLV_IIR, 0xffffffff);
3894 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3895 I915_WRITE(VLV_IER, enable_mask);
3897 gen8_gt_irq_postinstall(dev_priv);
3899 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3900 POSTING_READ(GEN8_MASTER_IRQ);
3905 static void gen8_irq_uninstall(struct drm_device *dev)
3907 struct drm_i915_private *dev_priv = dev->dev_private;
3912 gen8_irq_reset(dev);
3915 static void valleyview_irq_uninstall(struct drm_device *dev)
3917 struct drm_i915_private *dev_priv = dev->dev_private;
3918 unsigned long irqflags;
3924 I915_WRITE(VLV_MASTER_IER, 0);
3926 	for_each_pipe(pipe)
3927 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3929 I915_WRITE(HWSTAM, 0xffffffff);
3930 I915_WRITE(PORT_HOTPLUG_EN, 0);
3931 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3933 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3934 if (dev_priv->display_irqs_enabled)
3935 valleyview_display_irqs_uninstall(dev_priv);
3936 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3938 dev_priv->irq_mask = 0;
3940 I915_WRITE(VLV_IIR, 0xffffffff);
3941 I915_WRITE(VLV_IMR, 0xffffffff);
3942 I915_WRITE(VLV_IER, 0x0);
3943 POSTING_READ(VLV_IER);
3946 static void cherryview_irq_uninstall(struct drm_device *dev)
3948 struct drm_i915_private *dev_priv = dev->dev_private;
3954 I915_WRITE(GEN8_MASTER_IRQ, 0);
3955 POSTING_READ(GEN8_MASTER_IRQ);
3957 #define GEN8_IRQ_FINI_NDX(type, which) \
3958 do { \
3959 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3960 I915_WRITE(GEN8_##type##_IER(which), 0); \
3961 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3962 POSTING_READ(GEN8_##type##_IIR(which)); \
3963 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3964 } while (0)
3966 #define GEN8_IRQ_FINI(type) \
3967 do { \
3968 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3969 I915_WRITE(GEN8_##type##_IER, 0); \
3970 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3971 POSTING_READ(GEN8_##type##_IIR); \
3972 	I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3973 } while (0)
3975 GEN8_IRQ_FINI_NDX(GT, 0);
3976 GEN8_IRQ_FINI_NDX(GT, 1);
3977 GEN8_IRQ_FINI_NDX(GT, 2);
3978 GEN8_IRQ_FINI_NDX(GT, 3);
3980 	GEN8_IRQ_FINI(PCU);
3982 #undef GEN8_IRQ_FINI
3983 #undef GEN8_IRQ_FINI_NDX
3985 I915_WRITE(PORT_HOTPLUG_EN, 0);
3986 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3988 	for_each_pipe(pipe)
3989 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3991 I915_WRITE(VLV_IMR, 0xffffffff);
3992 I915_WRITE(VLV_IER, 0x0);
3993 I915_WRITE(VLV_IIR, 0xffffffff);
3994 POSTING_READ(VLV_IIR);
3997 static void ironlake_irq_uninstall(struct drm_device *dev)
3999 struct drm_i915_private *dev_priv = dev->dev_private;
4004 ironlake_irq_reset(dev);
4007 static void i8xx_irq_preinstall(struct drm_device * dev)
4009 struct drm_i915_private *dev_priv = dev->dev_private;
4012 	for_each_pipe(pipe)
4013 		I915_WRITE(PIPESTAT(pipe), 0);
4014 I915_WRITE16(IMR, 0xffff);
4015 I915_WRITE16(IER, 0x0);
4016 POSTING_READ16(IER);
4019 static int i8xx_irq_postinstall(struct drm_device *dev)
4021 struct drm_i915_private *dev_priv = dev->dev_private;
4022 unsigned long irqflags;
4024 	I915_WRITE16(EMR,
4025 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4027 /* Unmask the interrupts that we always want on. */
4028 dev_priv->irq_mask =
4029 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4030 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4031 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4032 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4033 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4034 I915_WRITE16(IMR, dev_priv->irq_mask);
4036 	I915_WRITE16(IER,
4037 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4038 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4039 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4040 I915_USER_INTERRUPT);
4041 POSTING_READ16(IER);
4043 /* Interrupt setup is already guaranteed to be single-threaded, this is
4044 * just to make the assert_spin_locked check happy. */
4045 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4046 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4047 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4048 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4054 * Returns true when a page flip has completed.
4056 static bool i8xx_handle_vblank(struct drm_device *dev,
4057 int plane, int pipe, u32 iir)
4059 struct drm_i915_private *dev_priv = dev->dev_private;
4060 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4062 	if (!intel_pipe_handle_vblank(dev, pipe))
4063 		return false;
4065 	if ((iir & flip_pending) == 0)
4066 		return false;
4068 intel_prepare_page_flip(dev, plane);
4070 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4071 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4072 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4073 * the flip is completed (no longer pending). Since this doesn't raise
4074 * an interrupt per se, we watch for the change at vblank.
4076 if (I915_READ16(ISR) & flip_pending)
4079 intel_finish_page_flip(dev, pipe);
4084 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4086 struct drm_device *dev = arg;
4087 struct drm_i915_private *dev_priv = dev->dev_private;
4088 	u16 iir, new_iir;
4089 	u32 pipe_stats[2];
4090 	unsigned long irqflags;
4091 	int pipe;
4092 	u16 flip_mask =
4093 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4094 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4096 	iir = I915_READ16(IIR);
4097 	if (iir == 0)
4098 		return IRQ_NONE;
4100 while (iir & ~flip_mask) {
4101 /* Can't rely on pipestat interrupt bit in iir as it might
4102 * have been cleared after the pipestat interrupt was received.
4103 * It doesn't set the bit in iir again, but it still produces
4104 * interrupts (for non-MSI).
4106 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4107 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4108 i915_handle_error(dev, false,
4109 "Command parser error, iir 0x%08x",
4112 for_each_pipe(pipe) {
4113 int reg = PIPESTAT(pipe);
4114 pipe_stats[pipe] = I915_READ(reg);
4117 * Clear the PIPE*STAT regs before the IIR
4119 if (pipe_stats[pipe] & 0x8000ffff)
4120 I915_WRITE(reg, pipe_stats[pipe]);
4122 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4124 I915_WRITE16(IIR, iir & ~flip_mask);
4125 new_iir = I915_READ16(IIR); /* Flush posted writes */
4127 i915_update_dri1_breadcrumb(dev);
4129 if (iir & I915_USER_INTERRUPT)
4130 notify_ring(dev, &dev_priv->ring[RCS]);
4132 for_each_pipe(pipe) {
4137 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4138 i8xx_handle_vblank(dev, plane, pipe, iir))
4139 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4141 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4142 i9xx_pipe_crc_irq_handler(dev, pipe);
4144 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4145 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4146 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4155 static void i8xx_irq_uninstall(struct drm_device * dev)
4157 struct drm_i915_private *dev_priv = dev->dev_private;
4160 for_each_pipe(pipe) {
4161 /* Clear enable bits; then clear status bits */
4162 I915_WRITE(PIPESTAT(pipe), 0);
4163 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4165 I915_WRITE16(IMR, 0xffff);
4166 I915_WRITE16(IER, 0x0);
4167 I915_WRITE16(IIR, I915_READ16(IIR));
4170 static void i915_irq_preinstall(struct drm_device * dev)
4172 struct drm_i915_private *dev_priv = dev->dev_private;
4175 if (I915_HAS_HOTPLUG(dev)) {
4176 I915_WRITE(PORT_HOTPLUG_EN, 0);
4177 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4180 I915_WRITE16(HWSTAM, 0xeffe);
4181 	for_each_pipe(pipe)
4182 		I915_WRITE(PIPESTAT(pipe), 0);
4183 I915_WRITE(IMR, 0xffffffff);
4184 I915_WRITE(IER, 0x0);
4188 static int i915_irq_postinstall(struct drm_device *dev)
4190 struct drm_i915_private *dev_priv = dev->dev_private;
4191 	u32 enable_mask;
4192 	unsigned long irqflags;
4194 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4196 /* Unmask the interrupts that we always want on. */
4197 dev_priv->irq_mask =
4198 ~(I915_ASLE_INTERRUPT |
4199 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4200 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4201 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4202 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4203 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4206 I915_ASLE_INTERRUPT |
4207 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4208 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4209 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4210 I915_USER_INTERRUPT;
4212 if (I915_HAS_HOTPLUG(dev)) {
4213 I915_WRITE(PORT_HOTPLUG_EN, 0);
4214 POSTING_READ(PORT_HOTPLUG_EN);
4216 /* Enable in IER... */
4217 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4218 /* and unmask in IMR */
4219 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4222 I915_WRITE(IMR, dev_priv->irq_mask);
4223 I915_WRITE(IER, enable_mask);
4226 i915_enable_asle_pipestat(dev);
4228 /* Interrupt setup is already guaranteed to be single-threaded, this is
4229 * just to make the assert_spin_locked check happy. */
4230 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4231 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4232 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4233 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
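/*
 * Worked example of the mask setup above (a sketch of how this file uses
 * IMR and IER together, as I read it): irq_mask is the complement of the
 * always-wanted sources, so writing it to IMR unmasks exactly those
 * sources, while IER independently gates which latched IIR bits may
 * assert the CPU interrupt. With hotplug supported the effective
 * programming is:
 *
 *	IMR = ~(ASLE | PIPE_A_EVENT | PIPE_B_EVENT | FLIP_A | FLIP_B |
 *		PARSER_ERROR | DISPLAY_PORT)
 *	IER =   ASLE | PIPE_A_EVENT | PIPE_B_EVENT | PARSER_ERROR |
 *		USER_INTERRUPT | DISPLAY_PORT
 *
 * A source needs both the IMR unmask and the IER enable to raise an
 * interrupt. The flip-pending bits are unmasked so they latch in IIR,
 * but not enabled in IER, so they never fire on their own; the vblank
 * handler samples them instead.
 */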
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip bit
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on the pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
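/*
 * Timeline sketch of the MSI race described in the comment above
 * (hypothetical sequence, purely for illustration):
 *
 *	t0: IIR = USER_INTERRUPT	handler starts, snapshots iir
 *	t1: I915_WRITE(IIR, iir)	ack the bits we saw
 *	t2: new event lands; IIR goes 0 -> PIPE_A_EVENT (an MSI edge)
 *	t3: new_iir = I915_READ(IIR)	the loop picks the new bit up
 *
 * Because the loop re-reads IIR after the ack and iterates on new_iir,
 * the t2 event is handled in the next pass instead of being lost; the
 * cost is an occasional stray MSI report, which the comment argues is
 * rare enough not to trip the spurious-interrupt detector.
 */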
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
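/*
 * Note on EMR, as this function uses it: EMR is a mask register for
 * error conditions, so the ~(...) values written above leave only the
 * listed errors unmasked. On G4X that adds the GM45_* page-table and
 * privilege errors; elsewhere only page-table and memory-refresh errors
 * are reported. The instruction error mask bit is reserved and is
 * therefore left masked, per the comment above.
 */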
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later. So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
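/*
 * A note on the flow above (hedged reading): PORT_HOTPLUG_EN is updated
 * read-modify-write, with only the HOTPLUG_INT_EN_MASK bits rebuilt from
 * hpd_stats, so pins marked anything other than HPD_ENABLED (e.g. after
 * an interrupt storm) simply stay disabled until the reenable work marks
 * them HPD_ENABLED again and re-runs this setup.
 */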
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on the pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
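/*
 * The 0x8000ffff mask above selects just the PIPESTAT status bits (the
 * low word plus the FIFO underrun status in bit 31, as I read the
 * layout), so only write-1-to-clear status bits are written back and the
 * already-zeroed enable bits are left untouched.
 */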
static void intel_hpd_irq_reenable(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
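/*
 * Sketch of the HPD storm recovery this work item completes (for
 * orientation; the storm detection itself lives in the irq-side hotplug
 * handling):
 *
 *	1. the irq path notices an interrupt storm on a pin, marks it
 *	   HPD_DISABLED and falls back to polling that connector;
 *	2. this delayed work later flips the pin back to HPD_ENABLED and
 *	   restores each affected connector's polled mode;
 *	3. hpd_irq_setup then reprograms the hotplug registers.
 *
 * Runtime PM is held across the register writes since they touch the
 * hardware.
 */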
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	if (IS_GEN2(dev)) {
		/* Gen2 has no usable hardware frame counter */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
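/*
 * Orientation summary for the dispatch above: the vblank counter is
 * picked by capability (gen2 has no usable hardware frame counter, G4X
 * and gen5+ expose a full 32-bit counter, the rest only 24 bits), while
 * the irq vtable is picked by platform from newest to oldest: CHV, VLV,
 * gen8, PCH-split, then the legacy gen2/3/4 paths, which share
 * i915_enable_vblank/i915_disable_vblank.
 */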
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
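/*
 * Connector poll policy as set above (hedged summary): start from the
 * connector's native polled mode; if the encoder has a hotplug pin and
 * the platform supports hotplug, mark it DRM_CONNECTOR_POLL_HPD so the
 * poll helper relies on hotplug events instead of periodic detects; MST
 * ports are always marked DRM_CONNECTOR_POLL_HPD since their connectors
 * appear and disappear at runtime.
 */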
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}
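/*
 * Usage sketch for the pair above (hypothetical caller, illustration
 * only):
 *
 *	intel_runtime_pm_disable_interrupts(dev);
 *	... device runtime-suspended, no interrupts expected ...
 *	intel_runtime_pm_restore_interrupts(dev);
 *
 * On restore the _irqs_disabled flag is cleared before re-running the
 * driver's preinstall/postinstall hooks, so interrupt helpers invoked
 * from those hooks see a consistent "irqs enabled" state.
 */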