/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

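/*
 * Raw MMIO accessors. These bypass the forcewake handling, locking and
 * tracing done by the generated uncore read/write routines below, so
 * callers must ensure the hardware is awake themselves.
 */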
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
        WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
             "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        u32 gt_thread_status_mask;

        if (IS_HASWELL(dev_priv->dev))
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
        else
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

        /* Work around a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE, 1);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:snb */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

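/*
 * Multi-threaded ("MT") forcewake, used on IVB and later, is driven by
 * masked-bit registers: the upper 16 bits of a write select which of the
 * lower bits take effect, which is what _MASKED_BIT_ENABLE() and
 * _MASKED_BIT_DISABLE() encode.
 */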
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        u32 forcewake_ack;

        if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
                forcewake_ack = FORCEWAKE_ACK_HSW;
        else
                forcewake_ack = FORCEWAKE_MT_ACK;

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:ivb,hsw */
        if (INTEL_INFO(dev_priv->dev)->gen < 8)
                __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);

        if (IS_GEN7(dev_priv->dev))
                gen6_gt_check_fifodbg(dev_priv);
}

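/*
 * Register writes aimed at the GT power well go through a small hardware
 * FIFO; on overflow, writes can be lost and GTFIFODBG records the error,
 * so reserve some free entries before posting a write.
 */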
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW.
         * So, we need to read the FREE_ENTRIES every time. */
        if (IS_VALLEYVIEW(dev_priv->dev))
                dev_priv->uncore.fifo_count =
                        __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                                GT_FIFO_FREE_ENTRIES_MASK;

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
        __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                                int fw_engine)
{
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine) {
                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_VLV) &
                                                FORCEWAKE_KERNEL) == 0,
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_VLV) &
                                                FORCEWAKE_KERNEL),
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: waiting for Render to ack.\n");
        }

        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine) {
                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_MEDIA_VLV) &
                                                FORCEWAKE_KERNEL) == 0,
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_MEDIA_VLV) &
                                                FORCEWAKE_KERNEL),
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: waiting for media to ack.\n");
        }

        /* WaRsForcewakeWaitTC0:vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
                                        int fw_engine)
{
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine)
                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                        _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine)
                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

        /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (fw_engine & FORCEWAKE_RENDER &&
            dev_priv->uncore.fw_rendercount++ != 0)
                fw_engine &= ~FORCEWAKE_RENDER;
        if (fw_engine & FORCEWAKE_MEDIA &&
            dev_priv->uncore.fw_mediacount++ != 0)
                fw_engine &= ~FORCEWAKE_MEDIA;

        if (fw_engine)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (fw_engine & FORCEWAKE_RENDER) {
                WARN_ON(!dev_priv->uncore.fw_rendercount);
                if (--dev_priv->uncore.fw_rendercount != 0)
                        fw_engine &= ~FORCEWAKE_RENDER;
        }

        if (fw_engine & FORCEWAKE_MEDIA) {
                WARN_ON(!dev_priv->uncore.fw_mediacount);
                if (--dev_priv->uncore.fw_mediacount != 0)
                        fw_engine &= ~FORCEWAKE_MEDIA;
        }

        if (fw_engine)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

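/*
 * Timer callback that drops the forcewake reference deferred by
 * gen6_gt_force_wake_put(); deferring the release by a jiffy keeps
 * back-to-back get/put sequences from bouncing the hardware in and out
 * of forcewake.
 */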
static void gen6_force_wake_timer(unsigned long arg)
{
        struct drm_i915_private *dev_priv = (void *)arg;
        unsigned long irqflags;

        assert_device_not_suspended(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        WARN_ON(!dev_priv->uncore.forcewake_count);

        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        intel_runtime_pm_put(dev_priv);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        del_timer_sync(&dev_priv->uncore.force_wake_timer);

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (IS_VALLEYVIEW(dev))
                vlv_force_wake_reset(dev_priv);
        else if (IS_GEN6(dev) || IS_GEN7(dev))
                __gen6_gt_force_wake_reset(dev_priv);

        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
                __gen7_gt_force_wake_mt_reset(dev_priv);

        if (restore) { /* If reset with a user forcewake, try to restore */
                unsigned fw = 0;

                if (IS_VALLEYVIEW(dev)) {
                        if (dev_priv->uncore.fw_rendercount)
                                fw |= FORCEWAKE_RENDER;

                        if (dev_priv->uncore.fw_mediacount)
                                fw |= FORCEWAKE_MEDIA;
                } else {
                        if (dev_priv->uncore.forcewake_count)
                                fw = FORCEWAKE_ALL;
                }

                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev) || IS_GEN7(dev))
                        dev_priv->uncore.fifo_count =
                                __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                GT_FIFO_FREE_ENTRIES_MASK;
        } else {
                dev_priv->uncore.forcewake_count = 0;
                dev_priv->uncore.fw_rendercount = 0;
                dev_priv->uncore.fw_mediacount = 0;
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
            (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
                /* The docs do not explain exactly how the calculation can be
                 * made. It is somewhat guessable, but for now, it's always
                 * 128MB.
                 * NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        intel_uncore_forcewake_reset(dev, false);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg_val;

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);

        /* Turn off power gating; required especially for BIOS-less systems */
        if (IS_VALLEYVIEW(dev)) {
                mutex_lock(&dev_priv->rps.hw_lock);
                reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

                if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
                               PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
                               PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
                        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

                mutex_unlock(&dev_priv->rps.hw_lock);
        }
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
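/*
 * A typical caller (a hypothetical sketch, not code from this file):
 *
 *      gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *      ... access registers that need the GT to stay awake ...
 *      gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */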
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        intel_runtime_pm_get(dev_priv);

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev))
                return vlv_force_wake_get(dev_priv, fw_engine);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;
        bool delayed = false;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                vlv_force_wake_put(dev_priv, fw_engine);
                goto out;
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        WARN_ON(!dev_priv->uncore.forcewake_count);

        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
                delayed = true;
                mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
                                 jiffies + 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
        if (!delayed)
                intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
        ((reg) < 0x40000 && (reg) != FORCEWAKE)

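/*
 * On Valleyview the render and media engines sit behind separate
 * forcewake controls; these ranges map a register offset to the engine
 * whose power well it lives in.
 */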
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
        (((reg) >= 0x2000 && (reg) < 0x4000) || \
        ((reg) >= 0x5000 && (reg) < 0x8000) || \
        ((reg) >= 0xB000 && (reg) < 0x12000) || \
        ((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
        (((reg) >= 0x12000 && (reg) < 0x14000) || \
        ((reg) >= 0x22000 && (reg) < 0x24000) || \
        ((reg) >= 0x30000 && (reg) < 0x40000))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk
         * Issue a dummy write to wake up the chip from rc6 before touching
         * it for real. MI_MODE is masked, hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unknown unclaimed register before writing to %x\n",
                          reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unclaimed write to %x\n", reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

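/*
 * The per-generation MMIO accessors below are generated by the
 * REG_READ/REG_WRITE macro families; intel_uncore_init() installs the
 * variant matching the running hardware into dev_priv->uncore.funcs.
 */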
#define REG_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        if (dev_priv->uncore.forcewake_count == 0 && \
            NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                dev_priv->uncore.funcs.force_wake_get(dev_priv, \
                                                      FORCEWAKE_ALL); \
                val = __raw_i915_read##x(dev_priv, reg); \
                dev_priv->uncore.funcs.force_wake_put(dev_priv, \
                                                      FORCEWAKE_ALL); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        unsigned fwengine = 0; \
        REG_READ_HEADER(x); \
        if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
                if (dev_priv->uncore.fw_rendercount == 0) \
                        fwengine = FORCEWAKE_RENDER; \
        } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
                if (dev_priv->uncore.fw_mediacount == 0) \
                        fwengine = FORCEWAKE_MEDIA; \
        } \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
        REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        REG_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        REG_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_check(dev_priv, reg); \
        REG_WRITE_FOOTER; \
}

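/*
 * A few gen8 registers are "shadowed": the hardware buffers writes to
 * them while the GT is powered down, so gen8_write skips the forcewake
 * dance when targeting one of these.
 */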
static const u32 gen8_shadowed_regs[] = {
        FORCEWAKE_MT,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        /* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;
        for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
                if (reg == gen8_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, \
                                                              FORCEWAKE_ALL); \
                __raw_i915_write##x(dev_priv, reg, val); \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_put(dev_priv, \
                                                              FORCEWAKE_ALL); \
        } else { \
                __raw_i915_write##x(dev_priv, reg, val); \
        } \
        REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        setup_timer(&dev_priv->uncore.force_wake_timer,
                    gen6_force_wake_timer, (unsigned long)dev_priv);

        intel_uncore_early_sanitize(dev);

        if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
                dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
                __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);

                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen7_gt_force_wake_mt_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen7_gt_force_wake_mt_put;
                } else {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen6_gt_force_wake_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen6_gt_force_wake_put;
                }
        } else if (IS_GEN6(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        __gen6_gt_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put =
                        __gen6_gt_force_wake_put;
        }

        switch (INTEL_INFO(dev)->gen) {
        default:
                dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
                break;
        case 7:
        case 6:
                if (IS_HASWELL(dev)) {
                        dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
                        dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
                        dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
                } else {
                        dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
                        dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
                        dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
                }

                if (IS_VALLEYVIEW(dev)) {
                        dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
                        dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
                        dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
                        dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
                } else {
                        dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
                        dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
                        dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
                        dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
                }
                break;
        case 5:
                dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
                break;
        case 4:
        case 3:
        case 2:
                dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
                break;
        }
}

void intel_uncore_fini(struct drm_device *dev)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
        intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        intel_runtime_pm_get(dev_priv);

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
                goto out;
        }

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
        struct i915_hw_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }
        hs = &ctx->hang_stats;

        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = hs->batch_active;
        args->batch_pending = hs->batch_pending;

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

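/*
 * Pre-ilk GPU reset is driven through the GDRST register in PCI config
 * space: set the reset domain bits together with the reset-enable bit,
 * then poll until the hardware clears the enable bit again.
 */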
static int i965_reset_complete(struct drm_device *dev)
{
        u8 gdrst;
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
        int ret;

        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);

        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        pci_write_config_byte(dev->pdev, I965_GDRST, 0);

        return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

        return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        intel_uncore_forcewake_reset(dev, true);

        return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
        switch (INTEL_INFO(dev)->gen) {
        case 8:
        case 7:
        case 6: return gen6_do_reset(dev);
        case 5: return ironlake_do_reset(dev);
        case 4: return i965_do_reset(dev);
        default: return -ENODEV;
        }
}

void intel_uncore_check_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}