]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/i915_dma.c
Merge tag 'gpio-v3.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw...
[karo-tx-linux.git] / drivers / gpu / drm / i915 / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <drm/drmP.h>
32 #include <drm/drm_crtc_helper.h>
33 #include <drm/drm_fb_helper.h>
34 #include "intel_drv.h"
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include <linux/pci.h>
39 #include <linux/vgaarb.h>
40 #include <linux/acpi.h>
41 #include <linux/pnp.h>
42 #include <linux/vga_switcheroo.h>
43 #include <linux/slab.h>
44 #include <acpi/video.h>
45 #include <linux/pm.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/oom.h>
48
/*
 * Legacy (UMS/DRI1) ring accessors.  The "low priority" ring is the render
 * ring (RCS).  BEGIN/OUT/ADVANCE expect a local `dev_priv` to be in scope
 * at the call site.
 */
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

/* Reserve space for n dwords on the render ring; nonzero return is an error. */
#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

/* Write one dword into the space reserved by BEGIN_LP_RING(). */
#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

/* Commit the emitted dwords by advancing the ring tail. */
#define ADVANCE_LP_RING() \
	__intel_ring_advance(LP_RING(dev_priv))
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 *
 * A non-NULL ring buffer object means GEM owns the ring, so the DRM lock
 * check is skipped.  Note: LOCK_TEST_WITH_RETURN may return -EINVAL from
 * the *enclosing* function.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->buffer->obj == NULL)		\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
70
/*
 * Read one dword from the legacy hardware status page.  On chipsets that
 * need a GFX HWS the page is reached through the ioremapped alias kept in
 * dri1.gfx_hws_cpu_addr; otherwise it is read via the ring's regular
 * status-page accessor.
 */
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}
79
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
/* The breadcrumb is a dword in the status page that the ring stores the
 * driver's frame counter into (see i915_emit_breadcrumb/i915_emit_irq). */
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
/* Dword index of the breadcrumb slot within the hardware status page. */
#define I915_BREADCRUMB_INDEX		0x21
83
/*
 * Mirror the current hardware breadcrumb into the DRM master's SAREA so
 * legacy DRI1 userspace can observe which batch last completed.
 */
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	/*
	 * The dri breadcrumb update races against the drm master disappearing.
	 * Instead of trying to fix this (this is by far not the only ums issue)
	 * just don't do the update in kms mode.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}
104
/*
 * Point the hardware at the DMA-allocated status page by programming its
 * bus address into the HWS_PGA register.
 */
static void i915_write_hws_pga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	/* Gen4+: fold address bits 32:35 into register bits 4:7. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
115
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = LP_RING(dev_priv);

	/* DMA-coherent (physical) status page, if one was allocated. */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* Status page in graphics memory: drop the ioremapped CPU alias. */
	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
138
/*
 * Resynchronize the software view of the render ring (head/tail/space)
 * with the hardware registers, and flag an empty ring in the SAREA perf
 * boxes.  Legacy UMS only; a no-op under KMS.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_engine_cs *ring = LP_RING(dev_priv);
	struct intel_ringbuffer *ringbuf = ring->buffer;

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* Re-read hardware head/tail and recompute the free space,
	 * accounting for ring wrap-around. */
	ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
	if (ringbuf->space < 0)
		ringbuf->space += ringbuf->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
166
/*
 * Tear down the legacy DMA state: disable interrupts, release every ring
 * and, when the chipset uses one, free the GFX hardware status page.
 * Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
190
/*
 * DRI1 initialization: locate the SAREA, optionally set up the legacy
 * render ring (disallowed when GEM already owns it), and record the
 * front/back buffer layout that userspace handed us in `init`.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		/* The driver-private SAREA sits at a caller-supplied offset
		 * inside the shared SAREA mapping. */
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A live GEM buffer object on the ring means GEM owns it;
		 * a DRI1 client may not reinitialize it. */
		if (LP_RING(dev_priv)->buffer->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
235
/*
 * I915_RESUME_DMA: revalidate that the ring mapping and status page still
 * exist, then re-point the hardware at the status page (either the GFX
 * address set up earlier or the DMA-allocated one via HWS_PGA).
 */
static int i915_dma_resume(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->buffer->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	/* gfx_addr != 0 means the page lives in graphics memory. */
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
265
266 static int i915_dma_init(struct drm_device *dev, void *data,
267                          struct drm_file *file_priv)
268 {
269         drm_i915_init_t *init = data;
270         int retcode = 0;
271
272         if (drm_core_check_feature(dev, DRIVER_MODESET))
273                 return -ENODEV;
274
275         switch (init->func) {
276         case I915_INIT_DMA:
277                 retcode = i915_initialize(dev, init);
278                 break;
279         case I915_CLEANUP_DMA:
280                 retcode = i915_dma_cleanup(dev);
281                 break;
282         case I915_RESUME_DMA:
283                 retcode = i915_dma_resume(dev);
284                 break;
285         default:
286                 retcode = -EINVAL;
287                 break;
288         }
289
290         return retcode;
291 }
292
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	int client = (cmd >> 29) & 0x7;

	switch (client) {
	case 0x0:
		/* MI commands: only MI_NOOP (0x00) and MI_FLUSH (0x04)
		 * are permitted; everything else is rejected. */
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:	/* MI_NOOP */
		case 0x4:	/* MI_FLUSH */
			return 1;
		default:
			return 0;
		}
	case 0x2:
		/* 2D commands carry their payload length in the low byte. */
		return (cmd & 0xff) + 2;
	case 0x3: {
		int opcode = (cmd >> 24) & 0x1f;

		/* Single-dword 3D state commands. */
		if (opcode <= 0x18)
			return 1;

		switch (opcode) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Variable-length state packets; the sub-opcode
			 * selects which field holds the dword count. */
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;			/* indirect sequential */
		default:
			return 0;
		}
	}
	case 0x1:	/* reserved */
	default:
		return 0;
	}
}
359
/*
 * Validate a user command buffer dword-by-dword with validate_cmd() and,
 * if acceptable, copy it into the render ring.  Returns 0 on success,
 * -EINVAL for oversized or illegal buffers, or a ring-reservation error.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	/* Reject buffers that cannot fit in the ring in one go. */
	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
		return -EINVAL;

	/* validate_cmd() returns each command's length in dwords, or 0
	 * for a disallowed instruction; also catch commands that would
	 * run past the end of the buffer. */
	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	/* Reserve an even number of dwords. */
	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	/* Pad odd-length buffers with a zero dword. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
388
/*
 * Emit a GFX_OP_DRAWRECT_INFO command restricting rendering to the given
 * clip rectangle.  Gen4+ uses the 4-dword I965 variant (no DR1); older
 * parts use the 6-dword form.  Returns -EINVAL for a degenerate box or a
 * negative error from ring reservation.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reject empty, inverted or fully negative boxes. */
	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		/* Coordinates are packed x in the low, y in the high word;
		 * x2/y2 are exclusive, hardware wants inclusive. */
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
429
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/*
 * Bump the DRI1 frame counter, mirror it into the SAREA, and emit an
 * MI_STORE_DWORD_INDEX that makes the ring write the counter into the
 * breadcrumb slot of the status page on completion.
 *
 * NOTE(review): the counter wraps to 0 here but to 1 in i915_emit_irq();
 * looks inconsistent — confirm whether intentional.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	/* Best effort: if the ring reservation fails, skip the store. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
453
/*
 * Execute a DRI1 command buffer once per supplied cliprect (or exactly
 * once when no cliprects were given), then emit a breadcrumb.  The
 * cmdbuf/cliprects arguments have already been copied in from userspace
 * by the ioctl handler.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	/* Command buffers must be a whole number of dwords. */
	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* No cliprects still means one execution of the buffer. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
487
/*
 * Dispatch a client batchbuffer residing at a fixed graphics address,
 * once per supplied cliprect (or once if none).  I830/845G lack
 * MI_BATCH_BUFFER_START and use the bounded MI_BATCH_BUFFER form; G4X
 * and gen5 get a trailing ISP-invalidating flush.  Ends with a
 * breadcrumb emission.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	/* Batch start and length must be 8-byte aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			/* Non-secure: the hardware applies the same command
			 * validation validate_cmd() mirrors in software. */
			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			/* I830/845G: bounded batch with explicit end address. */
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}


	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
549
/*
 * Legacy page flip: emit the display-buffer swap between the front and
 * back buffers, wait for the plane A flip event, then store a breadcrumb
 * and record the new current page in the SAREA.  Requires a SAREA.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	/* Point the display at the other buffer and toggle current_page. */
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	/* Store the breadcrumb so userspace can see the flip complete. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}
603
604 static int i915_quiescent(struct drm_device *dev)
605 {
606         i915_kernel_lost_context(dev);
607         return intel_ring_idle(LP_RING(dev->dev_private));
608 }
609
/*
 * DRM_IOCTL_I915_FLUSH: wait for the render ring to drain.  Note that
 * RING_LOCK_TEST_WITH_RETURN can return from this function when the DRM
 * lock is required but not held.
 */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
626
/*
 * DRM_IOCTL_I915_BATCHBUFFER: copy in the user's cliprects and dispatch a
 * batchbuffer at a fixed graphics address.  Rejected under KMS and when
 * legacy batchbuffers are disabled (dri1.allow_batchbuffer).
 */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_sarea_t *sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	master_priv = dev->primary->master->driver_priv;
	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* num_cliprects is a signed int from userspace: reject negatives. */
	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		/* kcalloc checks the count*size multiplication for overflow. */
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	/* Mirror the completion breadcrumb into the SAREA for userspace. */
	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
684
/*
 * DRM_IOCTL_I915_CMDBUFFER: copy in a command buffer and optional
 * cliprects from userspace, then validate and execute the buffer through
 * i915_dispatch_cmdbuffer().  Uses goto-based cleanup for the two
 * allocations.
 */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_sarea_t *sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	master_priv = dev->primary->master->driver_priv;
	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* num_cliprects is a signed int from userspace: reject negatives. */
	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		/* kcalloc checks the count*size multiplication for overflow. */
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(*cliprects), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	/* Mirror the completion breadcrumb into the SAREA for userspace. */
	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
755
/*
 * Emit a breadcrumb store followed by MI_USER_INTERRUPT and return the
 * new counter value, which userspace later passes to the wait ioctl.
 * Called under struct_mutex by i915_irq_emit().
 */
static int i915_emit_irq(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	/* Wrap to 1, not 0 (contrast with i915_emit_breadcrumb). */
	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}
781
/*
 * Block (up to 3 seconds) until the breadcrumb in the status page reaches
 * irq_nr.  Uses the ring's user-interrupt wait queue when available,
 * otherwise falls back to polling.  Returns 0 on success or -EBUSY on
 * timeout.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_engine_cs *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Already past the requested value: just sync the SAREA. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	/* Prefer an interrupt-driven wait; poll for 3000 ms if the ring
	 * cannot provide user interrupts. */
	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}
815
/* Needs the lock as it touches the ring.
 */

/*
 * DRM_IOCTL_I915_IRQ_EMIT: emit a user interrupt on the render ring and
 * copy the resulting sequence number back to the user pointer in
 * emit->irq_seq.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Hand the emitted sequence number back to userspace. */
	if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
846
847 /* Doesn't need the hardware lock.
848  */
849 static int i915_irq_wait(struct drm_device *dev, void *data,
850                          struct drm_file *file_priv)
851 {
852         struct drm_i915_private *dev_priv = dev->dev_private;
853         drm_i915_irq_wait_t *irqwait = data;
854
855         if (drm_core_check_feature(dev, DRIVER_MODESET))
856                 return -ENODEV;
857
858         if (!dev_priv) {
859                 DRM_ERROR("called with no initialization\n");
860                 return -EINVAL;
861         }
862
863         return i915_wait_irq(dev, irqwait->irq_seq);
864 }
865
866 static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
867                          struct drm_file *file_priv)
868 {
869         struct drm_i915_private *dev_priv = dev->dev_private;
870         drm_i915_vblank_pipe_t *pipe = data;
871
872         if (drm_core_check_feature(dev, DRIVER_MODESET))
873                 return -ENODEV;
874
875         if (!dev_priv) {
876                 DRM_ERROR("called with no initialization\n");
877                 return -EINVAL;
878         }
879
880         pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
881
882         return 0;
883 }
884
885 /**
886  * Schedule buffer swap at given vertical blank.
887  */
888 static int i915_vblank_swap(struct drm_device *dev, void *data,
889                      struct drm_file *file_priv)
890 {
891         /* The delayed swap mechanism was fundamentally racy, and has been
892          * removed.  The model was that the client requested a delayed flip/swap
893          * from the kernel, then waited for vblank before continuing to perform
894          * rendering.  The problem was that the kernel might wake the client
895          * up before it dispatched the vblank swap (since the lock has to be
896          * held while touching the ringbuffer), in which case the client would
897          * clear and start the next frame before the swap occurred, and
898          * flicker would occur in addition to likely missing the vblank.
899          *
900          * In the absence of this ioctl, userland falls back to a correct path
901          * of waiting for a vblank, then dispatching the swap on its own.
902          * Context switching to userland and back is plenty fast enough for
903          * meeting the requirements of vblank swapping.
904          */
905         return -EINVAL;
906 }
907
908 static int i915_flip_bufs(struct drm_device *dev, void *data,
909                           struct drm_file *file_priv)
910 {
911         int ret;
912
913         if (drm_core_check_feature(dev, DRIVER_MODESET))
914                 return -ENODEV;
915
916         DRM_DEBUG_DRIVER("%s\n", __func__);
917
918         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
919
920         mutex_lock(&dev->struct_mutex);
921         ret = i915_dispatch_flip(dev);
922         mutex_unlock(&dev->struct_mutex);
923
924         return ret;
925 }
926
/*
 * DRM_IOCTL_I915_GETPARAM: report a single driver or hardware capability
 * value to userspace.
 *
 * Returns 0 on success; -EINVAL for an unknown parameter or missing
 * initialization; -EFAULT if the value cannot be copied out.
 */
static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                /* DRI1 batchbuffer toggle; set via i915_setparam(). */
                value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pdev->device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                /* Fence regs not reserved for DRI1 userspace. */
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                /* Full PPGTT implies aliasing PPGTT capability. */
                value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                /* Secure batches require CAP_SYS_ADMIN of the caller. */
                value = capable(CAP_SYS_ADMIN);
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 1;
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version();
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user failed\n");
                return -EFAULT;
        }

        return 0;
}
1037
1038 static int i915_setparam(struct drm_device *dev, void *data,
1039                          struct drm_file *file_priv)
1040 {
1041         struct drm_i915_private *dev_priv = dev->dev_private;
1042         drm_i915_setparam_t *param = data;
1043
1044         if (!dev_priv) {
1045                 DRM_ERROR("called with no initialization\n");
1046                 return -EINVAL;
1047         }
1048
1049         switch (param->param) {
1050         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1051                 break;
1052         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1053                 break;
1054         case I915_SETPARAM_ALLOW_BATCHBUFFER:
1055                 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1056                 break;
1057         case I915_SETPARAM_NUM_USED_FENCES:
1058                 if (param->value > dev_priv->num_fence_regs ||
1059                     param->value < 0)
1060                         return -EINVAL;
1061                 /* Userspace can use first N regs */
1062                 dev_priv->fence_reg_start = param->value;
1063                 break;
1064         default:
1065                 DRM_DEBUG_DRIVER("unknown parameter %d\n",
1066                                         param->param);
1067                 return -EINVAL;
1068         }
1069
1070         return 0;
1071 }
1072
1073 static int i915_set_status_page(struct drm_device *dev, void *data,
1074                                 struct drm_file *file_priv)
1075 {
1076         struct drm_i915_private *dev_priv = dev->dev_private;
1077         drm_i915_hws_addr_t *hws = data;
1078         struct intel_engine_cs *ring;
1079
1080         if (drm_core_check_feature(dev, DRIVER_MODESET))
1081                 return -ENODEV;
1082
1083         if (!I915_NEED_GFX_HWS(dev))
1084                 return -EINVAL;
1085
1086         if (!dev_priv) {
1087                 DRM_ERROR("called with no initialization\n");
1088                 return -EINVAL;
1089         }
1090
1091         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1092                 WARN(1, "tried to set status page when mode setting active\n");
1093                 return 0;
1094         }
1095
1096         DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1097
1098         ring = LP_RING(dev_priv);
1099         ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1100
1101         dev_priv->dri1.gfx_hws_cpu_addr =
1102                 ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
1103         if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1104                 i915_dma_cleanup(dev);
1105                 ring->status_page.gfx_addr = 0;
1106                 DRM_ERROR("can not ioremap virtual address for"
1107                                 " G33 hw status page\n");
1108                 return -ENOMEM;
1109         }
1110
1111         memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1112         I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
1113
1114         DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1115                          ring->status_page.gfx_addr);
1116         DRM_DEBUG_DRIVER("load hws at %p\n",
1117                          ring->status_page.page_addr);
1118         return 0;
1119 }
1120
1121 static int i915_get_bridge_dev(struct drm_device *dev)
1122 {
1123         struct drm_i915_private *dev_priv = dev->dev_private;
1124
1125         dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
1126         if (!dev_priv->bridge_dev) {
1127                 DRM_ERROR("bridge device not found\n");
1128                 return -1;
1129         }
1130         return 0;
1131 }
1132
1133 #define MCHBAR_I915 0x44
1134 #define MCHBAR_I965 0x48
1135 #define MCHBAR_SIZE (4*4096)
1136
1137 #define DEVEN_REG 0x54
1138 #define   DEVEN_MCHBAR_EN (1 << 28)
1139
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* MCHBAR lives at a different config offset before gen4. */
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	/* On gen4+ MCHBAR is a 64-bit BAR: read the high dword too. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	/* Program the freshly allocated base back into the bridge. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
1185
/* Setup MCHBAR if possible.  Sets dev_priv->mchbar_need_disable when we
 * enabled the BAR ourselves, so intel_teardown_mchbar() knows to disable
 * it again.  (The function returns void; the old "return true" wording in
 * this comment was stale.)
 */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		/* 915G/GM gate MCHBAR via a bit in the DEVEN register. */
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		/* Elsewhere the enable is bit 0 of the MCHBAR reg itself. */
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
1226
/* Undo intel_setup_mchbar(): disable the BAR if we enabled it ourselves
 * and release any bus resource we allocated for it.
 */
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			/* 915G/GM: clear the enable bit in DEVEN. */
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			/* Elsewhere: clear bit 0 of the MCHBAR reg. */
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	/* Only allocated (start != 0) when intel_alloc_mchbar_resource()
	 * succeeded; release it back to the bus. */
	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
1249
1250 /* true = enable decode, false = disable decoder */
1251 static unsigned int i915_vga_set_decode(void *cookie, bool state)
1252 {
1253         struct drm_device *dev = cookie;
1254
1255         intel_modeset_vga_set_state(dev, state);
1256         if (state)
1257                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1258                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1259         else
1260                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1261 }
1262
/* vga_switcheroo callback: power the GPU up or down when the mux flips. */
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* Mark the transition so other paths can detect it. */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* Reuse the normal suspend path with a SUSPEND pm event. */
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1281
1282 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1283 {
1284         struct drm_device *dev = pci_get_drvdata(pdev);
1285
1286         /*
1287          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1288          * locking inversion with the driver load path. And the access here is
1289          * completely racy anyway. So don't bother with locking for now.
1290          */
1291         return dev->open_count == 0;
1292 }
1293
/* Hooks registered with vga_switcheroo; we don't support reprobe. */
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
1299
/*
 * One-time KMS bring-up: VGA arbitration, switcheroo, stolen memory,
 * irqs, modeset, GEM and fbdev -- in dependency order.  On failure,
 * everything set up so far is unwound through the goto chain at the
 * bottom.  Returns 0 on success or a negative errno.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Missing VBIOS tables are non-fatal; defaults are used. */
	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = drm_irq_install(dev, dev->pdev->irq);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	/* No display pipes: nothing more to set up (no fbdev, no hpd). */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	return 0;

	/* Error unwind: each label undoes the step acquired just before the
	 * one that failed, falling through the earlier labels in reverse
	 * acquisition order. */
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	WARN_ON(dev_priv->mm.aliasing_ppgtt);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
1401
1402 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1403 {
1404         struct drm_i915_master_private *master_priv;
1405
1406         master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1407         if (!master_priv)
1408                 return -ENOMEM;
1409
1410         master->driver_priv = master_priv;
1411         return 0;
1412 }
1413
1414 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1415 {
1416         struct drm_i915_master_private *master_priv = master->driver_priv;
1417
1418         if (!master_priv)
1419                 return;
1420
1421         kfree(master_priv);
1422
1423         master->driver_priv = NULL;
1424 }
1425
#if IS_ENABLED(CONFIG_FB)
/*
 * Evict any firmware framebuffer driver (e.g. vesafb/efifb) whose
 * aperture overlaps our mappable GTT, so inteldrmfb can take over
 * scanout.  Best effort: silently does nothing on allocation failure.
 */
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	/* The conflicting region is the entire mappable aperture. */
	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	/* A shadowed ROM marks this as the primary (boot) VGA device. */
	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
#else
/* Without CONFIG_FB there is no firmware fbdev driver to kick out. */
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif
1452
/*
 * Log the static device info (gen, PCI id, all feature flags) at driver
 * debug level.  DEV_INFO_FOR_EACH_FLAG is expanded twice: once with
 * PRINT_S/SEP_EMPTY to build the format string, and once with
 * PRINT_FLAG/SEP_COMMA to build the matching argument list (each flag
 * prints its own name when set, or an empty string otherwise).
 */
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
1471
1472 /*
1473  * Determine various intel_device_info fields at runtime.
1474  *
1475  * Use it when either:
1476  *   - it's judged too laborious to fill n static structures with the limit
1477  *     when a simple if statement does the job,
1478  *   - run-time checks (eg read fuse/strap registers) are needed.
1479  *
1480  * This function needs to be called:
1481  *   - after the MMIO has been setup as we are reading registers,
1482  *   - after the PCH has been detected,
1483  *   - before the first usage of the fields it can tweak.
1484  */
1485 static void intel_device_info_runtime_init(struct drm_device *dev)
1486 {
1487         struct drm_i915_private *dev_priv = dev->dev_private;
1488         struct intel_device_info *info;
1489         enum pipe pipe;
1490
1491         info = (struct intel_device_info *)&dev_priv->info;
1492
1493         if (IS_VALLEYVIEW(dev))
1494                 for_each_pipe(pipe)
1495                         info->num_sprites[pipe] = 2;
1496         else
1497                 for_each_pipe(pipe)
1498                         info->num_sprites[pipe] = 1;
1499
1500         if (i915.disable_display) {
1501                 DRM_INFO("Display disabled (module parameter)\n");
1502                 info->num_pipes = 0;
1503         } else if (info->num_pipes > 0 &&
1504                    (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
1505                    !IS_VALLEYVIEW(dev)) {
1506                 u32 fuse_strap = I915_READ(FUSE_STRAP);
1507                 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
1508
1509                 /*
1510                  * SFUSE_STRAP is supposed to have a bit signalling the display
1511                  * is fused off. Unfortunately it seems that, at least in
1512                  * certain cases, fused off display means that PCH display
1513                  * reads don't land anywhere. In that case, we read 0s.
1514                  *
1515                  * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
1516                  * should be set when taking over after the firmware.
1517                  */
1518                 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
1519                     sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
1520                     (dev_priv->pch_type == PCH_CPT &&
1521                      !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
1522                         DRM_INFO("Display fused off, disabling\n");
1523                         info->num_pipes = 0;
1524                 }
1525         }
1526 }
1527
1528 /**
1529  * i915_driver_load - setup chip and create an initial config
1530  * @dev: DRM device
1531  * @flags: startup flags
1532  *
1533  * The driver load routine has to do several things:
1534  *   - drive output discovery via intel_modeset_init()
1535  *   - initialize the memory manager
1536  *   - allocate initial config memory
1537  *   - setup the DRM framebuffer with the allocated memory
1538  */
1539 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1540 {
1541         struct drm_i915_private *dev_priv;
1542         struct intel_device_info *info, *device_info;
1543         int ret = 0, mmio_bar, mmio_size;
1544         uint32_t aperture_size;
1545
1546         info = (struct intel_device_info *) flags;
1547
1548         /* Refuse to load on gen6+ without kms enabled. */
1549         if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
1550                 DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
1551                 DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
1552                 return -ENODEV;
1553         }
1554
1555         /* UMS needs agp support. */
1556         if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
1557                 return -EINVAL;
1558
1559         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1560         if (dev_priv == NULL)
1561                 return -ENOMEM;
1562
1563         dev->dev_private = (void *)dev_priv;
1564         dev_priv->dev = dev;
1565
1566         /* copy initial configuration to dev_priv->info */
1567         device_info = (struct intel_device_info *)&dev_priv->info;
1568         *device_info = *info;
1569
1570         spin_lock_init(&dev_priv->irq_lock);
1571         spin_lock_init(&dev_priv->gpu_error.lock);
1572         spin_lock_init(&dev_priv->backlight_lock);
1573         spin_lock_init(&dev_priv->uncore.lock);
1574         spin_lock_init(&dev_priv->mm.object_stat_lock);
1575         mutex_init(&dev_priv->dpio_lock);
1576         mutex_init(&dev_priv->modeset_restore_lock);
1577
1578         intel_pm_setup(dev);
1579
1580         intel_display_crc_init(dev);
1581
1582         i915_dump_device_info(dev_priv);
1583
1584         /* Not all pre-production machines fall into this category, only the
1585          * very first ones. Almost everything should work, except for maybe
1586          * suspend/resume. And we don't implement workarounds that affect only
1587          * pre-production machines. */
1588         if (IS_HSW_EARLY_SDV(dev))
1589                 DRM_INFO("This is an early pre-production Haswell machine. "
1590                          "It may not be fully functional.\n");
1591
1592         if (i915_get_bridge_dev(dev)) {
1593                 ret = -EIO;
1594                 goto free_priv;
1595         }
1596
1597         mmio_bar = IS_GEN2(dev) ? 1 : 0;
1598         /* Before gen4, the registers and the GTT are behind different BARs.
1599          * However, from gen4 onwards, the registers and the GTT are shared
1600          * in the same BAR, so we want to restrict this ioremap from
1601          * clobbering the GTT which we want ioremap_wc instead. Fortunately,
1602          * the register BAR remains the same size for all the earlier
1603          * generations up to Ironlake.
1604          */
1605         if (info->gen < 5)
1606                 mmio_size = 512*1024;
1607         else
1608                 mmio_size = 2*1024*1024;
1609
1610         dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1611         if (!dev_priv->regs) {
1612                 DRM_ERROR("failed to map registers\n");
1613                 ret = -EIO;
1614                 goto put_bridge;
1615         }
1616
1617         /* This must be called before any calls to HAS_PCH_* */
1618         intel_detect_pch(dev);
1619
1620         intel_uncore_init(dev);
1621
1622         ret = i915_gem_gtt_init(dev);
1623         if (ret)
1624                 goto out_regs;
1625
1626         if (drm_core_check_feature(dev, DRIVER_MODESET))
1627                 i915_kick_out_firmware_fb(dev_priv);
1628
1629         pci_set_master(dev->pdev);
1630
1631         /* overlay on gen2 is broken and can't address above 1G */
1632         if (IS_GEN2(dev))
1633                 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1634
1635         /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1636          * using 32bit addressing, overwriting memory if HWS is located
1637          * above 4GB.
1638          *
1639          * The documentation also mentions an issue with undefined
1640          * behaviour if any general state is accessed within a page above 4GB,
1641          * which also needs to be handled carefully.
1642          */
1643         if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1644                 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1645
1646         aperture_size = dev_priv->gtt.mappable_end;
1647
1648         dev_priv->gtt.mappable =
1649                 io_mapping_create_wc(dev_priv->gtt.mappable_base,
1650                                      aperture_size);
1651         if (dev_priv->gtt.mappable == NULL) {
1652                 ret = -EIO;
1653                 goto out_gtt;
1654         }
1655
1656         dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
1657                                               aperture_size);
1658
1659         /* The i915 workqueue is primarily used for batched retirement of
1660          * requests (and thus managing bo) once the task has been completed
1661          * by the GPU. i915_gem_retire_requests() is called directly when we
1662          * need high-priority retirement, such as waiting for an explicit
1663          * bo.
1664          *
1665          * It is also used for periodic low-priority events, such as
1666          * idle-timers and recording error state.
1667          *
1668          * All tasks on the workqueue are expected to acquire the dev mutex
1669          * so there is no point in running more than one instance of the
1670          * workqueue at any time.  Use an ordered one.
1671          */
1672         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1673         if (dev_priv->wq == NULL) {
1674                 DRM_ERROR("Failed to create our workqueue.\n");
1675                 ret = -ENOMEM;
1676                 goto out_mtrrfree;
1677         }
1678
1679         intel_irq_init(dev);
1680         intel_uncore_sanitize(dev);
1681
1682         /* Try to make sure MCHBAR is enabled before poking at it */
1683         intel_setup_mchbar(dev);
1684         intel_setup_gmbus(dev);
1685         intel_opregion_setup(dev);
1686
1687         intel_setup_bios(dev);
1688
1689         i915_gem_load(dev);
1690
1691         /* On the 945G/GM, the chipset reports the MSI capability on the
1692          * integrated graphics even though the support isn't actually there
1693          * according to the published specs.  It doesn't appear to function
1694          * correctly in testing on 945G.
1695          * This may be a side effect of MSI having been made available for PEG
1696          * and the registers being closely associated.
1697          *
1698          * According to chipset errata, on the 965GM, MSI interrupts may
1699          * be lost or delayed, but we use them anyways to avoid
1700          * stuck interrupts on some machines.
1701          */
1702         if (!IS_I945G(dev) && !IS_I945GM(dev))
1703                 pci_enable_msi(dev->pdev);
1704
1705         intel_device_info_runtime_init(dev);
1706
1707         if (INTEL_INFO(dev)->num_pipes) {
1708                 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1709                 if (ret)
1710                         goto out_gem_unload;
1711         }
1712
1713         intel_power_domains_init(dev_priv);
1714
1715         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1716                 ret = i915_load_modeset_init(dev);
1717                 if (ret < 0) {
1718                         DRM_ERROR("failed to init modeset\n");
1719                         goto out_power_well;
1720                 }
1721         } else {
1722                 /* Start out suspended in ums mode. */
1723                 dev_priv->ums.mm_suspended = 1;
1724         }
1725
1726         i915_setup_sysfs(dev);
1727
1728         if (INTEL_INFO(dev)->num_pipes) {
1729                 /* Must be done after probing outputs */
1730                 intel_opregion_init(dev);
1731                 acpi_video_register();
1732         }
1733
1734         if (IS_GEN5(dev))
1735                 intel_gpu_ips_init(dev_priv);
1736
1737         intel_init_runtime_pm(dev_priv);
1738
1739         return 0;
1740
1741 out_power_well:
1742         intel_power_domains_remove(dev_priv);
1743         drm_vblank_cleanup(dev);
1744 out_gem_unload:
1745         WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1746         unregister_shrinker(&dev_priv->mm.shrinker);
1747
1748         if (dev->pdev->msi_enabled)
1749                 pci_disable_msi(dev->pdev);
1750
1751         intel_teardown_gmbus(dev);
1752         intel_teardown_mchbar(dev);
1753         pm_qos_remove_request(&dev_priv->pm_qos);
1754         destroy_workqueue(dev_priv->wq);
1755 out_mtrrfree:
1756         arch_phys_wc_del(dev_priv->gtt.mtrr);
1757         io_mapping_free(dev_priv->gtt.mappable);
1758 out_gtt:
1759         list_del(&dev_priv->gtt.base.global_link);
1760         drm_mm_takedown(&dev_priv->gtt.base.mm);
1761         dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1762 out_regs:
1763         intel_uncore_fini(dev);
1764         pci_iounmap(dev->pdev, dev_priv->regs);
1765 put_bridge:
1766         pci_dev_put(dev_priv->bridge_dev);
1767 free_priv:
1768         if (dev_priv->slab)
1769                 kmem_cache_destroy(dev_priv->slab);
1770         kfree(dev_priv);
1771         return ret;
1772 }
1773
/**
 * i915_driver_unload - tear down driver state on module unload
 * @dev: DRM device
 *
 * Undoes i915_driver_load() in (roughly) reverse order: idles the GPU,
 * unwinds power management, sysfs, notifiers, the GTT mapping, modeset
 * state, interrupts/error handling, and finally frees dev_priv itself.
 * The teardown order is significant — do not reorder calls casually.
 *
 * Returns 0 on success, or the error from i915_gem_suspend() if the
 * hardware could not be idled (in which case unload is aborted).
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Idle the GPU first; everything below assumes no requests are
	 * in flight. Bail out entirely if the hardware won't quiesce. */
	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_fini_runtime_pm(dev_priv);

	intel_gpu_ips_teardown();

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_remove(dev_priv);

	i915_teardown_sysfs(dev);

	/* Drop the MM callbacks registered at load time; WARN if the OOM
	 * notifier was not actually registered (unbalanced load path). */
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	/* Release the WC mapping of the GTT aperture and its MTRR. */
	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		cancel_work_sync(&dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		/* All PPGTTs should have been torn down by now. */
		WARN_ON(dev_priv->mm.aliasing_ppgtt);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_stolen(dev);

		/* A physically-allocated status page (pre-GEM hardware)
		 * is only used in UMS mode; free it if present. */
		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	/* The global GTT should be the last VM left on the list. */
	list_del(&dev_priv->gtt.base.global_link);
	WARN_ON(!list_empty(&dev_priv->vm_list));

	drm_vblank_cleanup(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
1874
/**
 * i915_driver_open - per-fd driver open hook
 * @dev: DRM device
 * @file: DRM file private being opened
 *
 * Thin wrapper that forwards to the GEM open path; returns whatever
 * i915_gem_open() returns (0 on success, negative error code otherwise).
 */
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	return i915_gem_open(dev, file);
}
1885
1886 /**
1887  * i915_driver_lastclose - clean up after all DRM clients have exited
1888  * @dev: DRM device
1889  *
1890  * Take care of cleaning up after all DRM clients have exited.  In the
1891  * mode setting case, we want to restore the kernel's initial mode (just
1892  * in case the last client left us in a bad state).
1893  *
1894  * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
1896  * up any GEM state.
1897  */
1898 void i915_driver_lastclose(struct drm_device * dev)
1899 {
1900         struct drm_i915_private *dev_priv = dev->dev_private;
1901
1902         /* On gen6+ we refuse to init without kms enabled, but then the drm core
1903          * goes right around and calls lastclose. Check for this and don't clean
1904          * up anything. */
1905         if (!dev_priv)
1906                 return;
1907
1908         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1909                 intel_fbdev_restore_mode(dev);
1910                 vga_switcheroo_process_delayed_switch();
1911                 return;
1912         }
1913
1914         i915_gem_lastclose(dev);
1915
1916         i915_dma_cleanup(dev);
1917 }
1918
/* Per-fd cleanup run before the file is finally closed: drop the file's
 * hardware contexts and release its GEM request/object state. Both
 * teardown steps must run under struct_mutex, contexts first. */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
}
1926
1927 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1928 {
1929         struct drm_i915_file_private *file_priv = file->driver_priv;
1930
1931         if (file_priv && file_priv->bsd_ring)
1932                 file_priv->bsd_ring = NULL;
1933         kfree(file_priv);
1934 }
1935
/* Dispatch table for the i915 device-specific ioctls. Entry order must
 * match the DRM_I915_* ioctl numbers in i915_drm.h. The legacy DMA/UMS
 * entries (INIT, FLUSH, BATCHBUFFER, ...) are kept for old userspace;
 * DRM_RENDER_ALLOW marks ioctls safe to expose on render nodes. */
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

/* Number of entries in the table above; exported for the DRM core. */
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
1990
1991 /*
1992  * This is really ugly: Because old userspace abused the linux agp interface to
1993  * manage the gtt, we need to claim that all intel devices are agp.  For
1994  * otherwise the drm core refuses to initialize the agp support code.
1995  */
1996 int i915_driver_device_is_agp(struct drm_device * dev)
1997 {
1998         return 1;
1999 }