1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <drm/drm_crtc_helper.h>
33 #include <drm/drm_fb_helper.h>
34 #include "intel_drv.h"
35 #include <drm/i915_drm.h>
37 #include "i915_trace.h"
38 #include <linux/pci.h>
39 #include <linux/vgaarb.h>
40 #include <linux/acpi.h>
41 #include <linux/pnp.h>
42 #include <linux/vga_switcheroo.h>
43 #include <linux/slab.h>
44 #include <acpi/video.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/oom.h>
49 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
51 #define BEGIN_LP_RING(n) \
52 intel_ring_begin(LP_RING(dev_priv), (n))
54 #define OUT_RING(x) \
55 intel_ring_emit(LP_RING(dev_priv), x)
57 #define ADVANCE_LP_RING() \
58 __intel_ring_advance(LP_RING(dev_priv))
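/*
 * These legacy macros mirror the old DRM ring-buffer helpers for the DRI1/UMS
 * ioctls below: LP_RING() picks the render ring (RCS), and BEGIN_LP_RING()/
 * OUT_RING()/ADVANCE_LP_RING() map straight onto intel_ring_begin(),
 * intel_ring_emit() and __intel_ring_advance() on that ring. They all assume
 * a local "dev_priv" variable is in scope at the call site.
 */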
61 * Lock test for when it's just for synchronization of ring access.
63 * In that case, we don't need to do it when GEM is initialized as nobody else
64 * has access to the ring.
66 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
67 if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
68 LOCK_TEST_WITH_RETURN(dev, file); \
69 } while (0)
72 intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
74 if (I915_NEED_GFX_HWS(dev_priv->dev))
75 return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
77 return intel_read_status_page(LP_RING(dev_priv), reg);
80 #define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
81 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
82 #define I915_BREADCRUMB_INDEX 0x21
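/*
 * The breadcrumb is a 32-bit counter that i915_emit_breadcrumb() and
 * i915_emit_irq() write into dword 0x21 of the hardware status page via
 * MI_STORE_DWORD_INDEX. READ_BREADCRUMB() reads it back so the DRI1 sarea
 * (last_dispatch) and i915_wait_irq() can tell how far the GPU has progressed.
 */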
84 void i915_update_dri1_breadcrumb(struct drm_device *dev)
86 struct drm_i915_private *dev_priv = dev->dev_private;
87 struct drm_i915_master_private *master_priv;
90 * The dri breadcrumb update races against the drm master disappearing.
91 * Instead of trying to fix this (this is by far not the only ums issue)
92 * just don't do the update in kms mode.
94 if (drm_core_check_feature(dev, DRIVER_MODESET))
97 if (dev->primary->master) {
98 master_priv = dev->primary->master->driver_priv;
99 if (master_priv->sarea_priv)
100 master_priv->sarea_priv->last_dispatch =
101 READ_BREADCRUMB(dev_priv);
105 static void i915_write_hws_pga(struct drm_device *dev)
107 struct drm_i915_private *dev_priv = dev->dev_private;
110 addr = dev_priv->status_page_dmah->busaddr;
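/*
 * HWS_PGA takes the page-aligned bus address of the status page; on gen4+
 * the fold below moves address bits 35:32 into register bits 7:4, presumably
 * so status pages located above 4GiB remain addressable.
 */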
111 if (INTEL_INFO(dev)->gen >= 4)
112 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
113 I915_WRITE(HWS_PGA, addr);
117 * Frees the hardware status page, whether it's a physical address or a virtual
118 * address set up by the X Server.
120 static void i915_free_hws(struct drm_device *dev)
122 struct drm_i915_private *dev_priv = dev->dev_private;
123 struct intel_engine_cs *ring = LP_RING(dev_priv);
125 if (dev_priv->status_page_dmah) {
126 drm_pci_free(dev, dev_priv->status_page_dmah);
127 dev_priv->status_page_dmah = NULL;
130 if (ring->status_page.gfx_addr) {
131 ring->status_page.gfx_addr = 0;
132 iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
135 /* Need to rewrite hardware status page */
136 I915_WRITE(HWS_PGA, 0x1ffff000);
139 void i915_kernel_lost_context(struct drm_device * dev)
141 struct drm_i915_private *dev_priv = dev->dev_private;
142 struct drm_i915_master_private *master_priv;
143 struct intel_engine_cs *ring = LP_RING(dev_priv);
144 struct intel_ringbuffer *ringbuf = ring->buffer;
147 * We should never lose context on the ring with modesetting
148 * as we don't expose it to userspace
150 if (drm_core_check_feature(dev, DRIVER_MODESET))
153 ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
154 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
155 ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
156 if (ringbuf->space < 0)
157 ringbuf->space += ringbuf->size;
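/*
 * The lines above resynchronize the software copy of head/tail from the
 * hardware ring registers and recompute the free space as
 * head - (tail + I915_RING_FREE_SPACE), wrapping modulo the ring size.
 */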
159 if (!dev->primary->master)
162 master_priv = dev->primary->master->driver_priv;
163 if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
164 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
167 static int i915_dma_cleanup(struct drm_device * dev)
169 struct drm_i915_private *dev_priv = dev->dev_private;
172 /* Make sure interrupts are disabled here because the uninstall ioctl
173 * may not have been called from userspace and after dev_private
174 * is freed, it's too late.
176 if (dev->irq_enabled)
177 drm_irq_uninstall(dev);
179 mutex_lock(&dev->struct_mutex);
180 for (i = 0; i < I915_NUM_RINGS; i++)
181 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
182 mutex_unlock(&dev->struct_mutex);
184 /* Clear the HWS virtual address at teardown */
185 if (I915_NEED_GFX_HWS(dev))
191 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
193 struct drm_i915_private *dev_priv = dev->dev_private;
194 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
197 master_priv->sarea = drm_getsarea(dev);
198 if (master_priv->sarea) {
199 master_priv->sarea_priv = (drm_i915_sarea_t *)
200 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
202 DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
205 if (init->ring_size != 0) {
206 if (LP_RING(dev_priv)->buffer->obj != NULL) {
207 i915_dma_cleanup(dev);
208 DRM_ERROR("Client tried to initialize ringbuffer in "
213 ret = intel_render_ring_init_dri(dev,
217 i915_dma_cleanup(dev);
222 dev_priv->dri1.cpp = init->cpp;
223 dev_priv->dri1.back_offset = init->back_offset;
224 dev_priv->dri1.front_offset = init->front_offset;
225 dev_priv->dri1.current_page = 0;
226 if (master_priv->sarea_priv)
227 master_priv->sarea_priv->pf_current_page = 0;
229 /* Allow hardware batchbuffers unless told otherwise.
231 dev_priv->dri1.allow_batchbuffer = 1;
236 static int i915_dma_resume(struct drm_device * dev)
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct intel_engine_cs *ring = LP_RING(dev_priv);
241 DRM_DEBUG_DRIVER("%s\n", __func__);
243 if (ring->buffer->virtual_start == NULL) {
244 DRM_ERROR("can not ioremap virtual address for"
249 /* Program Hardware Status Page */
250 if (!ring->status_page.page_addr) {
251 DRM_ERROR("Can not find hardware status page\n");
254 DRM_DEBUG_DRIVER("hw status page @ %p\n",
255 ring->status_page.page_addr);
256 if (ring->status_page.gfx_addr != 0)
257 intel_ring_setup_status_page(ring);
259 i915_write_hws_pga(dev);
261 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
266 static int i915_dma_init(struct drm_device *dev, void *data,
267 struct drm_file *file_priv)
269 drm_i915_init_t *init = data;
272 if (drm_core_check_feature(dev, DRIVER_MODESET))
275 switch (init->func) {
277 retcode = i915_initialize(dev, init);
279 case I915_CLEANUP_DMA:
280 retcode = i915_dma_cleanup(dev);
282 case I915_RESUME_DMA:
283 retcode = i915_dma_resume(dev);
293 /* Implement basically the same security restrictions as hardware does
294 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
296 * Most of the calculations below involve calculating the size of a
297 * particular instruction. It's important to get the size right as
298 * that tells us where the next instruction to check is. Any illegal
299 * instruction detected will be given a size of zero, which is a
300 * signal to abort the rest of the buffer.
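/*
 * Bits 31:29 of each dword select the command client: 0x0 is the MI (memory
 * interface) class, 0x2 the 2D/blitter class and 0x3 the 3D/render class.
 * The 2D commands, for example, keep (length - 2) in the low byte, which is
 * why the 2D case below returns (cmd & 0xff) + 2 dwords.
 */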
302 static int validate_cmd(int cmd)
304 switch (((cmd >> 29) & 0x7)) {
306 switch ((cmd >> 23) & 0x3f) {
308 return 1; /* MI_NOOP */
310 return 1; /* MI_FLUSH */
312 return 0; /* disallow everything else */
316 return 0; /* reserved */
318 return (cmd & 0xff) + 2; /* 2d commands */
320 if (((cmd >> 24) & 0x1f) <= 0x18)
323 switch ((cmd >> 24) & 0x1f) {
327 switch ((cmd >> 16) & 0xff) {
329 return (cmd & 0x1f) + 2;
331 return (cmd & 0xf) + 2;
333 return (cmd & 0xffff) + 2;
337 return (cmd & 0xffff) + 1;
341 if ((cmd & (1 << 23)) == 0) /* inline vertices */
342 return (cmd & 0x1ffff) + 2;
343 else if (cmd & (1 << 17)) /* indirect random */
344 if ((cmd & 0xffff) == 0)
345 return 0; /* unknown length, too hard */
347 return (((cmd & 0xffff) + 1) / 2) + 1;
349 return 2; /* indirect sequential */
360 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
362 struct drm_i915_private *dev_priv = dev->dev_private;
365 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
368 for (i = 0; i < dwords;) {
369 int sz = validate_cmd(buffer[i]);
370 if (sz == 0 || i + sz > dwords)
375 ret = BEGIN_LP_RING((dwords+1)&~1);
379 for (i = 0; i < dwords; i++)
390 i915_emit_box(struct drm_device *dev,
391 struct drm_clip_rect *box,
394 struct drm_i915_private *dev_priv = dev->dev_private;
397 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
398 box->y2 <= 0 || box->x2 <= 0) {
399 DRM_ERROR("Bad box %d,%d..%d,%d\n",
400 box->x1, box->y1, box->x2, box->y2);
404 if (INTEL_INFO(dev)->gen >= 4) {
405 ret = BEGIN_LP_RING(4);
409 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
410 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
411 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
414 ret = BEGIN_LP_RING(6);
418 OUT_RING(GFX_OP_DRAWRECT_INFO);
420 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
421 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
430 /* XXX: Emitting the counter should really be moved to part of the IRQ
431 * emit. For now, do it in both places:
434 static void i915_emit_breadcrumb(struct drm_device *dev)
436 struct drm_i915_private *dev_priv = dev->dev_private;
437 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
439 dev_priv->dri1.counter++;
440 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
441 dev_priv->dri1.counter = 0;
442 if (master_priv->sarea_priv)
443 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
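/*
 * last_enqueue is bumped here at submission time; last_dispatch in the sarea
 * is only updated from READ_BREADCRUMB() once the store below has landed
 * (see i915_update_dri1_breadcrumb and i915_wait_irq), which lets userspace
 * compare the two to see whether its work has retired.
 */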
445 if (BEGIN_LP_RING(4) == 0) {
446 OUT_RING(MI_STORE_DWORD_INDEX);
447 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
448 OUT_RING(dev_priv->dri1.counter);
454 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
455 drm_i915_cmdbuffer_t *cmd,
456 struct drm_clip_rect *cliprects,
459 int nbox = cmd->num_cliprects;
460 int i = 0, count, ret;
463 DRM_ERROR("alignment");
467 i915_kernel_lost_context(dev);
469 count = nbox ? nbox : 1;
471 for (i = 0; i < count; i++) {
473 ret = i915_emit_box(dev, &cliprects[i],
479 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
484 i915_emit_breadcrumb(dev);
488 static int i915_dispatch_batchbuffer(struct drm_device * dev,
489 drm_i915_batchbuffer_t * batch,
490 struct drm_clip_rect *cliprects)
492 struct drm_i915_private *dev_priv = dev->dev_private;
493 int nbox = batch->num_cliprects;
496 if ((batch->start | batch->used) & 0x7) {
497 DRM_ERROR("alignment");
501 i915_kernel_lost_context(dev);
503 count = nbox ? nbox : 1;
504 for (i = 0; i < count; i++) {
506 ret = i915_emit_box(dev, &cliprects[i],
507 batch->DR1, batch->DR4);
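/*
 * Most parts start the batch with MI_BATCH_BUFFER_START (using the 965+
 * variant of the non-secure bit on gen4+); i830/845 instead use the bounded
 * MI_BATCH_BUFFER form, which takes explicit start and end addresses.
 */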
512 if (!IS_I830(dev) && !IS_845G(dev)) {
513 ret = BEGIN_LP_RING(2);
517 if (INTEL_INFO(dev)->gen >= 4) {
518 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
519 OUT_RING(batch->start);
521 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
522 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
525 ret = BEGIN_LP_RING(4);
529 OUT_RING(MI_BATCH_BUFFER);
530 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
531 OUT_RING(batch->start + batch->used - 4);
538 if (IS_G4X(dev) || IS_GEN5(dev)) {
539 if (BEGIN_LP_RING(2) == 0) {
540 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
546 i915_emit_breadcrumb(dev);
550 static int i915_dispatch_flip(struct drm_device * dev)
552 struct drm_i915_private *dev_priv = dev->dev_private;
553 struct drm_i915_master_private *master_priv =
554 dev->primary->master->driver_priv;
557 if (!master_priv->sarea_priv)
560 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
562 dev_priv->dri1.current_page,
563 master_priv->sarea_priv->pf_current_page);
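/*
 * The legacy flip just points the display at the other DRI1 buffer: emit a
 * flush, a CMD_OP_DISPLAYBUFFER_INFO with ASYNC_FLIP and the back or front
 * offset, toggle dri1.current_page, then wait for the plane A flip event and
 * store a fresh breadcrumb for the sarea.
 */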
565 i915_kernel_lost_context(dev);
567 ret = BEGIN_LP_RING(10);
571 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
574 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
576 if (dev_priv->dri1.current_page == 0) {
577 OUT_RING(dev_priv->dri1.back_offset);
578 dev_priv->dri1.current_page = 1;
580 OUT_RING(dev_priv->dri1.front_offset);
581 dev_priv->dri1.current_page = 0;
585 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
590 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
592 if (BEGIN_LP_RING(4) == 0) {
593 OUT_RING(MI_STORE_DWORD_INDEX);
594 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
595 OUT_RING(dev_priv->dri1.counter);
600 master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
604 static int i915_quiescent(struct drm_device *dev)
606 i915_kernel_lost_context(dev);
607 return intel_ring_idle(LP_RING(dev->dev_private));
610 static int i915_flush_ioctl(struct drm_device *dev, void *data,
611 struct drm_file *file_priv)
615 if (drm_core_check_feature(dev, DRIVER_MODESET))
618 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
620 mutex_lock(&dev->struct_mutex);
621 ret = i915_quiescent(dev);
622 mutex_unlock(&dev->struct_mutex);
627 static int i915_batchbuffer(struct drm_device *dev, void *data,
628 struct drm_file *file_priv)
630 struct drm_i915_private *dev_priv = dev->dev_private;
631 struct drm_i915_master_private *master_priv;
632 drm_i915_sarea_t *sarea_priv;
633 drm_i915_batchbuffer_t *batch = data;
635 struct drm_clip_rect *cliprects = NULL;
637 if (drm_core_check_feature(dev, DRIVER_MODESET))
640 master_priv = dev->primary->master->driver_priv;
641 sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
643 if (!dev_priv->dri1.allow_batchbuffer) {
644 DRM_ERROR("Batchbuffer ioctl disabled\n");
648 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
649 batch->start, batch->used, batch->num_cliprects);
651 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
653 if (batch->num_cliprects < 0)
656 if (batch->num_cliprects) {
657 cliprects = kcalloc(batch->num_cliprects,
660 if (cliprects == NULL)
663 ret = copy_from_user(cliprects, batch->cliprects,
664 batch->num_cliprects *
665 sizeof(struct drm_clip_rect));
672 mutex_lock(&dev->struct_mutex);
673 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
674 mutex_unlock(&dev->struct_mutex);
677 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
685 static int i915_cmdbuffer(struct drm_device *dev, void *data,
686 struct drm_file *file_priv)
688 struct drm_i915_private *dev_priv = dev->dev_private;
689 struct drm_i915_master_private *master_priv;
690 drm_i915_sarea_t *sarea_priv;
691 drm_i915_cmdbuffer_t *cmdbuf = data;
692 struct drm_clip_rect *cliprects = NULL;
696 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
697 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
699 if (drm_core_check_feature(dev, DRIVER_MODESET))
702 master_priv = dev->primary->master->driver_priv;
703 sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
705 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
707 if (cmdbuf->num_cliprects < 0)
710 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
711 if (batch_data == NULL)
714 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
717 goto fail_batch_free;
720 if (cmdbuf->num_cliprects) {
721 cliprects = kcalloc(cmdbuf->num_cliprects,
722 sizeof(*cliprects), GFP_KERNEL);
723 if (cliprects == NULL) {
725 goto fail_batch_free;
728 ret = copy_from_user(cliprects, cmdbuf->cliprects,
729 cmdbuf->num_cliprects *
730 sizeof(struct drm_clip_rect));
737 mutex_lock(&dev->struct_mutex);
738 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
739 mutex_unlock(&dev->struct_mutex);
741 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
746 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
756 static int i915_emit_irq(struct drm_device * dev)
758 struct drm_i915_private *dev_priv = dev->dev_private;
759 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
761 i915_kernel_lost_context(dev);
763 DRM_DEBUG_DRIVER("\n");
765 dev_priv->dri1.counter++;
766 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
767 dev_priv->dri1.counter = 1;
768 if (master_priv->sarea_priv)
769 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
771 if (BEGIN_LP_RING(4) == 0) {
772 OUT_RING(MI_STORE_DWORD_INDEX);
773 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
774 OUT_RING(dev_priv->dri1.counter);
775 OUT_RING(MI_USER_INTERRUPT);
779 return dev_priv->dri1.counter;
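/*
 * i915_emit_irq() stores the new counter into the breadcrumb slot and emits
 * MI_USER_INTERRUPT; i915_wait_irq() below then sleeps on ring->irq_queue
 * until READ_BREADCRUMB() reaches the requested value, with a 3 second
 * timeout and a polling fallback when the ring interrupt can't be grabbed.
 */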
782 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
784 struct drm_i915_private *dev_priv = dev->dev_private;
785 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
787 struct intel_engine_cs *ring = LP_RING(dev_priv);
789 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
790 READ_BREADCRUMB(dev_priv));
792 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
793 if (master_priv->sarea_priv)
794 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
798 if (master_priv->sarea_priv)
799 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
801 if (ring->irq_get(ring)) {
802 DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
803 READ_BREADCRUMB(dev_priv) >= irq_nr);
805 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
809 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
810 READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
816 /* Needs the lock as it touches the ring.
818 static int i915_irq_emit(struct drm_device *dev, void *data,
819 struct drm_file *file_priv)
821 struct drm_i915_private *dev_priv = dev->dev_private;
822 drm_i915_irq_emit_t *emit = data;
825 if (drm_core_check_feature(dev, DRIVER_MODESET))
828 if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
829 DRM_ERROR("called with no initialization\n");
833 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
835 mutex_lock(&dev->struct_mutex);
836 result = i915_emit_irq(dev);
837 mutex_unlock(&dev->struct_mutex);
839 if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
840 DRM_ERROR("copy_to_user\n");
847 /* Doesn't need the hardware lock.
849 static int i915_irq_wait(struct drm_device *dev, void *data,
850 struct drm_file *file_priv)
852 struct drm_i915_private *dev_priv = dev->dev_private;
853 drm_i915_irq_wait_t *irqwait = data;
855 if (drm_core_check_feature(dev, DRIVER_MODESET))
859 DRM_ERROR("called with no initialization\n");
863 return i915_wait_irq(dev, irqwait->irq_seq);
866 static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
867 struct drm_file *file_priv)
869 struct drm_i915_private *dev_priv = dev->dev_private;
870 drm_i915_vblank_pipe_t *pipe = data;
872 if (drm_core_check_feature(dev, DRIVER_MODESET))
876 DRM_ERROR("called with no initialization\n");
880 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
886 * Schedule buffer swap at given vertical blank.
888 static int i915_vblank_swap(struct drm_device *dev, void *data,
889 struct drm_file *file_priv)
891 /* The delayed swap mechanism was fundamentally racy, and has been
892 * removed. The model was that the client requested a delayed flip/swap
893 * from the kernel, then waited for vblank before continuing to perform
894 * rendering. The problem was that the kernel might wake the client
895 * up before it dispatched the vblank swap (since the lock has to be
896 * held while touching the ringbuffer), in which case the client would
897 * clear and start the next frame before the swap occurred, and
898 * flicker would occur in addition to likely missing the vblank.
900 * In the absence of this ioctl, userland falls back to a correct path
901 * of waiting for a vblank, then dispatching the swap on its own.
902 * Context switching to userland and back is plenty fast enough for
903 * meeting the requirements of vblank swapping.
908 static int i915_flip_bufs(struct drm_device *dev, void *data,
909 struct drm_file *file_priv)
913 if (drm_core_check_feature(dev, DRIVER_MODESET))
916 DRM_DEBUG_DRIVER("%s\n", __func__);
918 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
920 mutex_lock(&dev->struct_mutex);
921 ret = i915_dispatch_flip(dev);
922 mutex_unlock(&dev->struct_mutex);
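/*
 * For reference, a rough sketch of how userspace queries one of the
 * parameters handled below through libdrm (drmIoctl() and the ioctl number
 * come from libdrm/uapi headers, not from this file; error handling omitted):
 *
 *	int id;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID, .value = &id };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */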
927 static int i915_getparam(struct drm_device *dev, void *data,
928 struct drm_file *file_priv)
930 struct drm_i915_private *dev_priv = dev->dev_private;
931 drm_i915_getparam_t *param = data;
935 DRM_ERROR("called with no initialization\n");
939 switch (param->param) {
940 case I915_PARAM_IRQ_ACTIVE:
941 value = dev->pdev->irq ? 1 : 0;
943 case I915_PARAM_ALLOW_BATCHBUFFER:
944 value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
946 case I915_PARAM_LAST_DISPATCH:
947 value = READ_BREADCRUMB(dev_priv);
949 case I915_PARAM_CHIPSET_ID:
950 value = dev->pdev->device;
952 case I915_PARAM_HAS_GEM:
955 case I915_PARAM_NUM_FENCES_AVAIL:
956 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
958 case I915_PARAM_HAS_OVERLAY:
959 value = dev_priv->overlay ? 1 : 0;
961 case I915_PARAM_HAS_PAGEFLIPPING:
964 case I915_PARAM_HAS_EXECBUF2:
968 case I915_PARAM_HAS_BSD:
969 value = intel_ring_initialized(&dev_priv->ring[VCS]);
971 case I915_PARAM_HAS_BLT:
972 value = intel_ring_initialized(&dev_priv->ring[BCS]);
974 case I915_PARAM_HAS_VEBOX:
975 value = intel_ring_initialized(&dev_priv->ring[VECS]);
977 case I915_PARAM_HAS_RELAXED_FENCING:
980 case I915_PARAM_HAS_COHERENT_RINGS:
983 case I915_PARAM_HAS_EXEC_CONSTANTS:
984 value = INTEL_INFO(dev)->gen >= 4;
986 case I915_PARAM_HAS_RELAXED_DELTA:
989 case I915_PARAM_HAS_GEN7_SOL_RESET:
992 case I915_PARAM_HAS_LLC:
993 value = HAS_LLC(dev);
995 case I915_PARAM_HAS_WT:
998 case I915_PARAM_HAS_ALIASING_PPGTT:
999 value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
1001 case I915_PARAM_HAS_WAIT_TIMEOUT:
1004 case I915_PARAM_HAS_SEMAPHORES:
1005 value = i915_semaphore_is_enabled(dev);
1007 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
1010 case I915_PARAM_HAS_SECURE_BATCHES:
1011 value = capable(CAP_SYS_ADMIN);
1013 case I915_PARAM_HAS_PINNED_BATCHES:
1016 case I915_PARAM_HAS_EXEC_NO_RELOC:
1019 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
1022 case I915_PARAM_CMD_PARSER_VERSION:
1023 value = i915_cmd_parser_get_version();
1026 DRM_DEBUG("Unknown parameter %d\n", param->param);
1030 if (copy_to_user(param->value, &value, sizeof(int))) {
1031 DRM_ERROR("copy_to_user failed\n");
1038 static int i915_setparam(struct drm_device *dev, void *data,
1039 struct drm_file *file_priv)
1041 struct drm_i915_private *dev_priv = dev->dev_private;
1042 drm_i915_setparam_t *param = data;
1045 DRM_ERROR("called with no initialization\n");
1049 switch (param->param) {
1050 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1052 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1054 case I915_SETPARAM_ALLOW_BATCHBUFFER:
1055 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1057 case I915_SETPARAM_NUM_USED_FENCES:
1058 if (param->value > dev_priv->num_fence_regs ||
1061 /* Userspace can use first N regs */
1062 dev_priv->fence_reg_start = param->value;
1065 DRM_DEBUG_DRIVER("unknown parameter %d\n",
1073 static int i915_set_status_page(struct drm_device *dev, void *data,
1074 struct drm_file *file_priv)
1076 struct drm_i915_private *dev_priv = dev->dev_private;
1077 drm_i915_hws_addr_t *hws = data;
1078 struct intel_engine_cs *ring;
1080 if (drm_core_check_feature(dev, DRIVER_MODESET))
1083 if (!I915_NEED_GFX_HWS(dev))
1087 DRM_ERROR("called with no initialization\n");
1091 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1092 WARN(1, "tried to set status page when mode setting active\n");
1096 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1098 ring = LP_RING(dev_priv);
1099 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1101 dev_priv->dri1.gfx_hws_cpu_addr =
1102 ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
1103 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1104 i915_dma_cleanup(dev);
1105 ring->status_page.gfx_addr = 0;
1106 DRM_ERROR("can not ioremap virtual address for"
1107 " G33 hw status page\n");
1111 memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1112 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
1114 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1115 ring->status_page.gfx_addr);
1116 DRM_DEBUG_DRIVER("load hws at %p\n",
1117 ring->status_page.page_addr);
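/*
 * Device 0 function 0 on bus 0 is the host bridge/MCH; a reference to it is
 * kept in dev_priv->bridge_dev so later code can poke chipset config space
 * (the MCHBAR setup below, among others).
 */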
1121 static int i915_get_bridge_dev(struct drm_device *dev)
1123 struct drm_i915_private *dev_priv = dev->dev_private;
1125 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
1126 if (!dev_priv->bridge_dev) {
1127 DRM_ERROR("bridge device not found\n");
1133 #define MCHBAR_I915 0x44
1134 #define MCHBAR_I965 0x48
1135 #define MCHBAR_SIZE (4*4096)
1137 #define DEVEN_REG 0x54
1138 #define DEVEN_MCHBAR_EN (1 << 28)
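/*
 * MCHBAR is the memory controller's register window: its base lives in host
 * bridge config space at 0x44 (pre-965) or 0x48 (965+, a 64-bit value) and
 * covers MCHBAR_SIZE bytes. On i915G/GM it is enabled through the
 * DEVEN_MCHBAR_EN bit in DEVEN (0x54); other parts use bit 0 of the MCHBAR
 * register itself, as the setup/teardown helpers below do.
 */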
1140 /* Allocate space for the MCH regs if needed, return nonzero on error */
1142 intel_alloc_mchbar_resource(struct drm_device *dev)
1144 struct drm_i915_private *dev_priv = dev->dev_private;
1145 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1146 u32 temp_lo, temp_hi = 0;
1150 if (INTEL_INFO(dev)->gen >= 4)
1151 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
1152 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
1153 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1155 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
1158 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1162 /* Get some space for it */
1163 dev_priv->mch_res.name = "i915 MCHBAR";
1164 dev_priv->mch_res.flags = IORESOURCE_MEM;
1165 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
1167 MCHBAR_SIZE, MCHBAR_SIZE,
1169 0, pcibios_align_resource,
1170 dev_priv->bridge_dev);
1172 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
1173 dev_priv->mch_res.start = 0;
1177 if (INTEL_INFO(dev)->gen >= 4)
1178 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
1179 upper_32_bits(dev_priv->mch_res.start));
1181 pci_write_config_dword(dev_priv->bridge_dev, reg,
1182 lower_32_bits(dev_priv->mch_res.start));
1186 /* Setup MCHBAR if possible, return true if we should disable it again */
1188 intel_setup_mchbar(struct drm_device *dev)
1190 struct drm_i915_private *dev_priv = dev->dev_private;
1191 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1195 if (IS_VALLEYVIEW(dev))
1198 dev_priv->mchbar_need_disable = false;
1200 if (IS_I915G(dev) || IS_I915GM(dev)) {
1201 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1202 enabled = !!(temp & DEVEN_MCHBAR_EN);
1204 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1208 /* If it's already enabled, don't have to do anything */
1212 if (intel_alloc_mchbar_resource(dev))
1215 dev_priv->mchbar_need_disable = true;
1217 /* Space is allocated or reserved, so enable it. */
1218 if (IS_I915G(dev) || IS_I915GM(dev)) {
1219 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
1220 temp | DEVEN_MCHBAR_EN);
1222 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1223 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
1228 intel_teardown_mchbar(struct drm_device *dev)
1230 struct drm_i915_private *dev_priv = dev->dev_private;
1231 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1234 if (dev_priv->mchbar_need_disable) {
1235 if (IS_I915G(dev) || IS_I915GM(dev)) {
1236 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1237 temp &= ~DEVEN_MCHBAR_EN;
1238 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
1240 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1242 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
1246 if (dev_priv->mch_res.start)
1247 release_resource(&dev_priv->mch_res);
1250 /* true = enable decode, false = disable decode */
1251 static unsigned int i915_vga_set_decode(void *cookie, bool state)
1253 struct drm_device *dev = cookie;
1255 intel_modeset_vga_set_state(dev, state);
1257 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1258 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1260 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
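/*
 * This callback is handed to vga_client_register(): with decode enabled the
 * device claims both the legacy VGA ranges and its normal IO/MEM resources,
 * otherwise only the normal ones, which is what the VGA arbiter uses to
 * decide where legacy VGA cycles should be routed.
 */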
1263 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1265 struct drm_device *dev = pci_get_drvdata(pdev);
1266 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1267 if (state == VGA_SWITCHEROO_ON) {
1268 pr_info("switched on\n");
1269 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1270 /* i915 resume handler doesn't set to D0 */
1271 pci_set_power_state(dev->pdev, PCI_D0);
1273 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1275 pr_err("switched off\n");
1276 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1277 i915_suspend(dev, pmm);
1278 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1282 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1284 struct drm_device *dev = pci_get_drvdata(pdev);
1287 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1288 * locking inversion with the driver load path. And the access here is
1289 * completely racy anyway. So don't bother with locking for now.
1291 return dev->open_count == 0;
1294 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
1295 .set_gpu_state = i915_switcheroo_set_state,
1297 .can_switch = i915_switcheroo_can_switch,
1300 static int i915_load_modeset_init(struct drm_device *dev)
1302 struct drm_i915_private *dev_priv = dev->dev_private;
1305 ret = intel_parse_bios(dev);
1307 DRM_INFO("failed to find VBIOS tables\n");
1309 /* If we have > 1 VGA cards, then we need to arbitrate access
1310 * to the common VGA resources.
1312 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
1313 * then we do not take part in VGA arbitration and the
1314 * vga_client_register() fails with -ENODEV.
1316 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1317 if (ret && ret != -ENODEV)
1320 intel_register_dsm_handler();
1322 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
1324 goto cleanup_vga_client;
1326 /* Initialise stolen first so that we may reserve preallocated
1327 * objects for the BIOS to KMS transition.
1329 ret = i915_gem_init_stolen(dev);
1331 goto cleanup_vga_switcheroo;
1333 intel_power_domains_init_hw(dev_priv);
1335 ret = drm_irq_install(dev, dev->pdev->irq);
1337 goto cleanup_gem_stolen;
1339 /* Important: The output setup functions called by modeset_init need
1340 * working irqs for e.g. gmbus and dp aux transfers. */
1341 intel_modeset_init(dev);
1343 ret = i915_gem_init(dev);
1347 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1349 intel_modeset_gem_init(dev);
1351 /* Always safe in the mode setting case. */
1352 /* FIXME: do pre/post-mode set stuff in core KMS code */
1353 dev->vblank_disable_allowed = true;
1354 if (INTEL_INFO(dev)->num_pipes == 0)
1357 ret = intel_fbdev_init(dev);
1361 /* Only enable hotplug handling once the fbdev is fully set up. */
1362 intel_hpd_init(dev);
1365 * Some ports require correctly set-up hpd registers for detection to
1366 * work properly (leading to ghost connected connector status), e.g. VGA
1367 * on gm45. Hence we can only set up the initial fbdev config after hpd
1368 * irqs are fully enabled. Now we should scan for the initial config
1369 * only once hotplug handling is enabled, but due to screwed-up locking
1370 * around kms/fbdev init we can't protect the fbdev initial config
1371 * scanning against hotplug events. Hence do this first and ignore the
1372 * tiny window where we will lose hotplug notifications.
1374 intel_fbdev_initial_config(dev);
1376 /* Only enable hotplug handling once the fbdev is fully set up. */
1377 dev_priv->enable_hotplug_processing = true;
1379 drm_kms_helper_poll_init(dev);
1384 mutex_lock(&dev->struct_mutex);
1385 i915_gem_cleanup_ringbuffer(dev);
1386 i915_gem_context_fini(dev);
1387 mutex_unlock(&dev->struct_mutex);
1388 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1389 drm_mm_takedown(&dev_priv->gtt.base.mm);
1391 drm_irq_uninstall(dev);
1393 i915_gem_cleanup_stolen(dev);
1394 cleanup_vga_switcheroo:
1395 vga_switcheroo_unregister_client(dev->pdev);
1397 vga_client_register(dev->pdev, NULL, NULL, NULL);
1402 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1404 struct drm_i915_master_private *master_priv;
1406 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1410 master->driver_priv = master_priv;
1414 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1416 struct drm_i915_master_private *master_priv = master->driver_priv;
1423 master->driver_priv = NULL;
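/*
 * Before taking over the hardware, evict any firmware framebuffer (vesafb,
 * efifb, ...) that overlaps our GTT aperture; "primary" is set when this
 * device shadowed the VGA ROM, i.e. it was the boot display.
 */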
1426 #if IS_ENABLED(CONFIG_FB)
1427 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1429 struct apertures_struct *ap;
1430 struct pci_dev *pdev = dev_priv->dev->pdev;
1433 ap = alloc_apertures(1);
1437 ap->ranges[0].base = dev_priv->gtt.mappable_base;
1438 ap->ranges[0].size = dev_priv->gtt.mappable_end;
1441 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1443 remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
1448 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1453 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1455 const struct intel_device_info *info = &dev_priv->info;
1457 #define PRINT_S(name) "%s"
1458 #define SEP_EMPTY
1459 #define PRINT_FLAG(name) info->name ? #name "," : ""
1460 #define SEP_COMMA ,
1461 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
1462 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
1463 info->gen,
1464 dev_priv->dev->pdev->device,
1465 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
1473 * Determine various intel_device_info fields at runtime.
1475 * Use it when either:
1476 * - it's judged too laborious to fill n static structures with the limit
1477 * when a simple if statement does the job,
1478 * - run-time checks (eg read fuse/strap registers) are needed.
1480 * This function needs to be called:
1481 * - after the MMIO has been setup as we are reading registers,
1482 * - after the PCH has been detected,
1483 * - before the first usage of the fields it can tweak.
1485 static void intel_device_info_runtime_init(struct drm_device *dev)
1487 struct drm_i915_private *dev_priv = dev->dev_private;
1488 struct intel_device_info *info;
1491 info = (struct intel_device_info *)&dev_priv->info;
1493 if (IS_VALLEYVIEW(dev))
1495 info->num_sprites[pipe] = 2;
1498 info->num_sprites[pipe] = 1;
1500 if (i915.disable_display) {
1501 DRM_INFO("Display disabled (module parameter)\n");
1502 info->num_pipes = 0;
1503 } else if (info->num_pipes > 0 &&
1504 (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
1505 !IS_VALLEYVIEW(dev)) {
1506 u32 fuse_strap = I915_READ(FUSE_STRAP);
1507 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
1510 * SFUSE_STRAP is supposed to have a bit signalling the display
1511 * is fused off. Unfortunately it seems that, at least in
1512 * certain cases, fused off display means that PCH display
1513 * reads don't land anywhere. In that case, we read 0s.
1515 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
1516 * should be set when taking over after the firmware.
1518 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
1519 sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
1520 (dev_priv->pch_type == PCH_CPT &&
1521 !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
1522 DRM_INFO("Display fused off, disabling\n");
1523 info->num_pipes = 0;
1529 * i915_driver_load - setup chip and create an initial config
1531 * @flags: startup flags
1533 * The driver load routine has to do several things:
1534 * - drive output discovery via intel_modeset_init()
1535 * - initialize the memory manager
1536 * - allocate initial config memory
1537 * - setup the DRM framebuffer with the allocated memory
1539 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1541 struct drm_i915_private *dev_priv;
1542 struct intel_device_info *info, *device_info;
1543 int ret = 0, mmio_bar, mmio_size;
1544 uint32_t aperture_size;
1546 info = (struct intel_device_info *) flags;
1548 /* Refuse to load on gen6+ without kms enabled. */
1549 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
1550 DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
1551 DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
1555 /* UMS needs agp support. */
1556 if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
1559 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1560 if (dev_priv == NULL)
1563 dev->dev_private = (void *)dev_priv;
1564 dev_priv->dev = dev;
1566 /* copy initial configuration to dev_priv->info */
1567 device_info = (struct intel_device_info *)&dev_priv->info;
1568 *device_info = *info;
1570 spin_lock_init(&dev_priv->irq_lock);
1571 spin_lock_init(&dev_priv->gpu_error.lock);
1572 spin_lock_init(&dev_priv->backlight_lock);
1573 spin_lock_init(&dev_priv->uncore.lock);
1574 spin_lock_init(&dev_priv->mm.object_stat_lock);
1575 mutex_init(&dev_priv->dpio_lock);
1576 mutex_init(&dev_priv->modeset_restore_lock);
1578 intel_pm_setup(dev);
1580 intel_display_crc_init(dev);
1582 i915_dump_device_info(dev_priv);
1584 /* Not all pre-production machines fall into this category, only the
1585 * very first ones. Almost everything should work, except for maybe
1586 * suspend/resume. And we don't implement workarounds that affect only
1587 * pre-production machines. */
1588 if (IS_HSW_EARLY_SDV(dev))
1589 DRM_INFO("This is an early pre-production Haswell machine. "
1590 "It may not be fully functional.\n");
1592 if (i915_get_bridge_dev(dev)) {
1597 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1598 /* Before gen4, the registers and the GTT are behind different BARs.
1599 * However, from gen4 onwards, the registers and the GTT are shared
1600 * in the same BAR, so we want to restrict this ioremap from
1601 * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
1602 * the register BAR remains the same size for all the earlier
1603 * generations up to Ironlake.
1606 mmio_size = 512*1024;
1608 mmio_size = 2*1024*1024;
1610 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1611 if (!dev_priv->regs) {
1612 DRM_ERROR("failed to map registers\n");
1617 /* This must be called before any calls to HAS_PCH_* */
1618 intel_detect_pch(dev);
1620 intel_uncore_init(dev);
1622 ret = i915_gem_gtt_init(dev);
1626 if (drm_core_check_feature(dev, DRIVER_MODESET))
1627 i915_kick_out_firmware_fb(dev_priv);
1629 pci_set_master(dev->pdev);
1631 /* overlay on gen2 is broken and can't address above 1G */
1633 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1635 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1636 * using 32bit addressing, overwriting memory if HWS is located
1639 * The documentation also mentions an issue with undefined
1640 * behaviour if any general state is accessed within a page above 4GB,
1641 * which also needs to be handled carefully.
1643 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1644 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1646 aperture_size = dev_priv->gtt.mappable_end;
1648 dev_priv->gtt.mappable =
1649 io_mapping_create_wc(dev_priv->gtt.mappable_base,
1651 if (dev_priv->gtt.mappable == NULL) {
1656 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
1659 /* The i915 workqueue is primarily used for batched retirement of
1660 * requests (and thus managing bo) once the task has been completed
1661 * by the GPU. i915_gem_retire_requests() is called directly when we
1662 * need high-priority retirement, such as waiting for an explicit
1665 * It is also used for periodic low-priority events, such as
1666 * idle-timers and recording error state.
1668 * All tasks on the workqueue are expected to acquire the dev mutex
1669 * so there is no point in running more than one instance of the
1670 * workqueue at any time. Use an ordered one.
1672 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1673 if (dev_priv->wq == NULL) {
1674 DRM_ERROR("Failed to create our workqueue.\n");
1679 intel_irq_init(dev);
1680 intel_uncore_sanitize(dev);
1682 /* Try to make sure MCHBAR is enabled before poking at it */
1683 intel_setup_mchbar(dev);
1684 intel_setup_gmbus(dev);
1685 intel_opregion_setup(dev);
1687 intel_setup_bios(dev);
1691 /* On the 945G/GM, the chipset reports the MSI capability on the
1692 * integrated graphics even though the support isn't actually there
1693 * according to the published specs. It doesn't appear to function
1694 * correctly in testing on 945G.
1695 * This may be a side effect of MSI having been made available for PEG
1696 * and the registers being closely associated.
1698 * According to chipset errata, on the 965GM, MSI interrupts may
1699 * be lost or delayed, but we use them anyways to avoid
1700 * stuck interrupts on some machines.
1702 if (!IS_I945G(dev) && !IS_I945GM(dev))
1703 pci_enable_msi(dev->pdev);
1705 intel_device_info_runtime_init(dev);
1707 if (INTEL_INFO(dev)->num_pipes) {
1708 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1710 goto out_gem_unload;
1713 intel_power_domains_init(dev_priv);
1715 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1716 ret = i915_load_modeset_init(dev);
1718 DRM_ERROR("failed to init modeset\n");
1719 goto out_power_well;
1722 /* Start out suspended in ums mode. */
1723 dev_priv->ums.mm_suspended = 1;
1726 i915_setup_sysfs(dev);
1728 if (INTEL_INFO(dev)->num_pipes) {
1729 /* Must be done after probing outputs */
1730 intel_opregion_init(dev);
1731 acpi_video_register();
1735 intel_gpu_ips_init(dev_priv);
1737 intel_init_runtime_pm(dev_priv);
1742 intel_power_domains_remove(dev_priv);
1743 drm_vblank_cleanup(dev);
1745 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1746 unregister_shrinker(&dev_priv->mm.shrinker);
1748 if (dev->pdev->msi_enabled)
1749 pci_disable_msi(dev->pdev);
1751 intel_teardown_gmbus(dev);
1752 intel_teardown_mchbar(dev);
1753 pm_qos_remove_request(&dev_priv->pm_qos);
1754 destroy_workqueue(dev_priv->wq);
1756 arch_phys_wc_del(dev_priv->gtt.mtrr);
1757 io_mapping_free(dev_priv->gtt.mappable);
1759 list_del(&dev_priv->gtt.base.global_link);
1760 drm_mm_takedown(&dev_priv->gtt.base.mm);
1761 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1763 intel_uncore_fini(dev);
1764 pci_iounmap(dev->pdev, dev_priv->regs);
1766 pci_dev_put(dev_priv->bridge_dev);
1769 kmem_cache_destroy(dev_priv->slab);
1774 int i915_driver_unload(struct drm_device *dev)
1776 struct drm_i915_private *dev_priv = dev->dev_private;
1779 ret = i915_gem_suspend(dev);
1781 DRM_ERROR("failed to idle hardware: %d\n", ret);
1785 intel_fini_runtime_pm(dev_priv);
1787 intel_gpu_ips_teardown();
1789 /* The i915.ko module is still not prepared to be loaded when
1790 * the power well is not enabled, so just enable it in case
1791 * we're going to unload/reload. */
1792 intel_display_set_init_power(dev_priv, true);
1793 intel_power_domains_remove(dev_priv);
1795 i915_teardown_sysfs(dev);
1797 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1798 unregister_shrinker(&dev_priv->mm.shrinker);
1800 io_mapping_free(dev_priv->gtt.mappable);
1801 arch_phys_wc_del(dev_priv->gtt.mtrr);
1803 acpi_video_unregister();
1805 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1806 intel_fbdev_fini(dev);
1807 intel_modeset_cleanup(dev);
1808 cancel_work_sync(&dev_priv->console_resume_work);
1811 * free the memory space allocated for the child device
1812 * config parsed from VBT
1814 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1815 kfree(dev_priv->vbt.child_dev);
1816 dev_priv->vbt.child_dev = NULL;
1817 dev_priv->vbt.child_dev_num = 0;
1820 vga_switcheroo_unregister_client(dev->pdev);
1821 vga_client_register(dev->pdev, NULL, NULL, NULL);
1824 /* Free error state after interrupts are fully disabled. */
1825 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1826 cancel_work_sync(&dev_priv->gpu_error.work);
1827 i915_destroy_error_state(dev);
1829 if (dev->pdev->msi_enabled)
1830 pci_disable_msi(dev->pdev);
1832 intel_opregion_fini(dev);
1834 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1835 /* Flush any outstanding unpin_work. */
1836 flush_workqueue(dev_priv->wq);
1838 mutex_lock(&dev->struct_mutex);
1839 i915_gem_cleanup_ringbuffer(dev);
1840 i915_gem_context_fini(dev);
1841 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1842 mutex_unlock(&dev->struct_mutex);
1843 i915_gem_cleanup_stolen(dev);
1845 if (!I915_NEED_GFX_HWS(dev))
1849 list_del(&dev_priv->gtt.base.global_link);
1850 WARN_ON(!list_empty(&dev_priv->vm_list));
1852 drm_vblank_cleanup(dev);
1854 intel_teardown_gmbus(dev);
1855 intel_teardown_mchbar(dev);
1857 destroy_workqueue(dev_priv->wq);
1858 pm_qos_remove_request(&dev_priv->pm_qos);
1860 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1862 intel_uncore_fini(dev);
1863 if (dev_priv->regs != NULL)
1864 pci_iounmap(dev->pdev, dev_priv->regs);
1867 kmem_cache_destroy(dev_priv->slab);
1869 pci_dev_put(dev_priv->bridge_dev);
1875 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1879 ret = i915_gem_open(dev, file);
1887 * i915_driver_lastclose - clean up after all DRM clients have exited
1890 * Take care of cleaning up after all DRM clients have exited. In the
1891 * mode setting case, we want to restore the kernel's initial mode (just
1892 * in case the last client left us in a bad state).
1894 * Additionally, in the non-mode setting case, we'll tear down the GTT
1895 * and DMA structures, since the kernel won't be using them, and clean up.
1898 void i915_driver_lastclose(struct drm_device * dev)
1900 struct drm_i915_private *dev_priv = dev->dev_private;
1902 /* On gen6+ we refuse to init without kms enabled, but then the drm core
1903 * goes right around and calls lastclose. Check for this and don't clean up.
1908 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1909 intel_fbdev_restore_mode(dev);
1910 vga_switcheroo_process_delayed_switch();
1914 i915_gem_lastclose(dev);
1916 i915_dma_cleanup(dev);
1919 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1921 mutex_lock(&dev->struct_mutex);
1922 i915_gem_context_close(dev, file_priv);
1923 i915_gem_release(dev, file_priv);
1924 mutex_unlock(&dev->struct_mutex);
1927 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1929 struct drm_i915_file_private *file_priv = file->driver_priv;
1931 if (file_priv && file_priv->bsd_ring)
1932 file_priv->bsd_ring = NULL;
1936 const struct drm_ioctl_desc i915_ioctls[] = {
1937 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1938 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1939 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
1940 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1941 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1942 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1943 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1944 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1945 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1946 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1947 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1948 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1949 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1950 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1951 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
1952 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1953 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1954 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1955 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1956 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1957 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1958 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1959 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1960 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1961 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1962 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1963 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1964 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1965 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1966 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1967 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1968 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1969 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1970 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1971 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1972 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1973 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1974 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1975 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1976 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1977 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1978 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1979 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1980 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1981 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1982 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1983 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1984 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1985 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1986 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1989 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
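/*
 * Flag key for the table above: DRM_AUTH requires an authenticated client,
 * DRM_MASTER/DRM_ROOT_ONLY restrict the ioctl to the DRM master or root,
 * DRM_UNLOCKED skips the legacy global DRM lock, DRM_RENDER_ALLOW exposes the
 * ioctl on render nodes and DRM_CONTROL_ALLOW on control nodes.
 */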
1992 * This is really ugly: Because old userspace abused the linux agp interface to
1993 * manage the gtt, we need to claim that all intel devices are agp,
1994 * as otherwise the drm core refuses to initialize the agp support code.
1996 int i915_driver_device_is_agp(struct drm_device * dev)