2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
30 #include <linux/log2.h>
33 #include <drm/i915_drm.h>
34 #include "i915_trace.h"
35 #include "intel_drv.h"
37 /* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
40 #define LEGACY_REQUEST_SIZE 200
42 static unsigned int __intel_ring_space(unsigned int head,
47 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
48 * same cacheline, the Head Pointer must not be greater than the Tail
51 GEM_BUG_ON(!is_power_of_2(size));
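/*
 * Worked example (a sketch, assuming CACHELINE_BYTES is 64): for a 4096 byte
 * ring with head == tail, (0 - 0 - 64) & 4095 == 4032, i.e. we always report
 * one cacheline less than the full size as free so that the tail can never
 * advance into the head's cacheline.
 */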
52 return (head - tail - CACHELINE_BYTES) & (size - 1);
55 void intel_ring_update_space(struct intel_ring *ring)
57 ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
61 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
67 if (mode & EMIT_INVALIDATE)
70 cs = intel_ring_begin(req, 2);
76 intel_ring_advance(req, cs);
82 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
89 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
90 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
91 * also flushed at 2d versus 3d pipeline switches.
95 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
96 * MI_READ_FLUSH is set, and is always flushed on 965.
98 * I915_GEM_DOMAIN_COMMAND may not exist?
100 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
101 * invalidated when MI_EXE_FLUSH is set.
103 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
104 * invalidated with every MI_FLUSH.
108 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
109 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
110 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
111 * are flushed at any MI_FLUSH.
115 if (mode & EMIT_INVALIDATE) {
117 if (IS_G4X(req->i915) || IS_GEN5(req->i915))
118 cmd |= MI_INVALIDATE_ISP;
121 cs = intel_ring_begin(req, 2);
127 intel_ring_advance(req, cs);
133 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
134 * implementing two workarounds on gen6. From section 1.4.7.1
135 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
137 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
138 * produced by non-pipelined state commands), software needs to first
139 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
142 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
143 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
145 * And the workaround for these two requires this workaround first:
147 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
148 * BEFORE the pipe-control with a post-sync op and no write-cache
151 * And this last workaround is tricky because of the requirements on
152 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
155 * "1 of the following must also be set:
156 * - Render Target Cache Flush Enable ([12] of DW1)
157 * - Depth Cache Flush Enable ([0] of DW1)
158 * - Stall at Pixel Scoreboard ([1] of DW1)
159 * - Depth Stall ([13] of DW1)
160 * - Post-Sync Operation ([13] of DW1)
161 * - Notify Enable ([8] of DW1)"
163 * The cache flushes require the workaround flush that triggered this
164 * one, so we can't use it. Depth stall would trigger the same.
165 * Post-sync nonzero is what triggered this second workaround, so we
166 * can't use that one either. Notify enable is IRQs, which aren't
167 * really our business. That leaves only stall at scoreboard.
170 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
173 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
176 cs = intel_ring_begin(req, 6);
180 *cs++ = GFX_OP_PIPE_CONTROL(5);
181 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
182 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
183 *cs++ = 0; /* low dword */
184 *cs++ = 0; /* high dword */
186 intel_ring_advance(req, cs);
188 cs = intel_ring_begin(req, 6);
192 *cs++ = GFX_OP_PIPE_CONTROL(5);
193 *cs++ = PIPE_CONTROL_QW_WRITE;
194 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
198 intel_ring_advance(req, cs);
204 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
207 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
211 /* Force SNB workarounds for PIPE_CONTROL flushes */
212 ret = intel_emit_post_sync_nonzero_flush(req);
216 /* Just flush everything. Experiments have shown that reducing the
217 * number of bits based on the write domains has little performance
220 if (mode & EMIT_FLUSH) {
221 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
222 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
224 * Ensure that any following seqno writes only happen
225 * when the render cache is indeed flushed.
227 flags |= PIPE_CONTROL_CS_STALL;
229 if (mode & EMIT_INVALIDATE) {
230 flags |= PIPE_CONTROL_TLB_INVALIDATE;
231 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
232 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
233 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
234 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
235 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
237 * TLB invalidate requires a post-sync write.
239 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
242 cs = intel_ring_begin(req, 4);
246 *cs++ = GFX_OP_PIPE_CONTROL(4);
248 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
250 intel_ring_advance(req, cs);
256 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
260 cs = intel_ring_begin(req, 4);
264 *cs++ = GFX_OP_PIPE_CONTROL(4);
265 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
268 intel_ring_advance(req, cs);
274 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
277 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
281 * Ensure that any following seqno writes only happen when the render
282 * cache is indeed flushed.
284 * Workaround: 4th PIPE_CONTROL command (except the ones with only
285 * read-cache invalidate bits set) must have the CS_STALL bit set. We
286 * don't try to be clever and just set it unconditionally.
288 flags |= PIPE_CONTROL_CS_STALL;
290 /* Just flush everything. Experiments have shown that reducing the
291 * number of bits based on the write domains has little performance
294 if (mode & EMIT_FLUSH) {
295 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
296 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
297 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
298 flags |= PIPE_CONTROL_FLUSH_ENABLE;
300 if (mode & EMIT_INVALIDATE) {
301 flags |= PIPE_CONTROL_TLB_INVALIDATE;
302 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
303 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
304 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
305 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
306 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
307 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
309 * TLB invalidate requires a post-sync write.
311 flags |= PIPE_CONTROL_QW_WRITE;
312 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
314 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
316 /* Workaround: we must issue a pipe_control with CS-stall bit
317 * set before a pipe_control command that has the state cache
318 * invalidate bit set. */
319 gen7_render_ring_cs_stall_wa(req);
322 cs = intel_ring_begin(req, 4);
326 *cs++ = GFX_OP_PIPE_CONTROL(4);
328 *cs++ = scratch_addr;
330 intel_ring_advance(req, cs);
336 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
341 cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
345 flags = PIPE_CONTROL_CS_STALL;
347 if (mode & EMIT_FLUSH) {
348 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
349 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
350 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
351 flags |= PIPE_CONTROL_FLUSH_ENABLE;
353 if (mode & EMIT_INVALIDATE) {
354 flags |= PIPE_CONTROL_TLB_INVALIDATE;
355 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
356 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
357 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
358 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
359 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
360 flags |= PIPE_CONTROL_QW_WRITE;
361 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
363 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
364 cs = gen8_emit_pipe_control(cs,
365 PIPE_CONTROL_CS_STALL |
366 PIPE_CONTROL_STALL_AT_SCOREBOARD,
370 cs = gen8_emit_pipe_control(cs, flags,
371 i915_ggtt_offset(req->engine->scratch) +
372 2 * CACHELINE_BYTES);
374 intel_ring_advance(req, cs);
379 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
381 struct drm_i915_private *dev_priv = engine->i915;
384 addr = dev_priv->status_page_dmah->busaddr;
385 if (INTEL_GEN(dev_priv) >= 4)
386 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
387 I915_WRITE(HWS_PGA, addr);
390 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
392 struct drm_i915_private *dev_priv = engine->i915;
395 /* The ring status page addresses are no longer next to the rest of
396 * the ring registers as of gen7.
398 if (IS_GEN7(dev_priv)) {
399 switch (engine->id) {
401 mmio = RENDER_HWS_PGA_GEN7;
404 mmio = BLT_HWS_PGA_GEN7;
407 * VCS2 doesn't actually exist on Gen7; handling it here only
408 * silences gcc's switch check warning.
412 mmio = BSD_HWS_PGA_GEN7;
415 mmio = VEBOX_HWS_PGA_GEN7;
418 } else if (IS_GEN6(dev_priv)) {
419 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
421 /* XXX: gen8 returns to sanity */
422 mmio = RING_HWS_PGA(engine->mmio_base);
425 I915_WRITE(mmio, engine->status_page.ggtt_offset);
429 * Flush the TLB for this page
431 * FIXME: These two bits have disappeared on gen8, so a question
432 * arises: do we still need this and if so how should we go about
433 * invalidating the TLB?
435 if (IS_GEN(dev_priv, 6, 7)) {
436 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
438 /* ring should be idle before issuing a sync flush */
439 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
442 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
444 if (intel_wait_for_register(dev_priv,
445 reg, INSTPM_SYNC_FLUSH, 0,
447 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
452 static bool stop_ring(struct intel_engine_cs *engine)
454 struct drm_i915_private *dev_priv = engine->i915;
456 if (INTEL_GEN(dev_priv) > 2) {
457 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
458 if (intel_wait_for_register(dev_priv,
459 RING_MI_MODE(engine->mmio_base),
463 DRM_ERROR("%s: timed out trying to stop ring\n",
465 /* Sometimes we observe that the idle flag is not
466 * set even though the ring is empty. So double
467 * check before giving up.
469 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
474 I915_WRITE_CTL(engine, 0);
475 I915_WRITE_HEAD(engine, 0);
476 I915_WRITE_TAIL(engine, 0);
478 if (INTEL_GEN(dev_priv) > 2) {
479 (void)I915_READ_CTL(engine);
480 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
483 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
486 static int init_ring_common(struct intel_engine_cs *engine)
488 struct drm_i915_private *dev_priv = engine->i915;
489 struct intel_ring *ring = engine->buffer;
492 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
494 if (!stop_ring(engine)) {
495 /* G45 ring initialization often fails to reset head to zero */
496 DRM_DEBUG_KMS("%s head not reset to zero "
497 "ctl %08x head %08x tail %08x start %08x\n",
499 I915_READ_CTL(engine),
500 I915_READ_HEAD(engine),
501 I915_READ_TAIL(engine),
502 I915_READ_START(engine));
504 if (!stop_ring(engine)) {
505 DRM_ERROR("failed to set %s head to zero "
506 "ctl %08x head %08x tail %08x start %08x\n",
508 I915_READ_CTL(engine),
509 I915_READ_HEAD(engine),
510 I915_READ_TAIL(engine),
511 I915_READ_START(engine));
517 if (HWS_NEEDS_PHYSICAL(dev_priv))
518 ring_setup_phys_status_page(engine);
520 intel_ring_setup_status_page(engine);
522 intel_engine_reset_breadcrumbs(engine);
524 /* Enforce ordering by reading HEAD register back */
525 I915_READ_HEAD(engine);
527 /* Initialize the ring. This must happen _after_ we've cleared the ring
528 * registers with the above sequence (the readback of the HEAD registers
529 * also enforces ordering), otherwise the hw might lose the new ring
530 * register values. */
531 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
533 /* WaClearRingBufHeadRegAtInit:ctg,elk */
534 if (I915_READ_HEAD(engine))
535 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
536 engine->name, I915_READ_HEAD(engine));
538 intel_ring_update_space(ring);
539 I915_WRITE_HEAD(engine, ring->head);
540 I915_WRITE_TAIL(engine, ring->tail);
541 (void)I915_READ_TAIL(engine);
543 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
545 /* If the head is still not zero, the ring is dead */
546 if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
547 RING_VALID, RING_VALID,
549 DRM_ERROR("%s initialization failed "
550 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
552 I915_READ_CTL(engine),
553 I915_READ_CTL(engine) & RING_VALID,
554 I915_READ_HEAD(engine), ring->head,
555 I915_READ_TAIL(engine), ring->tail,
556 I915_READ_START(engine),
557 i915_ggtt_offset(ring->vma));
562 intel_engine_init_hangcheck(engine);
565 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
570 static void reset_ring_common(struct intel_engine_cs *engine,
571 struct drm_i915_gem_request *request)
573 /* Try to restore the logical GPU state to match the continuation
574 * of the request queue. If we skip the context/PD restore, then
575 * the next request may try to execute assuming that its context
576 * is valid and loaded on the GPU and so may try to access invalid
577 * memory, prompting repeated GPU hangs.
579 * If the request was guilty, we still restore the logical state
580 * in case the next request requires it (e.g. the aliasing ppgtt),
581 * but skip over the hung batch.
583 * If the request was innocent, we try to replay the request with
584 * the restored context.
587 struct drm_i915_private *dev_priv = request->i915;
588 struct intel_context *ce = &request->ctx->engine[engine->id];
589 struct i915_hw_ppgtt *ppgtt;
591 /* FIXME consider gen8 reset */
595 i915_ggtt_offset(ce->state) |
596 BIT(8) /* must be set! */ |
597 CCID_EXTENDED_STATE_SAVE |
598 CCID_EXTENDED_STATE_RESTORE |
602 ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
604 u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
606 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
607 I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
609 /* Wait for the PD reload to complete */
610 if (intel_wait_for_register(dev_priv,
611 RING_PP_DIR_BASE(engine),
614 DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
616 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
619 /* If the rq hung, jump to its breadcrumb and skip the batch */
620 if (request->fence.error == -EIO)
621 request->ring->head = request->postfix;
623 engine->legacy_active_context = NULL;
627 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
631 ret = intel_ring_workarounds_emit(req);
635 ret = i915_gem_render_state_emit(req);
642 static int init_render_ring(struct intel_engine_cs *engine)
644 struct drm_i915_private *dev_priv = engine->i915;
645 int ret = init_ring_common(engine);
649 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
650 if (IS_GEN(dev_priv, 4, 6))
651 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
653 /* We need to disable the AsyncFlip performance optimisations in order
654 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
655 * programmed to '1' on all products.
657 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
659 if (IS_GEN(dev_priv, 6, 7))
660 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
662 /* Required for the hardware to program scanline values for waiting */
663 /* WaEnableFlushTlbInvalidationMode:snb */
664 if (IS_GEN6(dev_priv))
666 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
668 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
669 if (IS_GEN7(dev_priv))
670 I915_WRITE(GFX_MODE_GEN7,
671 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
672 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
674 if (IS_GEN6(dev_priv)) {
675 /* From the Sandybridge PRM, volume 1 part 3, page 24:
676 * "If this bit is set, STCunit will have LRA as replacement
677 * policy. [...] This bit must be reset. LRA replacement
678 * policy is not supported."
680 I915_WRITE(CACHE_MODE_0,
681 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
684 if (IS_GEN(dev_priv, 6, 7))
685 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
687 if (INTEL_INFO(dev_priv)->gen >= 6)
688 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
690 return init_workarounds_ring(engine);
693 static void render_ring_cleanup(struct intel_engine_cs *engine)
695 struct drm_i915_private *dev_priv = engine->i915;
697 i915_vma_unpin_and_release(&dev_priv->semaphore);
700 static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
702 struct drm_i915_private *dev_priv = req->i915;
703 struct intel_engine_cs *waiter;
704 enum intel_engine_id id;
706 for_each_engine(waiter, dev_priv, id) {
707 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
708 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
711 *cs++ = GFX_OP_PIPE_CONTROL(6);
712 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
713 PIPE_CONTROL_CS_STALL;
714 *cs++ = lower_32_bits(gtt_offset);
715 *cs++ = upper_32_bits(gtt_offset);
716 *cs++ = req->global_seqno;
718 *cs++ = MI_SEMAPHORE_SIGNAL |
719 MI_SEMAPHORE_TARGET(waiter->hw_id);
726 static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
728 struct drm_i915_private *dev_priv = req->i915;
729 struct intel_engine_cs *waiter;
730 enum intel_engine_id id;
732 for_each_engine(waiter, dev_priv, id) {
733 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
734 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
737 *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
738 *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
739 *cs++ = upper_32_bits(gtt_offset);
740 *cs++ = req->global_seqno;
741 *cs++ = MI_SEMAPHORE_SIGNAL |
742 MI_SEMAPHORE_TARGET(waiter->hw_id);
749 static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
751 struct drm_i915_private *dev_priv = req->i915;
752 struct intel_engine_cs *engine;
753 enum intel_engine_id id;
756 for_each_engine(engine, dev_priv, id) {
759 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
762 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
763 if (i915_mmio_reg_valid(mbox_reg)) {
764 *cs++ = MI_LOAD_REGISTER_IMM(1);
765 *cs++ = i915_mmio_reg_offset(mbox_reg);
766 *cs++ = req->global_seqno;
776 static void i9xx_submit_request(struct drm_i915_gem_request *request)
778 struct drm_i915_private *dev_priv = request->i915;
780 i915_gem_request_submit(request);
782 I915_WRITE_TAIL(request->engine,
783 intel_ring_set_tail(request->ring, request->tail));
786 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
788 *cs++ = MI_STORE_DWORD_INDEX;
789 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
790 *cs++ = req->global_seqno;
791 *cs++ = MI_USER_INTERRUPT;
793 req->tail = intel_ring_offset(req, cs);
794 assert_ring_tail_valid(req->ring, req->tail);
797 static const int i9xx_emit_breadcrumb_sz = 4;
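/* Must match the number of dwords written by i9xx_emit_breadcrumb() above. */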
800 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
802 * @req - request to write to the ring
804 * Update the mailbox registers in the *other* rings with the current seqno.
805 * This acts like a signal in the canonical semaphore.
807 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
809 return i9xx_emit_breadcrumb(req,
810 req->engine->semaphore.signal(req, cs));
813 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
816 struct intel_engine_cs *engine = req->engine;
818 if (engine->semaphore.signal)
819 cs = engine->semaphore.signal(req, cs);
821 *cs++ = GFX_OP_PIPE_CONTROL(6);
822 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
823 PIPE_CONTROL_QW_WRITE;
824 *cs++ = intel_hws_seqno_address(engine);
826 *cs++ = req->global_seqno;
827 /* We're thrashing one dword of HWS. */
829 *cs++ = MI_USER_INTERRUPT;
832 req->tail = intel_ring_offset(req, cs);
833 assert_ring_tail_valid(req->ring, req->tail);
836 static const int gen8_render_emit_breadcrumb_sz = 8;
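/* Keep in sync with the dword count emitted by gen8_render_emit_breadcrumb(). */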
839 * gen8_ring_sync_to - sync the waiter to the signaller on seqno
841 * @req - request that is waiting
842 * @signal - request which has, or will, signal
843 * The waiter blocks until @signal's global seqno has been written.
847 gen8_ring_sync_to(struct drm_i915_gem_request *req,
848 struct drm_i915_gem_request *signal)
850 struct drm_i915_private *dev_priv = req->i915;
851 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
852 struct i915_hw_ppgtt *ppgtt;
855 cs = intel_ring_begin(req, 4);
859 *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
860 MI_SEMAPHORE_SAD_GTE_SDD;
861 *cs++ = signal->global_seqno;
862 *cs++ = lower_32_bits(offset);
863 *cs++ = upper_32_bits(offset);
864 intel_ring_advance(req, cs);
866 /* When the !RCS engines idle waiting upon a semaphore, they lose their
867 * pagetables and we must reload them before executing the batch.
868 * We do this on the i915_switch_context() following the wait and
869 * before the dispatch.
871 ppgtt = req->ctx->ppgtt;
872 if (ppgtt && req->engine->id != RCS)
873 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
878 gen6_ring_sync_to(struct drm_i915_gem_request *req,
879 struct drm_i915_gem_request *signal)
881 u32 dw1 = MI_SEMAPHORE_MBOX |
882 MI_SEMAPHORE_COMPARE |
883 MI_SEMAPHORE_REGISTER;
884 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
887 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
889 cs = intel_ring_begin(req, 4);
893 *cs++ = dw1 | wait_mbox;
894 /* Throughout all of the GEM code, seqno passed implies our current
895 * seqno is >= the last seqno executed. However for hardware the
896 * comparison is strictly greater than.
898 *cs++ = signal->global_seqno - 1;
901 intel_ring_advance(req, cs);
907 gen5_seqno_barrier(struct intel_engine_cs *engine)
909 /* MI_STORE commands are internally buffered by the GPU and not flushed
910 * either by MI_FLUSH or SyncFlush or any other combination of
913 * "Only the submission of the store operation is guaranteed.
914 * The write result will be complete (coherent) some time later
915 * (this is practically a finite period but there is no guaranteed
918 * Empirically, we observe that we need a delay of at least 75us to
919 * be sure that the seqno write is visible by the CPU.
921 usleep_range(125, 250);
925 gen6_seqno_barrier(struct intel_engine_cs *engine)
927 struct drm_i915_private *dev_priv = engine->i915;
929 /* Workaround to force correct ordering between irq and seqno writes on
930 * ivb (and maybe also on snb) by reading from a CS register (like
931 * ACTHD) before reading the status page.
933 * Note that this effectively stalls the read by the time it takes to
934 * do a memory transaction, which more or less ensures that the write
935 * from the GPU has sufficient time to invalidate the CPU cacheline.
936 * Alternatively we could delay the interrupt from the CS ring to give
937 * the write time to land, but that would incur a delay after every
938 * batch i.e. much more frequent than a delay when waiting for the
939 * interrupt (with the same net latency).
941 * Also note that to prevent whole machine hangs on gen7, we have to
942 * take the spinlock to guard against concurrent cacheline access.
944 spin_lock_irq(&dev_priv->uncore.lock);
945 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
946 spin_unlock_irq(&dev_priv->uncore.lock);
950 gen5_irq_enable(struct intel_engine_cs *engine)
952 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
956 gen5_irq_disable(struct intel_engine_cs *engine)
958 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
962 i9xx_irq_enable(struct intel_engine_cs *engine)
964 struct drm_i915_private *dev_priv = engine->i915;
966 dev_priv->irq_mask &= ~engine->irq_enable_mask;
967 I915_WRITE(IMR, dev_priv->irq_mask);
968 POSTING_READ_FW(RING_IMR(engine->mmio_base));
972 i9xx_irq_disable(struct intel_engine_cs *engine)
974 struct drm_i915_private *dev_priv = engine->i915;
976 dev_priv->irq_mask |= engine->irq_enable_mask;
977 I915_WRITE(IMR, dev_priv->irq_mask);
981 i8xx_irq_enable(struct intel_engine_cs *engine)
983 struct drm_i915_private *dev_priv = engine->i915;
985 dev_priv->irq_mask &= ~engine->irq_enable_mask;
986 I915_WRITE16(IMR, dev_priv->irq_mask);
987 POSTING_READ16(RING_IMR(engine->mmio_base));
991 i8xx_irq_disable(struct intel_engine_cs *engine)
993 struct drm_i915_private *dev_priv = engine->i915;
995 dev_priv->irq_mask |= engine->irq_enable_mask;
996 I915_WRITE16(IMR, dev_priv->irq_mask);
1000 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1004 cs = intel_ring_begin(req, 2);
1010 intel_ring_advance(req, cs);
1015 gen6_irq_enable(struct intel_engine_cs *engine)
1017 struct drm_i915_private *dev_priv = engine->i915;
1019 I915_WRITE_IMR(engine,
1020 ~(engine->irq_enable_mask |
1021 engine->irq_keep_mask));
1022 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1026 gen6_irq_disable(struct intel_engine_cs *engine)
1028 struct drm_i915_private *dev_priv = engine->i915;
1030 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1031 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1035 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1037 struct drm_i915_private *dev_priv = engine->i915;
1039 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1040 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
1044 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1046 struct drm_i915_private *dev_priv = engine->i915;
1048 I915_WRITE_IMR(engine, ~0);
1049 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
1053 gen8_irq_enable(struct intel_engine_cs *engine)
1055 struct drm_i915_private *dev_priv = engine->i915;
1057 I915_WRITE_IMR(engine,
1058 ~(engine->irq_enable_mask |
1059 engine->irq_keep_mask));
1060 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1064 gen8_irq_disable(struct intel_engine_cs *engine)
1066 struct drm_i915_private *dev_priv = engine->i915;
1068 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1072 i965_emit_bb_start(struct drm_i915_gem_request *req,
1073 u64 offset, u32 length,
1074 unsigned int dispatch_flags)
1078 cs = intel_ring_begin(req, 2);
1082 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1083 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1085 intel_ring_advance(req, cs);
1090 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1091 #define I830_BATCH_LIMIT (256*1024)
1092 #define I830_TLB_ENTRIES (2)
1093 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
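/*
 * The scratch bo must therefore be big enough for whichever is larger: the
 * pages blitted to evict stale TLB entries, or a full copy of the largest
 * batch we accept (I830_BATCH_LIMIT).
 */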
1095 i830_emit_bb_start(struct drm_i915_gem_request *req,
1096 u64 offset, u32 len,
1097 unsigned int dispatch_flags)
1099 u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
1101 cs = intel_ring_begin(req, 6);
1105 /* Evict the invalid PTE TLBs */
1106 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1107 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1108 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
1112 intel_ring_advance(req, cs);
1114 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1115 if (len > I830_BATCH_LIMIT)
1118 cs = intel_ring_begin(req, 6 + 2);
1122 /* Blit the batch (which now has all relocs applied) to the
1123 * stable batch scratch bo area (so that the CS never
1124 * stumbles over its tlb invalidation bug) ...
1126 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
1127 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1128 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1135 intel_ring_advance(req, cs);
1137 /* ... and execute it. */
1141 cs = intel_ring_begin(req, 2);
1145 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1146 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1147 MI_BATCH_NON_SECURE);
1148 intel_ring_advance(req, cs);
1154 i915_emit_bb_start(struct drm_i915_gem_request *req,
1155 u64 offset, u32 len,
1156 unsigned int dispatch_flags)
1160 cs = intel_ring_begin(req, 2);
1164 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1165 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1166 MI_BATCH_NON_SECURE);
1167 intel_ring_advance(req, cs);
1172 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1174 struct drm_i915_private *dev_priv = engine->i915;
1176 if (!dev_priv->status_page_dmah)
1179 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1180 engine->status_page.page_addr = NULL;
1183 static void cleanup_status_page(struct intel_engine_cs *engine)
1185 struct i915_vma *vma;
1186 struct drm_i915_gem_object *obj;
1188 vma = fetch_and_zero(&engine->status_page.vma);
1194 i915_vma_unpin(vma);
1195 i915_vma_close(vma);
1197 i915_gem_object_unpin_map(obj);
1198 __i915_gem_object_release_unless_active(obj);
1201 static int init_status_page(struct intel_engine_cs *engine)
1203 struct drm_i915_gem_object *obj;
1204 struct i915_vma *vma;
1209 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1211 DRM_ERROR("Failed to allocate status page\n");
1212 return PTR_ERR(obj);
1215 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1219 vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1226 if (!HAS_LLC(engine->i915))
1227 /* On g33, we cannot place HWS above 256MiB, so
1228 * restrict its pinning to the low mappable arena.
1229 * Though this restriction is not documented for
1230 * gen4, gen5, or byt, they also behave similarly
1231 * and hang if the HWS is placed at the top of the
1232 * GTT. To generalise, it appears that all !llc
1233 * platforms have issues with us placing the HWS
1234 * above the mappable region (even though we never
1237 flags |= PIN_MAPPABLE;
1238 ret = i915_vma_pin(vma, 0, 4096, flags);
1242 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1243 if (IS_ERR(vaddr)) {
1244 ret = PTR_ERR(vaddr);
1248 engine->status_page.vma = vma;
1249 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1250 engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
1252 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1253 engine->name, i915_ggtt_offset(vma));
1257 i915_vma_unpin(vma);
1259 i915_gem_object_put(obj);
1263 static int init_phys_status_page(struct intel_engine_cs *engine)
1265 struct drm_i915_private *dev_priv = engine->i915;
1267 GEM_BUG_ON(engine->id != RCS);
1269 dev_priv->status_page_dmah =
1270 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1271 if (!dev_priv->status_page_dmah)
1274 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1275 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1280 int intel_ring_pin(struct intel_ring *ring,
1281 struct drm_i915_private *i915,
1282 unsigned int offset_bias)
1284 enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1285 struct i915_vma *vma = ring->vma;
1290 GEM_BUG_ON(ring->vaddr);
1295 flags |= PIN_OFFSET_BIAS | offset_bias;
1296 if (vma->obj->stolen)
1297 flags |= PIN_MAPPABLE;
1299 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1300 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1301 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1303 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1308 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1312 if (i915_vma_is_map_and_fenceable(vma))
1313 addr = (void __force *)i915_vma_pin_iomap(vma);
1315 addr = i915_gem_object_pin_map(vma->obj, map);
1323 i915_vma_unpin(vma);
1324 return PTR_ERR(addr);
1327 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1329 GEM_BUG_ON(!list_empty(&ring->request_list));
1333 intel_ring_update_space(ring);
1336 void intel_ring_unpin(struct intel_ring *ring)
1338 GEM_BUG_ON(!ring->vma);
1339 GEM_BUG_ON(!ring->vaddr);
1341 /* Discard any unused bytes beyond that submitted to hw. */
1342 intel_ring_reset(ring, ring->tail);
1344 if (i915_vma_is_map_and_fenceable(ring->vma))
1345 i915_vma_unpin_iomap(ring->vma);
1347 i915_gem_object_unpin_map(ring->vma->obj);
1350 i915_vma_unpin(ring->vma);
1353 static struct i915_vma *
1354 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1356 struct drm_i915_gem_object *obj;
1357 struct i915_vma *vma;
1359 obj = i915_gem_object_create_stolen(dev_priv, size);
1361 obj = i915_gem_object_create_internal(dev_priv, size);
1363 return ERR_CAST(obj);
1365 /* mark ring buffers as read-only from GPU side by default */
1368 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1375 i915_gem_object_put(obj);
1380 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1382 struct intel_ring *ring;
1383 struct i915_vma *vma;
1385 GEM_BUG_ON(!is_power_of_2(size));
1386 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1388 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1390 return ERR_PTR(-ENOMEM);
1392 INIT_LIST_HEAD(&ring->request_list);
1395 /* Workaround an erratum on the i830 which causes a hang if
1396 * the TAIL pointer points to within the last 2 cachelines
1399 ring->effective_size = size;
1400 if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1401 ring->effective_size -= 2 * CACHELINE_BYTES;
1403 intel_ring_update_space(ring);
1405 vma = intel_ring_create_vma(engine->i915, size);
1408 return ERR_CAST(vma);
1416 intel_ring_free(struct intel_ring *ring)
1418 struct drm_i915_gem_object *obj = ring->vma->obj;
1420 i915_vma_close(ring->vma);
1421 __i915_gem_object_release_unless_active(obj);
1426 static int context_pin(struct i915_gem_context *ctx)
1428 struct i915_vma *vma = ctx->engine[RCS].state;
1431 /* Clear this page out of any CPU caches for coherent swap-in/out.
1432 * We only want to do this on the first bind so that we do not stall
1433 * on an active context (which by nature is already on the GPU).
1435 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1436 ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
1441 return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
1442 PIN_GLOBAL | PIN_HIGH);
1445 static struct i915_vma *
1446 alloc_context_vma(struct intel_engine_cs *engine)
1448 struct drm_i915_private *i915 = engine->i915;
1449 struct drm_i915_gem_object *obj;
1450 struct i915_vma *vma;
1452 obj = i915_gem_object_create(i915, engine->context_size);
1454 return ERR_CAST(obj);
1457 * Try to make the context utilize L3 as well as LLC.
1459 * On VLV we don't have L3 controls in the PTEs so we
1460 * shouldn't touch the cache level, especially as that
1461 * would make the object snooped which might have a
1462 * negative performance impact.
1464 * Snooping is required on non-llc platforms in execlist
1465 * mode, but since all GGTT accesses use PAT entry 0 we
1466 * get snooping anyway regardless of cache_level.
1468 * This is only applicable for Ivy Bridge devices since
1469 * later platforms don't have L3 control bits in the PTE.
1471 if (IS_IVYBRIDGE(i915)) {
1472 /* Ignore any error, regard it as a simple optimisation */
1473 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
1476 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1478 i915_gem_object_put(obj);
1483 static struct intel_ring *
1484 intel_ring_context_pin(struct intel_engine_cs *engine,
1485 struct i915_gem_context *ctx)
1487 struct intel_context *ce = &ctx->engine[engine->id];
1490 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1492 if (likely(ce->pin_count++))
1494 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1496 if (!ce->state && engine->context_size) {
1497 struct i915_vma *vma;
1499 vma = alloc_context_vma(engine);
1509 ret = context_pin(ctx);
1513 ce->state->obj->mm.dirty = true;
1516 /* The kernel context is only used as a placeholder for flushing the
1517 * active context. It is never used for submitting user rendering and
1518 * as such never requires the golden render context, and so we can skip
1519 * emitting it when we switch to the kernel context. This is required
1520 * as during eviction we cannot allocate and pin the renderstate in
1521 * order to initialise the context.
1523 if (i915_gem_context_is_kernel(ctx))
1524 ce->initialised = true;
1526 i915_gem_context_get(ctx);
1529 /* One ringbuffer to rule them all */
1530 return engine->buffer;
1534 return ERR_PTR(ret);
1537 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
1538 struct i915_gem_context *ctx)
1540 struct intel_context *ce = &ctx->engine[engine->id];
1542 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1543 GEM_BUG_ON(ce->pin_count == 0);
1545 if (--ce->pin_count)
1549 i915_vma_unpin(ce->state);
1551 i915_gem_context_put(ctx);
1554 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1556 struct intel_ring *ring;
1559 intel_engine_setup_common(engine);
1561 err = intel_engine_init_common(engine);
1565 if (HWS_NEEDS_PHYSICAL(engine->i915))
1566 err = init_phys_status_page(engine);
1568 err = init_status_page(engine);
1572 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
1574 err = PTR_ERR(ring);
1578 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1579 err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
1583 GEM_BUG_ON(engine->buffer);
1584 engine->buffer = ring;
1589 intel_ring_free(ring);
1591 if (HWS_NEEDS_PHYSICAL(engine->i915))
1592 cleanup_phys_status_page(engine);
1594 cleanup_status_page(engine);
1596 intel_engine_cleanup_common(engine);
1600 void intel_engine_cleanup(struct intel_engine_cs *engine)
1602 struct drm_i915_private *dev_priv = engine->i915;
1604 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1605 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
1607 intel_ring_unpin(engine->buffer);
1608 intel_ring_free(engine->buffer);
1610 if (engine->cleanup)
1611 engine->cleanup(engine);
1613 if (HWS_NEEDS_PHYSICAL(dev_priv))
1614 cleanup_phys_status_page(engine);
1616 cleanup_status_page(engine);
1618 intel_engine_cleanup_common(engine);
1620 dev_priv->engine[engine->id] = NULL;
1624 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1626 struct intel_engine_cs *engine;
1627 enum intel_engine_id id;
1629 /* Restart from the beginning of the rings for convenience */
1630 for_each_engine(engine, dev_priv, id)
1631 intel_ring_reset(engine->buffer, 0);
1634 static int ring_request_alloc(struct drm_i915_gem_request *request)
1638 GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
1640 /* Flush enough space to reduce the likelihood of waiting after
1641 * we start building the request - in which case we will just
1642 * have to repeat work.
1644 request->reserved_space += LEGACY_REQUEST_SIZE;
1646 cs = intel_ring_begin(request, 0);
1650 request->reserved_space -= LEGACY_REQUEST_SIZE;
1654 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
1656 struct intel_ring *ring = req->ring;
1657 struct drm_i915_gem_request *target;
1660 lockdep_assert_held(&req->i915->drm.struct_mutex);
1662 intel_ring_update_space(ring);
1663 if (ring->space >= bytes)
1667 * Space is reserved in the ringbuffer for finalising the request,
1668 * as that cannot be allowed to fail. During request finalisation,
1669 * reserved_space is set to 0 to stop the overallocation and the
1670 * assumption is that then we never need to wait (which has the
1671 * risk of failing with EINTR).
1673 * See also i915_gem_request_alloc() and i915_add_request().
1675 GEM_BUG_ON(!req->reserved_space);
1677 list_for_each_entry(target, &ring->request_list, ring_link) {
1678 /* Would completion of this request free enough space? */
1679 if (bytes <= __intel_ring_space(target->postfix,
1680 ring->emit, ring->size))
1684 if (WARN_ON(&target->ring_link == &ring->request_list))
1687 timeout = i915_wait_request(target,
1688 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
1689 MAX_SCHEDULE_TIMEOUT);
1693 i915_gem_request_retire_upto(target);
1695 intel_ring_update_space(ring);
1696 GEM_BUG_ON(ring->space < bytes);
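/*
 * Typical usage of the ring emission API (a sketch mirroring the callers
 * above): reserve a number of dwords, write exactly that many, then advance:
 *
 *	cs = intel_ring_begin(req, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_FLUSH;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */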
1700 u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
1702 struct intel_ring *ring = req->ring;
1703 int remain_actual = ring->size - ring->emit;
1704 int remain_usable = ring->effective_size - ring->emit;
1705 int bytes = num_dwords * sizeof(u32);
1706 int total_bytes, wait_bytes;
1707 bool need_wrap = false;
1710 total_bytes = bytes + req->reserved_space;
1712 if (unlikely(bytes > remain_usable)) {
1714 * Not enough space for the basic request. So need to flush
1715 * out the remainder and then wait for base + reserved.
1717 wait_bytes = remain_actual + total_bytes;
1719 } else if (unlikely(total_bytes > remain_usable)) {
1721 * The base request will fit but the reserved space
1722 * falls off the end. So we don't need an immediate wrap
1723 * and only need to effectively wait for the reserved
1724 * size of space from the start of the ringbuffer.
1726 wait_bytes = remain_actual + req->reserved_space;
1728 /* No wrapping required, just waiting. */
1729 wait_bytes = total_bytes;
1732 if (wait_bytes > ring->space) {
1733 int ret = wait_for_space(req, wait_bytes);
1735 return ERR_PTR(ret);
1738 if (unlikely(need_wrap)) {
1739 GEM_BUG_ON(remain_actual > ring->space);
1740 GEM_BUG_ON(ring->emit + remain_actual > ring->size);
1742 /* Fill the tail with MI_NOOP */
1743 memset(ring->vaddr + ring->emit, 0, remain_actual);
1745 ring->space -= remain_actual;
1748 GEM_BUG_ON(ring->emit > ring->size - bytes);
1749 GEM_BUG_ON(ring->space < bytes);
1750 cs = ring->vaddr + ring->emit;
1751 GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
1752 ring->emit += bytes;
1753 ring->space -= bytes;
1758 /* Align the ring tail to a cacheline boundary */
1759 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
1762 (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1765 if (num_dwords == 0)
1768 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
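/*
 * Worked example (a sketch, assuming CACHELINE_BYTES is 64, i.e. 16 dwords
 * per cacheline): if ring->emit sits 56 bytes (14 dwords) into a cacheline,
 * we pad with 16 - 14 = 2 MI_NOOPs to reach the next boundary.
 */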
1769 cs = intel_ring_begin(req, num_dwords);
1773 while (num_dwords--)
1776 intel_ring_advance(req, cs);
1781 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
1783 struct drm_i915_private *dev_priv = request->i915;
1785 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1787 /* Every tail move must follow the sequence below */
1789 /* Disable notification that the ring is IDLE. The GT
1790 * will then assume that it is busy and bring it out of rc6.
1792 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1793 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1795 /* Clear the context id. Here be magic! */
1796 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
1798 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1799 if (__intel_wait_for_register_fw(dev_priv,
1800 GEN6_BSD_SLEEP_PSMI_CONTROL,
1801 GEN6_BSD_SLEEP_INDICATOR,
1804 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1806 /* Now that the ring is fully powered up, update the tail */
1807 i9xx_submit_request(request);
1809 /* Let the ring send IDLE messages to the GT again,
1810 * and so let it sleep to conserve power when idle.
1812 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1813 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1815 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1818 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1822 cs = intel_ring_begin(req, 4);
1827 if (INTEL_GEN(req->i915) >= 8)
1830 /* We always require a command barrier so that subsequent
1831 * commands, such as breadcrumb interrupts, are strictly ordered
1832 * wrt the contents of the write cache being flushed to memory
1833 * (and thus being coherent from the CPU).
1835 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1838 * Bspec vol 1c.5 - video engine command streamer:
1839 * "If ENABLED, all TLBs will be invalidated once the flush
1840 * operation is complete. This bit is only valid when the
1841 * Post-Sync Operation field is a value of 1h or 3h."
1843 if (mode & EMIT_INVALIDATE)
1844 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1847 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1848 if (INTEL_GEN(req->i915) >= 8) {
1849 *cs++ = 0; /* upper addr */
1850 *cs++ = 0; /* value */
1855 intel_ring_advance(req, cs);
1860 gen8_emit_bb_start(struct drm_i915_gem_request *req,
1861 u64 offset, u32 len,
1862 unsigned int dispatch_flags)
1864 bool ppgtt = USES_PPGTT(req->i915) &&
1865 !(dispatch_flags & I915_DISPATCH_SECURE);
1868 cs = intel_ring_begin(req, 4);
1872 /* FIXME(BDW): Address space and security selectors. */
1873 *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
1874 I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
1875 *cs++ = lower_32_bits(offset);
1876 *cs++ = upper_32_bits(offset);
1878 intel_ring_advance(req, cs);
1884 hsw_emit_bb_start(struct drm_i915_gem_request *req,
1885 u64 offset, u32 len,
1886 unsigned int dispatch_flags)
1890 cs = intel_ring_begin(req, 2);
1894 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1895 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
1896 (dispatch_flags & I915_DISPATCH_RS ?
1897 MI_BATCH_RESOURCE_STREAMER : 0);
1898 /* bit0-7 is the length on GEN6+ */
1900 intel_ring_advance(req, cs);
1906 gen6_emit_bb_start(struct drm_i915_gem_request *req,
1907 u64 offset, u32 len,
1908 unsigned int dispatch_flags)
1912 cs = intel_ring_begin(req, 2);
1916 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1917 0 : MI_BATCH_NON_SECURE_I965);
1918 /* bit0-7 is the length on GEN6+ */
1920 intel_ring_advance(req, cs);
1925 /* Blitter support (SandyBridge+) */
1927 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1931 cs = intel_ring_begin(req, 4);
1936 if (INTEL_GEN(req->i915) >= 8)
1939 /* We always require a command barrier so that subsequent
1940 * commands, such as breadcrumb interrupts, are strictly ordered
1941 * wrt the contents of the write cache being flushed to memory
1942 * (and thus being coherent from the CPU).
1944 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1947 * Bspec vol 1c.3 - blitter engine command streamer:
1948 * "If ENABLED, all TLBs will be invalidated once the flush
1949 * operation is complete. This bit is only valid when the
1950 * Post-Sync Operation field is a value of 1h or 3h."
1952 if (mode & EMIT_INVALIDATE)
1953 cmd |= MI_INVALIDATE_TLB;
1955 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1956 if (INTEL_GEN(req->i915) >= 8) {
1957 *cs++ = 0; /* upper addr */
1958 *cs++ = 0; /* value */
1963 intel_ring_advance(req, cs);
1968 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
1969 struct intel_engine_cs *engine)
1971 struct drm_i915_gem_object *obj;
1974 if (!i915.semaphores)
1977 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
1978 struct i915_vma *vma;
1980 obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
1984 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1988 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1992 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1996 dev_priv->semaphore = vma;
1999 if (INTEL_GEN(dev_priv) >= 8) {
2000 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
2002 engine->semaphore.sync_to = gen8_ring_sync_to;
2003 engine->semaphore.signal = gen8_xcs_signal;
2005 for (i = 0; i < I915_NUM_ENGINES; i++) {
2008 if (i != engine->id)
2009 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2011 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2013 engine->semaphore.signal_ggtt[i] = ring_offset;
2015 } else if (INTEL_GEN(dev_priv) >= 6) {
2016 engine->semaphore.sync_to = gen6_ring_sync_to;
2017 engine->semaphore.signal = gen6_signal;
2020 * The current semaphore scheme is only used on pre-gen8
2021 * platforms, and there is no VCS2 ring on pre-gen8, so the
2022 * semaphore between RCS and VCS2 is initialized as INVALID.
2023 * Gen8 will initialize the semaphore between VCS2 and RCS
2024 * later.
2026 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
2027 static const struct {
2029 i915_reg_t mbox_reg;
2030 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2032 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2033 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2034 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2037 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2038 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2039 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2042 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2043 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2044 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2047 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2048 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2049 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2053 i915_reg_t mbox_reg;
2055 if (i == engine->hw_id) {
2056 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2057 mbox_reg = GEN6_NOSYNC;
2059 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2060 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2063 engine->semaphore.mbox.wait[i] = wait_mbox;
2064 engine->semaphore.mbox.signal[i] = mbox_reg;
2071 i915_gem_object_put(obj);
2073 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2074 i915.semaphores = 0;
2077 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2078 struct intel_engine_cs *engine)
2080 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2082 if (INTEL_GEN(dev_priv) >= 8) {
2083 engine->irq_enable = gen8_irq_enable;
2084 engine->irq_disable = gen8_irq_disable;
2085 engine->irq_seqno_barrier = gen6_seqno_barrier;
2086 } else if (INTEL_GEN(dev_priv) >= 6) {
2087 engine->irq_enable = gen6_irq_enable;
2088 engine->irq_disable = gen6_irq_disable;
2089 engine->irq_seqno_barrier = gen6_seqno_barrier;
2090 } else if (INTEL_GEN(dev_priv) >= 5) {
2091 engine->irq_enable = gen5_irq_enable;
2092 engine->irq_disable = gen5_irq_disable;
2093 engine->irq_seqno_barrier = gen5_seqno_barrier;
2094 } else if (INTEL_GEN(dev_priv) >= 3) {
2095 engine->irq_enable = i9xx_irq_enable;
2096 engine->irq_disable = i9xx_irq_disable;
2098 engine->irq_enable = i8xx_irq_enable;
2099 engine->irq_disable = i8xx_irq_disable;
2103 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2105 engine->submit_request = i9xx_submit_request;
2108 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2110 engine->submit_request = gen6_bsd_submit_request;
2113 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2114 struct intel_engine_cs *engine)
2116 intel_ring_init_irq(dev_priv, engine);
2117 intel_ring_init_semaphores(dev_priv, engine);
2119 engine->init_hw = init_ring_common;
2120 engine->reset_hw = reset_ring_common;
2122 engine->context_pin = intel_ring_context_pin;
2123 engine->context_unpin = intel_ring_context_unpin;
2125 engine->request_alloc = ring_request_alloc;
2127 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2128 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2129 if (i915.semaphores) {
2132 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
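/*
 * Each engine signalled grows the breadcrumb: 6 dwords per target ring
 * for gen8_xcs_signal(), 3 per ring for gen6_signal() (padded below to
 * keep the total dword count even).
 */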
2134 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
2135 if (INTEL_GEN(dev_priv) >= 8) {
2136 engine->emit_breadcrumb_sz += num_rings * 6;
2138 engine->emit_breadcrumb_sz += num_rings * 3;
2140 engine->emit_breadcrumb_sz++;
2144 engine->set_default_submission = i9xx_set_default_submission;
2146 if (INTEL_GEN(dev_priv) >= 8)
2147 engine->emit_bb_start = gen8_emit_bb_start;
2148 else if (INTEL_GEN(dev_priv) >= 6)
2149 engine->emit_bb_start = gen6_emit_bb_start;
2150 else if (INTEL_GEN(dev_priv) >= 4)
2151 engine->emit_bb_start = i965_emit_bb_start;
2152 else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2153 engine->emit_bb_start = i830_emit_bb_start;
2155 engine->emit_bb_start = i915_emit_bb_start;
2158 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2160 struct drm_i915_private *dev_priv = engine->i915;
2163 intel_ring_default_vfuncs(dev_priv, engine);
2165 if (HAS_L3_DPF(dev_priv))
2166 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2168 if (INTEL_GEN(dev_priv) >= 8) {
2169 engine->init_context = intel_rcs_ctx_init;
2170 engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
2171 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
2172 engine->emit_flush = gen8_render_ring_flush;
2173 if (i915.semaphores) {
2176 engine->semaphore.signal = gen8_rcs_signal;
2179 hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
2180 engine->emit_breadcrumb_sz += num_rings * 8;
2182 } else if (INTEL_GEN(dev_priv) >= 6) {
2183 engine->init_context = intel_rcs_ctx_init;
2184 engine->emit_flush = gen7_render_ring_flush;
2185 if (IS_GEN6(dev_priv))
2186 engine->emit_flush = gen6_render_ring_flush;
2187 } else if (IS_GEN5(dev_priv)) {
2188 engine->emit_flush = gen4_render_ring_flush;
2190 if (INTEL_GEN(dev_priv) < 4)
2191 engine->emit_flush = gen2_render_ring_flush;
2193 engine->emit_flush = gen4_render_ring_flush;
2194 engine->irq_enable_mask = I915_USER_INTERRUPT;
2197 if (IS_HASWELL(dev_priv))
2198 engine->emit_bb_start = hsw_emit_bb_start;
2200 engine->init_hw = init_render_ring;
2201 engine->cleanup = render_ring_cleanup;
2203 ret = intel_init_ring_buffer(engine);
2207 if (INTEL_GEN(dev_priv) >= 6) {
2208 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2211 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2212 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
2220 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2222 struct drm_i915_private *dev_priv = engine->i915;
2224 intel_ring_default_vfuncs(dev_priv, engine);
2226 if (INTEL_GEN(dev_priv) >= 6) {
2227 /* gen6 bsd needs a special wa for tail updates */
2228 if (IS_GEN6(dev_priv))
2229 engine->set_default_submission = gen6_bsd_set_default_submission;
2230 engine->emit_flush = gen6_bsd_ring_flush;
2231 if (INTEL_GEN(dev_priv) < 8)
2232 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2234 engine->mmio_base = BSD_RING_BASE;
2235 engine->emit_flush = bsd_ring_flush;
2236 if (IS_GEN5(dev_priv))
2237 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2239 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2242 return intel_init_ring_buffer(engine);
2245 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2247 struct drm_i915_private *dev_priv = engine->i915;
2249 intel_ring_default_vfuncs(dev_priv, engine);
2251 engine->emit_flush = gen6_ring_flush;
2252 if (INTEL_GEN(dev_priv) < 8)
2253 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2255 return intel_init_ring_buffer(engine);
2258 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2260 struct drm_i915_private *dev_priv = engine->i915;
2262 intel_ring_default_vfuncs(dev_priv, engine);
2264 engine->emit_flush = gen6_ring_flush;
2266 if (INTEL_GEN(dev_priv) < 8) {
2267 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2268 engine->irq_enable = hsw_vebox_irq_enable;
2269 engine->irq_disable = hsw_vebox_irq_disable;
2272 return intel_init_ring_buffer(engine);