X-Git-Url: https://git.kernelconcepts.de/?p=karo-tx-linux.git;a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fintel_lrc.c;h=88e12bdf79e23388a912fc53939658c9971a406f;hp=29dd4488dc49856b6518ba5fce760cbd4710a1e8;hb=5ae95032be3af9e4944132714f9a6cc799f6a06c;hpb=60a53737afb80b3f016e79471fc4d599b938ed6e

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 29dd4488dc49..88e12bdf79e2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -196,13 +196,21 @@
 	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
 }
 
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
+	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
+	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
+}
+
 enum {
 	ADVANCED_CONTEXT = 0,
-	LEGACY_CONTEXT,
+	LEGACY_32B_CONTEXT,
 	ADVANCED_AD_CONTEXT,
 	LEGACY_64B_CONTEXT
 };
-#define GEN8_CTX_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
+		LEGACY_64B_CONTEXT :\
+		LEGACY_32B_CONTEXT)
 enum {
 	FAULT_AND_HANG = 0,
 	FAULT_AND_HALT, /* Debug only */
@@ -213,6 +221,9 @@
 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
 
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *default_ctx_obj);
+
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -228,6 +239,12 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 {
 	WARN_ON(i915.enable_ppgtt == -1);
 
+	/* On platforms with execlist available, vGPU will only
+	 * support execlist mode, no ring buffer mode.
+	 */
+	if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+		return 1;
+
 	if (INTEL_INFO(dev)->gen >= 9)
 		return 1;
 
@@ -255,25 +272,35 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
  */
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 {
-	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+		   LRC_PPHWSP_PN * PAGE_SIZE;
 
 	/* LRCA is required to be 4K aligned so the more significant 20 bits
 	 * are globally unique */
 	return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
+static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
 {
-	struct intel_engine_cs *ring = rq->ring;
 	struct drm_device *dev = ring->dev;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+
+	return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+		(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+	       (ring->id == VCS || ring->id == VCS2);
+}
+
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+				     struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	uint64_t desc;
-	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+			LRC_PPHWSP_PN * PAGE_SIZE;
 
 	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
 
 	desc = GEN8_CTX_VALID;
-	desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
+	desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 	if (IS_GEN8(ctx_obj->base.dev))
 		desc |= GEN8_CTX_L3LLC_COHERENT;
 	desc |= GEN8_CTX_PRIVILEGE;
@@ -285,10 +312,8 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 	/* desc |= GEN8_CTX_FORCE_RESTORE; */
 
 	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
-	if (IS_GEN9(dev) &&
-	    INTEL_REVID(dev) <= SKL_REVID_B0 &&
-	    (ring->id == BCS || ring->id == VCS ||
-	     ring->id == VECS || ring->id == VCS2))
+	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+	if (disable_lite_restore_wa(ring))
 		desc |= GEN8_CTX_FORCE_RESTORE;
 
 	return desc;
@@ -304,13 +329,13 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 	uint64_t desc[2];
 
 	if (rq1) {
-		desc[1] = execlists_ctx_descriptor(rq1);
+		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
 		rq1->elsp_submitted++;
 	} else {
 		desc[1] = 0;
 	}
 
-	desc[0] = execlists_ctx_descriptor(rq0);
+	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
 	rq0->elsp_submitted++;
 
 	/* You must always write both descriptors in the order below. */
@@ -324,7 +349,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
 
 	/* ELSP is a wo register, use another nearby reg for posting */
-	POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
+	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
 	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 	spin_unlock(&dev_priv->uncore.lock);
 }
@@ -342,16 +367,18 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
 	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
-	page = i915_gem_object_get_page(ctx_obj, 1);
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	reg_state = kmap_atomic(page);
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
-	/* True PPGTT with dynamic page allocation: update PDP registers and
-	 * point the unallocated PDPs to the scratch page
-	 */
-	if (ppgtt) {
+	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+		/* True 32b PPGTT with dynamic page allocation: update PDP
+		 * registers and point the unallocated PDPs to scratch page.
+		 * PML4 is allocated during ppgtt init, so this is not needed
+		 * in 48-bit mode.
+		 */
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
@@ -477,7 +504,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
-	u32 status;
+	u32 status = 0;
 	u32 status_id;
 	u32 submit_contexts = 0;
 
@@ -492,10 +519,8 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 
 	while (read_pointer < write_pointer) {
 		read_pointer++;
-		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % GEN8_CSB_ENTRIES) * 8);
-		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
+		status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
+		status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
 
 		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
 			continue;
@@ -515,8 +540,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 		}
 	}
 
-	if (submit_contexts != 0)
+	if (disable_lite_restore_wa(ring)) {
+		/* Prevent a ctx to preempt itself */
+		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
+		    (submit_contexts != 0))
+			execlists_context_unqueue(ring);
+	} else if (submit_contexts != 0) {
 		execlists_context_unqueue(ring);
+	}
 
 	spin_unlock(&ring->execlist_lock);
 
@@ -540,8 +571,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 
 	i915_gem_request_reference(request);
 
-	request->tail = request->ringbuf->tail;
-
 	spin_lock_irq(&ring->execlist_lock);
 
 	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
@@ -694,13 +723,19 @@ static void
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *ring = request->ring;
+	struct drm_i915_private *dev_priv = request->i915;
 
 	intel_logical_ring_advance(request->ringbuf);
 
+	request->tail = request->ringbuf->tail;
+
 	if (intel_ring_stopped(ring))
 		return;
 
-	execlists_context_queue(request);
+	if (dev_priv->guc.execbuf_client)
+		i915_guc_submit(dev_priv->guc.execbuf_client, request);
+	else
+		execlists_context_queue(request);
 }
 
 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
@@ -767,8 +802,7 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 /**
  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
  *
- * @request: The request to start some new work for
- * @ctx: Logical ring context whose ringbuffer is being prepared.
+ * @req: The request to start some new work for
  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
  *
  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -870,21 +904,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	if (args->num_cliprects != 0) {
-		DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
-		return -EINVAL;
-	} else {
-		if (args->DR4 == 0xffffffff) {
-			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
-			args->DR4 = 0;
-		}
-
-		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
-			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
-			return -EINVAL;
-		}
-	}
-
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
 		DRM_DEBUG("sol reset is gen7 only\n");
 		return -EINVAL;
@@ -988,34 +1007,54 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
+static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *ctx_obj,
+		struct intel_ringbuffer *ringbuf)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
+			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (ret)
+		return ret;
+
+	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+	if (ret)
+		goto unpin_ctx_obj;
+
+	ctx_obj->dirty = true;
+
+	/* Invalidate GuC TLB. */
+	if (i915.enable_guc_submission)
+		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+	return ret;
+
+unpin_ctx_obj:
+	i915_gem_object_ggtt_unpin(ctx_obj);
+
+	return ret;
+}
+
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
+	int ret = 0;
 	struct intel_engine_cs *ring = rq->ring;
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = rq->ringbuf;
-	int ret = 0;
 
-	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-		ret = i915_gem_obj_ggtt_pin(ctx_obj,
-				GEN8_LR_CONTEXT_ALIGN, 0);
+		ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
 		if (ret)
 			goto reset_pin_count;
-
-		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
-		if (ret)
-			goto unpin_ctx_obj;
-
-		ctx_obj->dirty = true;
 	}
-
 	return ret;
 
-unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
 	rq->ctx->engine[ring->id].pin_count = 0;
-
 	return ret;
 }
@@ -1113,7 +1152,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
 	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
-	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
 	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
@@ -1131,7 +1170,7 @@
 	wa_ctx_emit(batch, index, 0);
 	wa_ctx_emit(batch, index, 0);
 
-	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
 	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
@@ -1200,9 +1239,10 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
 	if (IS_BROADWELL(ring->dev)) {
-		index = gen8_emit_flush_coherentl3_wa(ring, batch, index);
-		if (index < 0)
-			return index;
+		int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+		if (rc < 0)
+			return rc;
+		index = rc;
 	}
 
 	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
@@ -1426,6 +1466,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u8 next_context_status_buffer_hw;
 
+	lrc_setup_hardware_status_page(ring,
+				ring->default_context->engine[ring->id].state);
+
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
@@ -1542,12 +1585,16 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	 * Ideally, we should set Force PD Restore in ctx descriptor,
 	 * but we can't. Force Restore would be a second option, but
 	 * it is unsafe in case of lite-restore (because the ctx is
-	 * not idle). */
+	 * not idle). PML4 is allocated during ppgtt init so this is
+	 * not needed in 48-bit.*/
 	if (req->ctx->ppgtt &&
 	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
-		ret = intel_logical_ring_emit_pdps(req);
-		if (ret)
-			return ret;
+		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
+		    !intel_vgpu_active(req->i915->dev)) {
+			ret = intel_logical_ring_emit_pdps(req);
+			if (ret)
+				return ret;
+		}
 
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
 	}
@@ -1714,6 +1761,34 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
+static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+
+	/*
+	 * On BXT A steppings there is a HW coherency issue whereby the
+	 * MI_STORE_DATA_IMM storing the completed request's seqno
+	 * occasionally doesn't invalidate the CPU cache. Work around this by
+	 * clflushing the corresponding cacheline whenever the caller wants
+	 * the coherency to be guaranteed. Note that this cacheline is known
+	 * to be clean at this point, since we only write it in
+	 * bxt_a_set_seqno(), where we also do a clflush after the write. So
+	 * this clflush in practice becomes an invalidate operation.
+	 */
+
+	if (!lazy_coherency)
+		intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+
+	/* See bxt_a_get_seqno() explaining the reason for the clflush. */
+	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1856,7 +1931,21 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 	if (ret)
 		return ret;
 
-	ret = intel_lr_context_deferred_create(ring->default_context, ring);
+	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	if (ret)
+		return ret;
+
+	/* As this is the default context, always pin it */
+	ret = intel_lr_context_do_pin(
+			ring,
+			ring->default_context->engine[ring->id].state,
+			ring->default_context->engine[ring->id].ringbuf);
+	if (ret) {
+		DRM_ERROR(
+			"Failed to pin and map ringbuffer %s: %d\n",
+			ring->name, ret);
+		return ret;
+	}
 
 	return ret;
 }
@@ -1883,8 +1972,13 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->init_hw = gen8_init_render_ring;
 	ring->init_context = gen8_init_rcs_context;
 	ring->cleanup = intel_fini_pipe_control;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush_render;
 	ring->irq_get = gen8_logical_ring_get_irq;
@@ -1930,8 +2024,13 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
 	ring->irq_get = gen8_logical_ring_get_irq;
@@ -1980,8 +2079,13 @@ static int logical_blt_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
 	ring->irq_get = gen8_logical_ring_get_irq;
@@ -2005,8 +2109,13 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
 	ring->irq_get = gen8_logical_ring_get_irq;
@@ -2059,14 +2168,8 @@ int intel_logical_rings_init(struct drm_device *dev)
 		goto cleanup_vebox_ring;
 	}
 
-	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-	if (ret)
-		goto cleanup_bsd2_ring;
-
 	return 0;
 
-cleanup_bsd2_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
 	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
@@ -2152,7 +2255,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution.
 	 */
-	page = i915_gem_object_get_page(ctx_obj, 1);
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	reg_state = kmap_atomic(page);
 
 	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
@@ -2229,13 +2332,24 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
 	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
 
-	/* With dynamic page allocation, PDPs may not be allocated at this point,
-	 * Point the unallocated PDPs to the scratch page
-	 */
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+		/* 64b PPGTT (48bit canonical)
+		 * PDP0_DESCRIPTOR contains the base address to PML4 and
+		 * other PDP Descriptors are ignored.
+		 */
+		ASSIGN_CTX_PML4(ppgtt, reg_state);
+	} else {
+		/* 32b PPGTT
+		 * PDP*_DESCRIPTOR contains the base address of space supported.
+		 * With dynamic page allocation, PDPs may not be allocated at
+		 * this point. Point the unallocated PDPs to the scratch page
+		 */
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+	}
+
 	if (ring->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
@@ -2276,8 +2390,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 				i915_gem_object_ggtt_unpin(ctx_obj);
 			}
 			WARN_ON(ctx->engine[ring->id].pin_count);
-			intel_destroy_ringbuffer_obj(ringbuf);
-			kfree(ringbuf);
+			intel_ringbuffer_free(ringbuf);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
@@ -2311,12 +2424,13 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *default_ctx_obj)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct page *page;
 
-	/* The status page is offset 0 from the default context object
-	 * in LRC mode. */
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
-	ring->status_page.page_addr =
-		kmap(sg_page(default_ctx_obj->pages->sgl));
+	/* The HWSP is part of the default context object in LRC mode. */
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj) +
+			LRC_PPHWSP_PN * PAGE_SIZE;
+	page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
+	ring->status_page.page_addr = kmap(page);
 	ring->status_page.obj = default_ctx_obj;
 
 	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
@@ -2325,7 +2439,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 }
 
 /**
- * intel_lr_context_deferred_create() - create the LRC specific bits of a context
+ * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
  * @ring: engine to be used with the context.
 *
@@ -2337,10 +2451,10 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  *
 * Return: non-zero on error.
  */
-int intel_lr_context_deferred_create(struct intel_context *ctx,
+
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				     struct intel_engine_cs *ring)
 {
-	const bool is_global_default_ctx = (ctx == ring->default_context);
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
@@ -2352,107 +2466,58 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 
 	context_size = round_up(get_lr_context_size(ring), 4096);
 
+	/* One extra page as the sharing data between driver and GuC */
+	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
+
 	ctx_obj = i915_gem_alloc_object(dev, context_size);
 	if (!ctx_obj) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
 		return -ENOMEM;
 	}
 
-	if (is_global_default_ctx) {
-		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
-					ret);
-			drm_gem_object_unreference(&ctx_obj->base);
-			return ret;
-		}
-	}
-
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-				ring->name);
-		ret = -ENOMEM;
-		goto error_unpin_ctx;
-	}
-
-	ringbuf->ring = ring;
-
-	ringbuf->size = 32 * PAGE_SIZE;
-	ringbuf->effective_size = ringbuf->size;
-	ringbuf->head = 0;
-	ringbuf->tail = 0;
-	ringbuf->last_retired_head = -1;
-	intel_ring_update_space(ringbuf);
-
-	if (ringbuf->obj == NULL) {
-		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-		if (ret) {
-			DRM_DEBUG_DRIVER(
-			"Failed to allocate ringbuffer obj %s: %d\n",
-			ring->name, ret);
-			goto error_free_rbuf;
-		}
-
-		if (is_global_default_ctx) {
-			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-			if (ret) {
-				DRM_ERROR(
-					"Failed to pin and map ringbuffer %s: %d\n",
-					ring->name, ret);
-				goto error_destroy_rbuf;
-			}
-		}
-
+	ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+	if (IS_ERR(ringbuf)) {
+		ret = PTR_ERR(ringbuf);
+		goto error_deref_obj;
 	}
 
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		goto error;
+		goto error_ringbuf;
 	}
 
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
-	if (ctx == ring->default_context)
-		lrc_setup_hardware_status_page(ring, ctx_obj);
-	else if (ring->id == RCS && !ctx->rcs_initialized) {
-		if (ring->init_context) {
-			struct drm_i915_gem_request *req;
+	if (ctx != ring->default_context && ring->init_context) {
+		struct drm_i915_gem_request *req;
 
-			ret = i915_gem_request_alloc(ring, ctx, &req);
-			if (ret)
-				return ret;
-
-			ret = ring->init_context(req);
-			if (ret) {
-				DRM_ERROR("ring init context: %d\n", ret);
-				i915_gem_request_cancel(req);
-				ctx->engine[ring->id].ringbuf = NULL;
-				ctx->engine[ring->id].state = NULL;
-				goto error;
-			}
-
-			i915_add_request_no_flush(req);
+		ret = i915_gem_request_alloc(ring,
+			ctx, &req);
+		if (ret) {
+			DRM_ERROR("ring create req: %d\n",
+				ret);
+			goto error_ringbuf;
 		}
 
-		ctx->rcs_initialized = true;
+		ret = ring->init_context(req);
+		if (ret) {
+			DRM_ERROR("ring init context: %d\n",
+				ret);
+			i915_gem_request_cancel(req);
+			goto error_ringbuf;
+		}
+		i915_add_request_no_flush(req);
 	}
-
 	return 0;
 
-error:
-	if (is_global_default_ctx)
-		intel_unpin_ringbuffer_obj(ringbuf);
-error_destroy_rbuf:
-	intel_destroy_ringbuffer_obj(ringbuf);
-error_free_rbuf:
-	kfree(ringbuf);
-error_unpin_ctx:
-	if (is_global_default_ctx)
-		i915_gem_object_ggtt_unpin(ctx_obj);
+error_ringbuf:
+	intel_ringbuffer_free(ringbuf);
+error_deref_obj:
 	drm_gem_object_unreference(&ctx_obj->base);
+	ctx->engine[ring->id].ringbuf = NULL;
+	ctx->engine[ring->id].state = NULL;
 	return ret;
 }
 
@@ -2478,7 +2543,7 @@ void intel_lr_context_reset(struct drm_device *dev,
 			WARN(1, "Failed get_pages for context obj\n");
 			continue;
 		}
-		page = i915_gem_object_get_page(ctx_obj, 1);
+		page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 		reg_state = kmap_atomic(page);
 
 		reg_state[CTX_RING_HEAD+1] = 0;
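
Editor's note (not part of the patch): the standalone C sketch below only illustrates how the descriptor pieces introduced above fit together, i.e. how intel_lr_context_descriptor() composes a GEN8+ execlists context descriptor once the LRCA is offset past the new per-process HWSP page and the addressing mode (legacy 32b vs legacy 64b, selected by USES_FULL_48BIT_PPGTT) is placed at GEN8_CTX_ADDRESSING_MODE_SHIFT. Only the shift value (3) and the addressing-mode enum values come from the diff; the VALID/PRIVILEGE bit positions, the single-page PPHWSP offset and all EX_-prefixed names are assumptions made for this example, not values taken from the kernel headers.

/*
 * Illustrative sketch only -- not part of the kernel patch above.
 * Constants marked "assumed" are placeholders for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE			4096ull
#define EX_PPHWSP_PAGES			1ull	/* assumed: one PPHWSP page before the state page */

#define EX_CTX_VALID			(1ull << 0)	/* assumed bit position */
#define EX_CTX_PRIVILEGE		(1ull << 8)	/* assumed bit position */
#define EX_CTX_ADDRESSING_MODE_SHIFT	3	/* GEN8_CTX_ADDRESSING_MODE_SHIFT in the diff */

enum ex_addressing_mode {			/* mirrors the enum in the diff */
	EX_ADVANCED_CONTEXT = 0,
	EX_LEGACY_32B_CONTEXT,
	EX_ADVANCED_AD_CONTEXT,
	EX_LEGACY_64B_CONTEXT
};

/* Compose a descriptor the way intel_lr_context_descriptor() does. */
static uint64_t ex_context_descriptor(uint64_t ctx_obj_ggtt_offset, int full_48bit)
{
	/* The LRCA now points at the state page, i.e. past the PPHWSP page. */
	uint64_t lrca = ctx_obj_ggtt_offset + EX_PPHWSP_PAGES * EX_PAGE_SIZE;
	uint64_t mode = full_48bit ? EX_LEGACY_64B_CONTEXT : EX_LEGACY_32B_CONTEXT;
	uint64_t desc = EX_CTX_VALID;

	desc |= mode << EX_CTX_ADDRESSING_MODE_SHIFT;
	desc |= EX_CTX_PRIVILEGE;
	desc |= lrca;				/* low half: 4K-aligned LRCA          */
	desc |= (lrca >> 12) << 32;		/* high half: 20-bit context ID (lrca >> 12,
						 * as in intel_execlists_ctx_id() above) */
	return desc;
}

int main(void)
{
	uint64_t ggtt = 0x12345000ull;		/* made-up, 4K-aligned GGTT offset */

	printf("32b ppgtt desc: 0x%016llx\n",
	       (unsigned long long)ex_context_descriptor(ggtt, 0));
	printf("48b ppgtt desc: 0x%016llx\n",
	       (unsigned long long)ex_context_descriptor(ggtt, 1));
	return 0;
}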