/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

struct render_mmio {
	int ring_id;		/* engine this register belongs to */
	i915_reg_t reg;		/* MMIO offset */
	u32 mask;		/* non-zero for masked (write-enable) registers */
	bool in_context;	/* saved/restored by the logical ring context */
	u32 value;		/* host value latched at switch-in time */
};
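
/*
 * Registers in the lists below are either not held in the logical ring
 * context at all, or need extra handling even when they are. On every
 * engine ownership change, GVT-g saves the current owner's values and
 * writes the new owner's values by direct MMIO access.
 */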
static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
	{RCS, _MMIO(0x229c), 0xffff, false},
	{RCS, _MMIO(0x2248), 0x0, false},
	{RCS, _MMIO(0x2098), 0x0, false},
	{RCS, _MMIO(0x20c0), 0xffff, true},
	{RCS, _MMIO(0x24d0), 0, false},
	{RCS, _MMIO(0x24d4), 0, false},
	{RCS, _MMIO(0x24d8), 0, false},
	{RCS, _MMIO(0x24dc), 0, false},
	{RCS, _MMIO(0x24e0), 0, false},
	{RCS, _MMIO(0x24e4), 0, false},
	{RCS, _MMIO(0x24e8), 0, false},
	{RCS, _MMIO(0x24ec), 0, false},
	{RCS, _MMIO(0x24f0), 0, false},
	{RCS, _MMIO(0x24f4), 0, false},
	{RCS, _MMIO(0x24f8), 0, false},
	{RCS, _MMIO(0x24fc), 0, false},
	{RCS, _MMIO(0x7004), 0xffff, true},
	{RCS, _MMIO(0x7008), 0xffff, true},
	{RCS, _MMIO(0x7000), 0xffff, true},
	{RCS, _MMIO(0x7010), 0xffff, true},
	{RCS, _MMIO(0x7300), 0xffff, true},
	{RCS, _MMIO(0x83a4), 0xffff, true},

	{BCS, _MMIO(0x2229c), 0xffff, false},
	{BCS, _MMIO(0x2209c), 0xffff, false},
	{BCS, _MMIO(0x220c0), 0xffff, false},
	{BCS, _MMIO(0x22098), 0x0, false},
	{BCS, _MMIO(0x22028), 0x0, false},
};
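
/* Gen9 (Skylake/Kabylake) needs the full Gen8 set plus additional registers. */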
static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
	{RCS, _MMIO(0x229c), 0xffff, false},
	{RCS, _MMIO(0x2248), 0x0, false},
	{RCS, _MMIO(0x2098), 0x0, false},
	{RCS, _MMIO(0x20c0), 0xffff, true},
	{RCS, _MMIO(0x24d0), 0, false},
	{RCS, _MMIO(0x24d4), 0, false},
	{RCS, _MMIO(0x24d8), 0, false},
	{RCS, _MMIO(0x24dc), 0, false},
	{RCS, _MMIO(0x24e0), 0, false},
	{RCS, _MMIO(0x24e4), 0, false},
	{RCS, _MMIO(0x24e8), 0, false},
	{RCS, _MMIO(0x24ec), 0, false},
	{RCS, _MMIO(0x24f0), 0, false},
	{RCS, _MMIO(0x24f4), 0, false},
	{RCS, _MMIO(0x24f8), 0, false},
	{RCS, _MMIO(0x24fc), 0, false},
	{RCS, _MMIO(0x7004), 0xffff, true},
	{RCS, _MMIO(0x7008), 0xffff, true},
	{RCS, _MMIO(0x7000), 0xffff, true},
	{RCS, _MMIO(0x7010), 0xffff, true},
	{RCS, _MMIO(0x7300), 0xffff, true},
	{RCS, _MMIO(0x83a4), 0xffff, true},

	{RCS, _MMIO(0x40e0), 0, false},
	{RCS, _MMIO(0x40e4), 0, false},
	{RCS, _MMIO(0x2580), 0xffff, true},
	{RCS, _MMIO(0x7014), 0xffff, true},
	{RCS, _MMIO(0x20ec), 0xffff, false},
	{RCS, _MMIO(0xb118), 0, false},
	{RCS, _MMIO(0xe100), 0xffff, true},
	{RCS, _MMIO(0xe180), 0xffff, true},
	{RCS, _MMIO(0xe184), 0xffff, true},
	{RCS, _MMIO(0xe188), 0xffff, true},
	{RCS, _MMIO(0xe194), 0xffff, true},
	{RCS, _MMIO(0x4de0), 0, false},
	{RCS, _MMIO(0x4de4), 0, false},
	{RCS, _MMIO(0x4de8), 0, false},
	{RCS, _MMIO(0x4dec), 0, false},
	{RCS, _MMIO(0x4df0), 0, false},
	{RCS, _MMIO(0x4df4), 0, false},

	{BCS, _MMIO(0x2229c), 0xffff, false},
	{BCS, _MMIO(0x2209c), 0xffff, false},
	{BCS, _MMIO(0x220c0), 0xffff, false},
	{BCS, _MMIO(0x22098), 0x0, false},
	{BCS, _MMIO(0x22028), 0x0, false},

	{VCS2, _MMIO(0x1c028), 0xffff, false},

	{VECS, _MMIO(0x1a028), 0xffff, false},

	{RCS, _MMIO(0x7304), 0xffff, true},
	{RCS, _MMIO(0x2248), 0x0, false},
	{RCS, _MMIO(0x940c), 0x0, false},
	{RCS, _MMIO(0x4ab8), 0x0, false},

	{RCS, _MMIO(0x4ab0), 0x0, false},
	{RCS, _MMIO(0x20d4), 0x0, false},

	{RCS, _MMIO(0xb004), 0x0, false},
	{RCS, _MMIO(0x20a0), 0x0, false},
	{RCS, _MMIO(0x20e4), 0xffff, false},
};
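
/*
 * Scratch space for the host's MOCS values while a vGPU owns the
 * hardware: 64 control registers per engine, plus 32 L3 control
 * values that are handled only for the render engine.
 */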
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
static u32 gen9_render_mocs_L3[32];
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	enum forcewake_domains fw;
	i915_reg_t reg;
	u32 regs[] = {
		[RCS] = 0x4260,
		[VCS] = 0x4264,
		[VCS2] = 0x4268,
		[BCS] = 0x426c,
		[VECS] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake while invalidating the RCS TLB caches;
	 * otherwise the device can drop into RC6 and interrupt the
	 * invalidation in flight.
	 */
	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(dev_priv, fw);

	/* Writing bit 0 kicks off the invalidation; HW clears it when done. */
	I915_WRITE_FW(reg, 0x1);

	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
	else
		vgpu_vreg(vgpu, regs[ring_id]) = 0;

	intel_uncore_forcewake_put(dev_priv, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
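
/*
 * MOCS (Memory Object Control State) registers define the cacheability
 * of memory accesses. They are not switched by the ring context, so the
 * host copy is parked in the arrays above and the vGPU copy is loaded
 * by direct MMIO writes.
 */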
static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	i915_reg_t offset, l3_offset;
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int i;

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	offset.reg = regs[ring_id];
	for (i = 0; i < 64; i++) {
		gen9_render_mocs[ring_id][i] = I915_READ(offset);
		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
		POSTING_READ(offset);
		offset.reg += 4;
	}

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < 32; i++) {
			gen9_render_mocs_L3[i] = I915_READ(l3_offset);
			I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
			POSTING_READ(l3_offset);
			l3_offset.reg += 4;
		}
	}
}
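
/* The mirror image of load_mocs(): park the vGPU values and reload the host's. */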
static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	i915_reg_t offset, l3_offset;
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int i;

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	offset.reg = regs[ring_id];
	for (i = 0; i < 64; i++) {
		vgpu_vreg(vgpu, offset) = I915_READ(offset);
		I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
		POSTING_READ(offset);
		offset.reg += 4;
	}

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < 32; i++) {
			vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
			I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
			POSTING_READ(l3_offset);
			l3_offset.reg += 4;
		}
	}
}
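
/*
 * Dword index of the CTX_CONTEXT_CONTROL *value* (not its register
 * offset) within the lrc_reg_state page of a logical ring context.
 */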
#define CTX_CONTEXT_CONTROL_VAL	0x03

/* Switch ring mmio values (context) from host to a vGPU. */
static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct render_mmio *mmio;
	u32 v;
	int i, array_size;
	u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
		|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
		mmio = gen9_render_mmio_list;
		array_size = ARRAY_SIZE(gen9_render_mmio_list);
		load_mocs(vgpu, ring_id);
	} else {
		mmio = gen8_render_mmio_list;
		array_size = ARRAY_SIZE(gen8_render_mmio_list);
	}

	for (i = 0; i < array_size; i++, mmio++) {
		if (mmio->ring_id != ring_id)
			continue;

		/* Latch the host value so switch_mmio_to_host() can restore it. */
		mmio->value = I915_READ(mmio->reg);

		/*
		 * If the context is restore-inhibited, the HW will not load
		 * in_context registers from it, so load them into the HW by
		 * an MMIO write here. Otherwise the context restore covers
		 * them and this write can be skipped.
		 */
		if (mmio->in_context &&
		    ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
		    i915.enable_execlists)
			continue;

		/* Masked registers take write-enable bits in the high word. */
		if (mmio->mask)
			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
		else
			v = vgpu_vreg(vgpu, mmio->reg);

		I915_WRITE(mmio->reg, v);
		POSTING_READ(mmio->reg);

		trace_render_mmio(vgpu->id, "load",
				  i915_mmio_reg_offset(mmio->reg),
				  mmio->value);
	}

	handle_tlb_pending_event(vgpu, ring_id);
}
/* Switch ring mmio values (context) from a vGPU back to host. */
static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct render_mmio *mmio;
	u32 v;
	int i, array_size;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		mmio = gen9_render_mmio_list;
		array_size = ARRAY_SIZE(gen9_render_mmio_list);
		restore_mocs(vgpu, ring_id);
	} else {
		mmio = gen8_render_mmio_list;
		array_size = ARRAY_SIZE(gen8_render_mmio_list);
	}

	for (i = 0; i < array_size; i++, mmio++) {
		if (mmio->ring_id != ring_id)
			continue;

		/* Park the vGPU's current value in its virtual register file. */
		vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);

		if (mmio->mask) {
			vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
			v = mmio->value | (mmio->mask << 16);
		} else
			v = mmio->value;

		/* in_context registers are restored by the HW from the context. */
		if (mmio->in_context)
			continue;

		I915_WRITE(mmio->reg, v);
		POSTING_READ(mmio->reg);

		trace_render_mmio(vgpu->id, "restore",
				  i915_mmio_reg_offset(mmio->reg),
				  mmio->value);
	}
}
/**
 * intel_gvt_switch_mmio - switch mmio context of a specific engine
 * @pre: the vGPU that last owned the engine
 * @next: the vGPU to switch to
 * @ring_id: the engine to switch
 *
 * A NULL @pre means the host owned the engine; a NULL @next means we are
 * switching back to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next, int ring_id)
{
	if (WARN_ON(!pre && !next))
		return;

	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	/*
	 * TODO: Optimize for vGPU to vGPU switch by merging
	 * switch_mmio_to_host() and switch_mmio_to_vgpu().
	 */
	if (pre)
		switch_mmio_to_host(pre, ring_id);

	if (next)
		switch_mmio_to_vgpu(next, ring_id);
}
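
/*
 * Usage sketch (illustrative only; the actual call site lives in the
 * GVT-g scheduler): when the render engine moves from vgpu_a to vgpu_b,
 * the caller would issue
 *
 *	intel_gvt_switch_mmio(vgpu_a, vgpu_b, RCS);
 *
 * and pass NULL on either side when the host owns the engine before or
 * after the switch.
 */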