/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>
#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)
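
/* Write the shadow PPGTT root pointers into a ring context. The PDP MMIO
 * pairs in the execlist ring context are laid out from pdp3 down to pdp0,
 * so the entries are copied in reverse order.
 */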
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}
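
/* Populate the shadow context from the guest: copy the guest context pages
 * through the hypervisor interface, copy the tracked per-ring registers into
 * the shadow ring context page, and replace the PDP root pointers with the
 * shadow page table.
 */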
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EINVAL;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}
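
/* Requests submitted on behalf of GVT run on the shadow context, which is
 * created with force-single-submission set; that flag distinguishes them
 * from native i915 requests in the context status notifier.
 */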
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}
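
/* Context status notifier callback: switch the vGPU render MMIO state in and
 * out around the shadow context's schedule-in/out, and track
 * shadow_ctx_active so that workload completion can wait for the final
 * switch-out.
 */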
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[req->engine->id];

	if (!is_gvt_request(req) || unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		intel_gvt_load_render_mmio(workload->vgpu,
					   workload->ring_id);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		intel_gvt_restore_render_mmio(workload->vgpu,
					      workload->ring_id);
		/* A status of -EINPROGRESS means the workload hit no issue
		 * during dispatch, so clear it to zero on SCHEDULE_OUT.
		 * Any other status means dispatch failed and must not be
		 * overwritten.
		 */
		if (workload->status == -EINPROGRESS)
			workload->status = 0;
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}
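
/* Submit one workload to i915: pin the shadow context, allocate a request,
 * scan and shadow the ring buffer (and the indirect workaround context on
 * RCS), populate the shadow context from the guest, then add the request.
 */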
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	mutex_lock(&dev_priv->drm.struct_mutex);

	/* GVT pins the shadow context itself even though i915 will pin it
	 * again when allocating the request. GVT updates the guest context
	 * from the shadow context after the workload completes, and by then
	 * i915 may already have unpinned the shadow context, making the
	 * shadow_ctx pages invalid. So GVT holds its own pin and drops it
	 * only after the guest context has been updated.
	 */
	ret = engine->context_pin(engine, shadow_ctx);
	if (ret) {
		gvt_vgpu_err("fail to pin shadow context\n");
		workload->status = ret;
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto out;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
	workload->req = i915_gem_request_get(rq);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto out;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto out;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto out;
	}

	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			ring_id, workload->req);

	ret = 0;
	workload->dispatched = true;
out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(rq))
		i915_add_request(rq);
	else
		engine->context_unpin(engine, shadow_ctx);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}
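
/* Pick the next workload for a ring from the current vgpu's queue. Returns
 * NULL when there is no current vgpu, a reschedule is pending, or the queue
 * is empty. The gvt lock is taken and released internally.
 */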
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * No current vgpu / about to be scheduled out / no workload:
	 * bail out.
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * There is still a current workload; the workload dispatcher may
	 * have failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * Pick a workload as the current workload. Once it is set, the
	 * scheduling policy routines will wait for it to finish before
	 * scheduling out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}
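
/* Write the shadow context back to the guest after a workload completes:
 * copy the context pages into guest memory, update the guest ring header
 * with the workload tail, and copy back the tracked registers.
 */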
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}
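
/* Retire the current workload on a ring: wait for the shadow context to be
 * switched out, write the result back to the guest unless the workload
 * failed or the vgpu is resetting, unpin the shadow context, and notify the
 * workload owner and the scheduler.
 */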
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	/* A workload with a request must wait for the context switch-out so
	 * that the request is known to be complete. A workload without a
	 * request can be completed directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !vgpu->resetting) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin the shadow ctx now that the guest context update is done */
		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);
	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);
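
/* Per-ring worker thread: sleep until pick_next_workload() returns a
 * workload, dispatch it, wait for its request to complete, then retire it.
 * Force-wake is held across submission on SKL/KBL.
 */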
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		mutex_lock(&scheduler_mutex);

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);

		mutex_unlock(&scheduler_mutex);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&vgpu->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}
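
/* Create one workload thread per engine and register the context status
 * notifier used to track shadow context switch-in/out.
 */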
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);
		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}
		param->gvt = gvt;
		param->ring_id = i;
		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}
		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	return 0;
}