/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
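
/*
 * i915 GEM requests are exposed to the rest of the kernel as struct fence
 * objects; the functions below implement the fence_ops callbacks on top of
 * the request machinery in this file.
 */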
static const char *i915_fence_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
	 * a single timeline. Note that each timeline will have
	 * multiple execution contexts (fence contexts) as we allow
	 * engines within a single timeline to execute in parallel.
	 */
	return "global";
}
static bool i915_fence_signaled(struct fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}
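
/*
 * The fence API expresses timeouts in jiffies, whereas __i915_wait_request
 * works in nanoseconds; convert on the way in and out so the remaining time
 * handed back to the fence core stays in jiffies.
 */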
static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = __i915_wait_request(to_request(fence),
				  interruptible, timeout,
				  NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}
static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
					  int size)
{
	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
}
static void i915_fence_release(struct fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}
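
/* The callbacks above are collected into the ops table passed to fence_init(). */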
const struct fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
	.fence_value_str = i915_fence_value_str,
	.timeline_value_str = i915_fence_timeline_value_str,
};
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	req->pid = get_pid(task_pid(current));

	return 0;
}
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	put_pid(request->pid);
	request->pid = NULL;
}
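
/*
 * Retiring a request unwinds its construction: update the ring's last
 * retired head, drop the client link, release the previous context pinned
 * for execlists, and finally drop the context and request references.
 */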
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	trace_i915_gem_request_retire(request);
	list_del_init(&request->list);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	request->ring->last_retired_head = request->postfix;

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);

	if (list_empty(&req->list))
		return;

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), list);

		i915_gem_request_retire(tmp);
	} while (tmp != req);

	WARN_ON(i915_verify_lists(engine->dev));
}
static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}
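
/*
 * Changing the global seqno requires idling every engine and retiring all
 * outstanding requests first; if the new value would appear to move
 * backwards (a wrap), the breadcrumb waiters and signalers must also be
 * flushed so nothing is left waiting on a stale seqno.
 */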
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we will
	 * inject to the ring.
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works.
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (unlikely(dev_priv->next_seqno == 0)) {
		int ret;

		ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	struct drm_i915_gem_request *req;
	u32 seqno;
	int ret;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->request_list,
				       typeof(*req), list);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_get_seqno(dev_priv, &seqno);
	if (ret)
		goto err;

	spin_lock_init(&req->lock);
	fence_init(&req->fence,
		   &i915_fence_ops,
		   &req->lock,
		   engine->fence_context,
		   seqno);

	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ERR_PTR(ret);
}
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request,
			struct drm_i915_gem_object *obj,
			bool flush_caches)
{
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	if (WARN_ON(!request))
		return;

	engine = request->engine;
	ring = request->ring;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = ring->tail;
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		ret = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	request->head = request_start;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
	list_add_tail(&request->list, &engine->request_list);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request->postfix = ring->tail;

	if (i915.enable_execlists)
		ret = engine->emit_request(request);
	else
		ret = engine->add_request(request);
	/* Not allowed to fail! */
	WARN(ret, "emit|add_request failed: %d!\n", ret);

	/* Sanity check that the reserved size was large enough. */
	ret = ring->tail - request_start;
	if (ret < 0)
		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	i915_gem_mark_busy(engine);
}
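
/*
 * Busy-wait helpers: a cheap CPU-local clock in approximately microseconds
 * and the condition used to abandon the optimistic spin, see
 * __i915_spin_request().
 */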
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}
/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns
 * the errno with remaining time filled in timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps)
{
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;
	unsigned long timeout_remain;
	int ret = 0;

	might_sleep();

	if (list_empty(&req->list))
		return 0;

	if (i915_gem_request_completed(req))
		return 0;

	timeout_remain = MAX_SCHEDULE_TIMEOUT;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		/* Record current time in case interrupted, or wedged */
		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
		*timeout += ktime_get_raw_ns();
	}

	trace_i915_gem_request_wait_begin(req);

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

	/* Optimistic spin for the next ~jiffie before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->fence.seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout_remain = io_schedule_timeout(timeout_remain);
		if (timeout_remain == 0) {
			ret = -ETIME;
			break;
		}

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}
	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_engine_remove_wait(req->engine, &wait);
	__set_current_state(TASK_RUNNING);
complete:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		*timeout -= ktime_get_raw_ns();
		if (*timeout < 0)
			*timeout = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	if (IS_RPS_USER(rps) &&
	    req->fence.seqno == req->engine->last_submitted_seqno) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&req->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&req->i915->rps.client_lock);
	}

	return ret;
}
/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int i915_wait_request(struct drm_i915_gem_request *req)
{
	int ret;

	lockdep_assert_held(&req->i915->drm.struct_mutex);

	ret = __i915_wait_request(req, req->i915->mm.interruptible, NULL, NULL);
	if (ret)
		return ret;

	/* If the GPU hung, we want to keep the requests to find the guilty. */
	if (!i915_reset_in_progress(&req->i915->gpu_error))
		i915_gem_request_retire_upto(req);

	return 0;
}