/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
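
/* Kick the current bottom-half waiter (if any). Returns ENGINE_WAKEUP_WAITER
 * when a waiter was present, additionally ORed with ENGINE_WAKEUP_ASLEEP if
 * that waiter had to be woken from a sleeping state.
 */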
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk))
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned int result;

	spin_lock_irq(&b->irq_lock);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irq(&b->irq_lock);

	return result;
}
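
/* Expire one full hangcheck period from now, rounded up so that timer
 * wakeups across the system may coalesce.
 */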
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
			 engine->name, __builtin_return_address(0),
			 yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted)));

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
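
/* Timer callback: while the irq is armed, periodically check that user
 * interrupts are still arriving. If the interrupt count is stuck and the
 * waiter is asleep, report a missed breadcrumb and arm the fake irq.
 */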
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_armed)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g, low priority task on a loaded
	 * system) and wait until it sleeps before declaring a missed interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}
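
/* Timer callback used in lieu of (or alongside) the real user interrupt:
 * wakes the oldest waiter every jiffie so that it performs a coherent
 * seqno check, and disarms the breadcrumbs once no waiter remains.
 */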
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (!__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}
static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	b->irq_armed = false;
}
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n;

	if (!b->irq_armed)
		return;

	/* We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */

	spin_lock_irq(&b->rb_lock);
	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		RB_CLEAR_NODE(&wait->node);
		if (wake_up_process(wait->tsk) && wait == b->irq_wait)
			missed_breadcrumb(engine);
	}
	b->waiters = RB_ROOT;

	spin_lock(&b->irq_lock);
	b->irq_wait = NULL;
	__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	spin_unlock_irq(&b->rb_lock);
}
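
/* Decide whether to fall back to the 1-jiffie fake-irq timer: only if this
 * engine has previously missed an interrupt and no further interrupts have
 * arrived since the hangcheck timer last sampled the count.
 */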
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;
	GEM_BUG_ON(b->irq_enabled);

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt,
		 */
		return;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	enable_fake_irq(b);
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
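
/* Hand over the role of interrupt bottom-half to the waiter on @next and
 * wake it so that it can pick up where the outgoing bottom-half left off.
 */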
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	wake_up_process(to_wait(next)->tsk);
}
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed and
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		spin_lock(&b->irq_lock);
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_wait in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return first;
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock_irq(&b->rb_lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);

	return first;
}
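
/* A waiter may be woken as part of a chain of completed waiters if its
 * priority value is no larger than the given threshold (smaller task->prio
 * means more important); the signaler kthread outranks everyone.
 */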
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next)
			__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}
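
/* A signal remains valid only while the request still carries the global
 * seqno that its wait was registered with (the seqno is revoked if the
 * request is unsubmitted).
 */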
static bool signal_valid(const struct drm_i915_gem_request *request)
{
	return intel_wait_check_request(&request->signaling.wait, request);
}

static bool signal_complete(const struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait has completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return signal_valid(request);

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}

static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
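
/* Main loop for the dedicated signaler kthread: wait for the oldest signal
 * to complete, propagate it to the dma-fence, then advance to the next
 * oldest signal in the rbtree, sleeping whenever there is nothing to do.
 */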
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		rcu_read_lock();
		request = rcu_dereference(b->first_signal);
		if (request)
			request = i915_gem_request_get_rcu(request);
		rcu_read_unlock();
		if (signal_complete(request)) {
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			spin_lock_irq(&b->rb_lock);

			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			__intel_engine_remove_wait(engine,
						   &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			if (request == rcu_access_pointer(b->first_signal)) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				rcu_assign_pointer(b->first_signal,
						   rb ? to_signaler(rb) : NULL);
			}
			rb_erase(&request->signaling.node, &b->signals);
			RB_CLEAR_NODE(&request->signaling.node);

			spin_unlock_irq(&b->rb_lock);

			/* Drop the reference held by the signal tree */
			i915_gem_request_put(request);
		} else {
			DEFINE_WAIT(exec);

			if (kthread_should_stop()) {
				GEM_BUG_ON(request);
				break;
			}

			if (request)
				add_wait_queue(&request->execute, &exec);

			schedule();

			if (request)
				remove_wait_queue(&request->execute, &exec);

			if (kthread_should_park())
				kthread_parkme();
		}
		/* Drop the reference taken under the RCU read lock above */
		i915_gem_request_put(request);
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;
	u32 seqno;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_gem_request_global_seqno(request);
	if (!seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
	i915_gem_request_get(request);

	spin_lock(&b->rb_lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(seqno,
				      to_signaler(parent)->signaling.wait.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		rcu_assign_pointer(b->first_signal, request);

	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
	GEM_BUG_ON(!request->signaling.wait.seqno);

	spin_lock(&b->rb_lock);

	if (!RB_EMPTY_NODE(&request->signaling.node)) {
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);
		i915_gem_request_put(request);
	}

	__intel_engine_remove_wait(engine, &request->signaling.wait);

	spin_unlock(&b->rb_lock);

	request->signaling.wait.seqno = 0;
}
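
/* One-time setup of the breadcrumbs state for an engine: the two spinlocks,
 * the hangcheck and fake-irq timers, and the signaler kthread.
 */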
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
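
/* Resynchronise the irq state after a reset: reapply the enable/disable
 * the waiters expect, clear the stale wakeup bit and restart the fake-irq
 * mitigation if the breadcrumbs were armed.
 */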
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->irq_lock);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), there is no possibility for a
	 * missed interrupt as we enabled the irq and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	if (b->irq_armed)
		enable_fake_irq(b);

	spin_unlock_irq(&b->irq_lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(rcu_access_pointer(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}
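
/* Report whether anyone is still waiting or signalling on this engine,
 * kicking each party found so that it can flush its own completion.
 */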
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool busy = false;

	spin_lock_irq(&b->rb_lock);

	if (b->irq_wait) {
		wake_up_process(b->irq_wait->tsk);
		busy = true;
	}

	if (rcu_access_pointer(b->first_signal)) {
		wake_up_process(b->signaler);
		busy = true;
	}

	spin_unlock_irq(&b->rb_lock);

	return busy;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif