/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/fence.h>

#include "i915_gem.h"

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct fence fence;
	spinlock_t lock;

	/** The device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_signal_node signaling;

	/** GEM sequence number associated with the previous request,
	 * when the HWS breadcrumb is equal to this the GPU is processing
	 * this request.
	 */
	u32 previous_seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/** Preallocated space in the ringbuffer for emitting the request */
	u32 reserved_space;

	/**
	 * Context related to the previous request.
	 * As the contexts are accessed by the hardware until the switch is
	 * completed to a new context, the hardware may still be writing
	 * to the context object after the breadcrumb is visible. We must
	 * not unpin/unbind/prune that object whilst still active and so
	 * we keep the previous context pinned until the following (this)
	 * request is retired.
	 */
	struct i915_gem_context *previous_context;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct drm_i915_gem_object *batch_obj;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	/** process identifier submitting this request */
	struct pid *pid;

	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU): We
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Execlists no. of times this request has been sent to the ELSP */
	int elsp_submitted;

	/** Execlists context hardware id. */
	unsigned int ctx_hw_id;
};

extern const struct fence_ops i915_fence_ops;

static inline bool fence_is_i915(struct fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->fence.seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}

static inline struct drm_i915_gem_request *
to_request(struct fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	fence_put(&req->fence);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}

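/* A sketch of typical i915_gem_request_assign() usage: exchange the old
 * tracked request for a new one with the refcounts kept balanced
 * (illustrative only; @cache->request is a hypothetical holder, not part
 * of this header):
 *
 *	i915_gem_request_assign(&cache->request, request);
 *	...
 *	i915_gem_request_assign(&cache->request, NULL);
 *
 * The final assignment of NULL drops the reference taken earlier.
 */
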
void __i915_add_request(struct drm_i915_gem_request *req,
			struct drm_i915_gem_object *batch_obj,
			bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, NULL, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, NULL, false)

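/* A minimal sketch of the request lifecycle, assuming the caller already
 * holds struct_mutex and has an engine and context in hand (command
 * emission and error unwinding elided for brevity):
 *
 *	struct drm_i915_gem_request *req;
 *
 *	req = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 *	... emit commands into req->ring ...
 *
 *	i915_add_request(req);
 */
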
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

int i915_wait_request(struct drm_i915_gem_request *req,
		      bool interruptible,
		      s64 *timeout,
		      struct intel_rps_client *rps)
	__attribute__((nonnull(1)));

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than or equal to seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

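/* The signed difference makes the comparison robust against seqno
 * wraparound. For example, shortly after the 32-bit counter wraps:
 *
 *	i915_seqno_passed(1, 0xfffffffe) == true  ((s32)3 >= 0)
 *	i915_seqno_passed(0xfffffffe, 1) == false ((s32)-3 < 0)
 *
 * provided the two values are within 2^31 of each other.
 */
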
static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->previous_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->fence.seqno);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
			 int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
				     int state, unsigned long timeout_us)
{
	return (i915_gem_request_started(request) &&
		__i915_spin_request(request, state, timeout_us));
}

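/* A sketch of the intended spin-then-sleep pattern: briefly busy-wait for
 * a request the GPU has already started before falling back to a blocking
 * wait (illustrative; the 5us budget here is an assumption, not a
 * recommendation from this header):
 *
 *	if (!i915_spin_request(req, TASK_INTERRUPTIBLE, 5))
 *		ret = i915_wait_request(req, true, NULL, NULL);
 */
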
/* We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTE whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */

struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback when the tracker is retired (becomes idle),
 *	     can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, when it is retired
 * after completion, the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

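/* A sketch of how an owner might wire up a tracker, assuming a
 * hypothetical object embedding a last_write tracker and a retire
 * callback (the names are illustrative, not part of this header):
 *
 *	static void retire_write(struct i915_gem_active *active,
 *				 struct drm_i915_gem_request *request)
 *	{
 *		struct my_object *obj =
 *			container_of(active, struct my_object, last_write);
 *		... the tracker is now idle, e.g. drop a pin on obj ...
 *	}
 *
 *	init_request_active(&obj->last_write, retire_write);
 *	...
 *	i915_gem_active_set(&obj->last_write, request); (under struct_mutex)
 */
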
static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference_protected(active->request,
					    lockdep_is_held(mutex));
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_peek_rcu - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek_rcu() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, and inspection of the request is only valid under
 * the RCU lock.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek_rcu(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference(active->request);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * req = active.request
	 *				retire(req) -> free(req);
	 *				(req is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				req = new submission on a new object
	 * ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}

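/* For example, a caller can sample state of the last active request
 * without holding struct_mutex (an illustrative sketch; obj->last_write
 * is a hypothetical tracker, not defined in this header):
 *
 *	struct drm_i915_gem_request *rq;
 *	struct intel_engine_cs *engine = NULL;
 *
 *	rq = i915_gem_active_get_unlocked(&obj->last_write);
 *	if (rq) {
 *		engine = rq->engine;
 *		i915_gem_request_put(rq);
 *	}
 */
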
/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_is_idle - report whether the active tracker is idle
 * @active - the active tracker
 *
 * i915_gem_active_is_idle() returns true if the active tracker is currently
 * unassigned or if the request is complete (but not yet retired). Requires
 * the caller to hold struct_mutex (but that can be relaxed if desired).
 */
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
			struct mutex *mutex)
{
	return !i915_gem_active_peek(active, mutex);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning. Note that it does not guarantee that the request is
 * retired first, see i915_gem_active_retire().
 *
 * i915_gem_active_wait() returns immediately if the active
 * request is already complete.
 */
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_peek(active, mutex);
	if (!request)
		return 0;

	return i915_wait_request(request, true, NULL, NULL);
}

/**
 * i915_gem_active_wait_unlocked - waits until the request is completed
 * @active - the active request on which to wait
 * @interruptible - whether the wait can be woken by a userspace signal
 * @timeout - how long to wait at most
 * @rps - userspace client to charge for a waitboost
 *
 * i915_gem_active_wait_unlocked() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
			      bool interruptible,
			      s64 *timeout,
			      struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *request;
	int ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, interruptible, timeout, rps);
		i915_gem_request_put(request);
	}

	return ret;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct drm_i915_gem_request *request;
	int ret;

	request = rcu_dereference_protected(active->request,
					    lockdep_is_held(mutex));
	if (!request)
		return 0;

	ret = i915_wait_request(request, true, NULL, NULL);
	if (ret)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

/* Convenience functions for peeking at state inside active's request whilst
 * guarded by the struct_mutex.
 */

static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
			  struct mutex *mutex)
{
	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}

static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
			   struct mutex *mutex)
{
	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))

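/* A sketch of for_each_active() usage: visit each engine index whose bit
 * is set in @mask, which the loop consumes bit by bit (illustrative;
 * obj->active and obj->last_read[] are hypothetical per-engine trackers,
 * not defined in this header):
 *
 *	unsigned int mask = obj->active;
 *	int idx, ret;
 *
 *	for_each_active(mask, idx) {
 *		ret = i915_gem_active_retire(&obj->last_read[idx], mutex);
 *		if (ret)
 *			return ret;
 *	}
 */
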
#endif /* I915_GEM_REQUEST_H */