drm/i915: Wait first for submission, before waiting for request completion
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 64c370681a813ea33a1d80b478bd184c08519fd0..03ae85a1eefb018720c4cb66cfbbece31b1d3c81 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
  */
 
 #include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
 
 #include "i915_drv.h"
 
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
        return "i915";
 }
 
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
        /* Timelines are bound by eviction to a VM. However, since
         * we only have a global seqno at the moment, we only have
@@ -39,15 +40,15 @@ static const char *i915_fence_get_timeline_name(struct fence *fence)
         * multiple execution contexts (fence contexts) as we allow
         * engines within a single timeline to execute in parallel.
         */
-       return "global";
+       return to_request(fence)->timeline->common->name;
 }
 
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
 {
        return i915_gem_request_completed(to_request(fence));
 }
 
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
        if (i915_fence_signaled(fence))
                return false;
@@ -56,55 +57,33 @@ static bool i915_fence_enable_signaling(struct fence *fence)
        return true;
 }
 
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
-                                  signed long timeout_jiffies)
+                                  signed long timeout)
 {
-       s64 timeout_ns, *timeout;
-       int ret;
-
-       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
-               timeout_ns = jiffies_to_nsecs(timeout_jiffies);
-               timeout = &timeout_ns;
-       } else {
-               timeout = NULL;
-       }
-
-       ret = i915_wait_request(to_request(fence),
-                               interruptible, timeout,
-                               NO_WAITBOOST);
-       if (ret == -ETIME)
-               return 0;
-
-       if (ret < 0)
-               return ret;
-
-       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
-               timeout_jiffies = nsecs_to_jiffies(timeout_ns);
-
-       return timeout_jiffies;
+       return i915_wait_request(to_request(fence), interruptible, timeout);
 }
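
With the move to the renamed dma-fence API, the .wait hook above now simply forwards the jiffies-based timeout handed down by the dma-fence core. For reference, a generic consumer of any dma_fence (a sketch using the core dma_fence_wait_timeout() helper, not code from this patch) interprets the result like this:

	long ret;

	/* Wait up to 100ms for the fence, allowing signals to interrupt. */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	if (ret == 0)
		return -ETIMEDOUT;	/* the core reports a timeout as 0 */
	/* otherwise the fence signalled; ret is the remaining jiffies */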
 
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
+static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
        snprintf(str, size, "%u", fence->seqno);
 }
 
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
                                          int size)
 {
        snprintf(str, size, "%u",
                 intel_engine_get_seqno(to_request(fence)->engine));
 }
 
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
 {
        struct drm_i915_gem_request *req = to_request(fence);
 
        kmem_cache_free(req->i915->requests, req);
 }
 
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
@@ -164,8 +143,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
        struct i915_gem_active *active, *next;
 
+       lockdep_assert_held(&request->i915->drm.struct_mutex);
+       GEM_BUG_ON(!i915_gem_request_completed(request));
+
        trace_i915_gem_request_retire(request);
-       list_del(&request->link);
+       list_del_init(&request->link);
 
        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
@@ -214,6 +196,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        }
 
        i915_gem_context_put(request->ctx);
+
+       dma_fence_signal(&request->fence);
        i915_gem_request_put(request);
 }
 
@@ -223,10 +207,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
        struct drm_i915_gem_request *tmp;
 
        lockdep_assert_held(&req->i915->drm.struct_mutex);
-       GEM_BUG_ON(list_empty(&req->link));
+       if (list_empty(&req->link))
+               return;
 
        do {
-               tmp = list_first_entry(&engine->request_list,
+               tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);
 
                i915_gem_request_retire(tmp);
@@ -253,71 +238,101 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *dev_priv,
+                                     u32 seqno)
 {
+       struct i915_gem_timeline *timeline = &dev_priv->gt.global_timeline;
        struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        int ret;
 
        /* Carefully retire all requests without writing to the rings */
-       for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine,
-                                       I915_WAIT_INTERRUPTIBLE |
-                                       I915_WAIT_LOCKED);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
+       if (ret)
+               return ret;
+
        i915_gem_retire_requests(dev_priv);
 
        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+       if (!i915_seqno_passed(seqno, timeline->next_seqno)) {
                while (intel_kick_waiters(dev_priv) ||
                       intel_kick_signalers(dev_priv))
                        yield();
+               yield();
        }
 
        /* Finally reset hw state */
-       for_each_engine(engine, dev_priv)
-               intel_engine_init_seqno(engine, seqno);
+       for_each_engine(engine, dev_priv, id)
+               intel_engine_init_global_seqno(engine, seqno);
 
        return 0;
 }
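
The wrap handling above depends on the seqno comparison being wraparound-safe. As a reference point, i915_seqno_passed() is conventionally implemented along these lines (a sketch; the local name below is illustrative, not quoted from this tree):

	/* Wraparound-safe "has seq1 passed seq2?" for 32-bit seqnos: casting
	 * the difference to s32 keeps the test valid across a wrap, provided
	 * the two values are within 2^31 of each other.
	 */
	static inline bool seqno_passed(u32 seq1, u32 seq2)
	{
		return (s32)(seq1 - seq2) >= 0;
	}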
 
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
        if (seqno == 0)
                return -EINVAL;
 
        /* HWS page needs to be set less than what we
         * will inject to ring
         */
-       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+       ret = i915_gem_init_global_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;
 
-       dev_priv->next_seqno = seqno;
+       dev_priv->gt.global_timeline.next_seqno = seqno;
        return 0;
 }
 
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static int i915_gem_get_global_seqno(struct drm_i915_private *dev_priv,
+                                    u32 *seqno)
 {
+       struct i915_gem_timeline *tl = &dev_priv->gt.global_timeline;
+
        /* reserve 0 for non-seqno */
-       if (unlikely(dev_priv->next_seqno == 0)) {
+       if (unlikely(tl->next_seqno == 0)) {
                int ret;
 
-               ret = i915_gem_init_seqno(dev_priv, 0);
+               ret = i915_gem_init_global_seqno(dev_priv, 0);
                if (ret)
                        return ret;
 
-               dev_priv->next_seqno = 1;
+               tl->next_seqno = 1;
        }
 
-       *seqno = dev_priv->next_seqno++;
+       *seqno = tl->next_seqno++;
        return 0;
 }
 
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+       struct drm_i915_gem_request *request =
+               container_of(fence, typeof(*request), submit);
+       struct intel_engine_cs *engine = request->engine;
+
+       /* Will be called from irq-context when using foreign DMA fences */
+
+       switch (state) {
+       case FENCE_COMPLETE:
+               engine->timeline->last_submitted_seqno = request->fence.seqno;
+               engine->submit_request(request);
+               break;
+
+       case FENCE_FREE:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
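
For orientation, the submit fence introduced here follows the general i915_sw_fence lifecycle that the hunks below spell out piecemeal: initialise with a notify callback, chain it behind any dependencies, then commit it so that submit_notify() runs once everything it waits on has signalled. A condensed, illustrative sequence (error handling omitted, not literal patch code):

	i915_sw_fence_init(&req->submit, submit_notify);

	/* Order submission behind the previous request on this timeline. */
	if (prev)
		i915_sw_fence_await_sw_fence(&req->submit, &prev->submit,
					     &req->submitq);

	/* Once all awaited fences signal, submit_notify() is called with
	 * FENCE_COMPLETE and hands the request to engine->submit_request().
	 */
	i915_sw_fence_commit(&req->submit);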
+
 /**
  * i915_gem_request_alloc - allocate a request structure
  *
@@ -348,7 +363,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
                return ERR_PTR(ret);
 
        /* Move the oldest request to the slab-cache (if not in use!) */
-       req = list_first_entry_or_null(&engine->request_list,
+       req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);
@@ -366,8 +381,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
-        * with fence_init(). This increment is safe for release as we check
-        * that the request we have a reference to and matches the active
+        * with dma_fence_init(). This increment is safe for release as we
+        * check that the request we have a reference to and matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
@@ -385,16 +400,20 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (!req)
                return ERR_PTR(-ENOMEM);
 
-       ret = i915_gem_get_seqno(dev_priv, &seqno);
+       ret = i915_gem_get_global_seqno(dev_priv, &seqno);
        if (ret)
                goto err;
 
+       req->timeline = engine->timeline;
+
        spin_lock_init(&req->lock);
-       fence_init(&req->fence,
-                  &i915_fence_ops,
-                  &req->lock,
-                  engine->fence_context,
-                  seqno);
+       dma_fence_init(&req->fence,
+                      &i915_fence_ops,
+                      &req->lock,
+                      req->timeline->fence_context,
+                      seqno);
+
+       i915_sw_fence_init(&req->submit, submit_notify);
 
        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
@@ -438,6 +457,156 @@ err:
        return ERR_PTR(ret);
 }
 
+static int
+i915_gem_request_await_request(struct drm_i915_gem_request *to,
+                              struct drm_i915_gem_request *from)
+{
+       int idx, ret;
+
+       GEM_BUG_ON(to == from);
+
+       if (to->timeline == from->timeline)
+               return 0;
+
+       if (to->engine == from->engine) {
+               ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+                                                      &from->submit,
+                                                      GFP_KERNEL);
+               return ret < 0 ? ret : 0;
+       }
+
+       idx = intel_engine_sync_index(from->engine, to->engine);
+       if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+               return 0;
+
+       trace_i915_gem_ring_sync_to(to, from);
+       if (!i915.semaphores) {
+               if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+                       ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                           &from->fence, 0,
+                                                           GFP_KERNEL);
+                       if (ret < 0)
+                               return ret;
+               }
+       } else {
+               ret = to->engine->semaphore.sync_to(to, from);
+               if (ret)
+                       return ret;
+       }
+
+       from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+       return 0;
+}
+
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+                                struct dma_fence *fence)
+{
+       struct dma_fence_array *array;
+       int ret;
+       int i;
+
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+               return 0;
+
+       if (dma_fence_is_i915(fence))
+               return i915_gem_request_await_request(req, to_request(fence));
+
+       if (!dma_fence_is_array(fence)) {
+               ret = i915_sw_fence_await_dma_fence(&req->submit,
+                                                   fence, I915_FENCE_TIMEOUT,
+                                                   GFP_KERNEL);
+               return ret < 0 ? ret : 0;
+       }
+
+       /* Note that if the fence-array was created in signal-on-any mode,
+        * we should *not* decompose it into its individual fences. However,
+        * we don't currently store which mode the fence-array is operating
+        * in. Fortunately, the only user of signal-on-any is private to
+        * amdgpu and we should not see any incoming fence-array from
+        * sync-file being in signal-on-any mode.
+        */
+
+       array = to_dma_fence_array(fence);
+       for (i = 0; i < array->num_fences; i++) {
+               struct dma_fence *child = array->fences[i];
+
+               if (dma_fence_is_i915(child))
+                       ret = i915_gem_request_await_request(req,
+                                                            to_request(child));
+               else
+                       ret = i915_sw_fence_await_dma_fence(&req->submit,
+                                                           child, I915_FENCE_TIMEOUT,
+                                                           GFP_KERNEL);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
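
i915_gem_request_await_dma_fence() is the entry point for foreign fences (including fence-arrays) as well as i915's own requests. A hypothetical caller pulling an explicit fence out of a sync_file fd (using the generic sync_file_get_fence() helper from <linux/sync_file.h>; the caller itself is illustrative and not part of this patch) would look roughly like:

	struct dma_fence *in_fence;
	int err;

	in_fence = sync_file_get_fence(in_fd);
	if (!in_fence)
		return -EINVAL;

	/* Queue the request behind whatever the fd refers to, then drop
	 * our reference; the await machinery holds its own as needed.
	 */
	err = i915_gem_request_await_dma_fence(req, in_fence);
	dma_fence_put(in_fence);
	if (err < 0)
		return err;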
+
+/**
+ * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ *
+ * @to: request we are wishing to use
+ * @obj: object which may be in use on another ring.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ *   request must wait for it to complete (either CPU or in hw, requests
+ *   on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ *   request must wait for outstanding read requests to complete.
+ *
+ * Returns 0 if successful, otherwise propagates the error from the lower layer.
+ */
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+                             struct drm_i915_gem_object *obj,
+                             bool write)
+{
+       struct dma_fence *excl;
+       int ret = 0;
+
+       if (write) {
+               struct dma_fence **shared;
+               unsigned int count, i;
+
+               ret = reservation_object_get_fences_rcu(obj->resv,
+                                                       &excl, &count, &shared);
+               if (ret)
+                       return ret;
+
+               for (i = 0; i < count; i++) {
+                       ret = i915_gem_request_await_dma_fence(to, shared[i]);
+                       if (ret)
+                               break;
+
+                       dma_fence_put(shared[i]);
+               }
+
+               for (; i < count; i++)
+                       dma_fence_put(shared[i]);
+               kfree(shared);
+       } else {
+               excl = reservation_object_get_excl_rcu(obj->resv);
+       }
+
+       if (excl) {
+               if (ret == 0)
+                       ret = i915_gem_request_await_dma_fence(to, excl);
+
+               dma_fence_put(excl);
+       }
+
+       return ret;
+}
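
A minimal caller-side sketch of the rules in the kernel-doc above: every object a new request will touch is serialised against that request before commands are emitted. The helper name and array-of-objects interface here are purely illustrative (the real execbuffer path walks its own VMA lists):

	static int await_all_objects(struct drm_i915_gem_request *req,
				     struct drm_i915_gem_object **objs,
				     unsigned int count, bool write)
	{
		unsigned int i;
		int err;

		for (i = 0; i < count; i++) {
			/* write is true if this request will write to objs[i] */
			err = i915_gem_request_await_object(req, objs[i], write);
			if (err)
				return err;
		}

		return 0;
	}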
+
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
@@ -468,10 +637,15 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
+       struct intel_timeline *timeline = request->timeline;
+       struct drm_i915_gem_request *prev;
        u32 request_start;
        u32 reserved_tail;
        int ret;
 
+       lockdep_assert_held(&request->i915->drm.struct_mutex);
+       trace_i915_gem_request_add(request);
+
        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
@@ -495,20 +669,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
                WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
        }
 
-       trace_i915_gem_request_add(request);
-
-       /* Seal the request and mark it as pending execution. Note that
-        * we may inspect this state, without holding any locks, during
-        * hangcheck. Hence we apply the barrier to ensure that we do not
-        * see a more recent value in the hws than we are tracking.
-        */
-       request->emitted_jiffies = jiffies;
-       request->previous_seqno = engine->last_submitted_seqno;
-       engine->last_submitted_seqno = request->fence.seqno;
-       i915_gem_active_set(&engine->last_request, request);
-       list_add_tail(&request->link, &engine->request_list);
-       list_add_tail(&request->ring_link, &ring->request_list);
-
        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
@@ -529,8 +689,30 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);
 
+       /* Seal the request and mark it as pending execution. Note that
+        * we may inspect this state, without holding any locks, during
+        * hangcheck. Hence we apply the barrier to ensure that we do not
+        * see a more recent value in the hws than we are tracking.
+        */
+
+       prev = i915_gem_active_raw(&timeline->last_request,
+                                  &request->i915->drm.struct_mutex);
+       if (prev)
+               i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+                                            &request->submitq);
+
+       request->emitted_jiffies = jiffies;
+       request->previous_seqno = timeline->last_pending_seqno;
+       timeline->last_pending_seqno = request->fence.seqno;
+       i915_gem_active_set(&timeline->last_request, request);
+       list_add_tail(&request->link, &timeline->requests);
+       list_add_tail(&request->ring_link, &ring->request_list);
+
        i915_gem_mark_busy(engine);
-       engine->submit_request(request);
+
+       local_bh_disable();
+       i915_sw_fence_commit(&request->submit);
+       local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 }
 
 static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
@@ -607,76 +789,100 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
        return false;
 }
 
+static long
+__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
+                              unsigned int flags,
+                              long timeout)
+{
+       const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
+       DEFINE_WAIT(reset);
+       DEFINE_WAIT(wait);
+
+       if (flags & I915_WAIT_LOCKED)
+               add_wait_queue(q, &reset);
+
+       do {
+               prepare_to_wait(&request->submit.wait, &wait, state);
+
+               if (i915_sw_fence_done(&request->submit))
+                       break;
+
+               if (flags & I915_WAIT_LOCKED &&
+                   i915_reset_in_progress(&request->i915->gpu_error)) {
+                       __set_current_state(TASK_RUNNING);
+                       i915_reset(request->i915);
+                       reset_wait_queue(q, &reset);
+                       continue;
+               }
+
+               if (signal_pending_state(state, current)) {
+                       timeout = -ERESTARTSYS;
+                       break;
+               }
+
+               timeout = io_schedule_timeout(timeout);
+       } while (timeout);
+       finish_wait(&request->submit.wait, &wait);
+
+       if (flags & I915_WAIT_LOCKED)
+               remove_wait_queue(q, &reset);
+
+       return timeout;
+}
+
 /**
  * i915_wait_request - wait until execution of request has finished
- * @req: duh!
+ * @req: the request to wait upon
  * @flags: how to wait
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: client to charge for RPS boosting
+ * @timeout: how long to wait in jiffies
+ *
+ * i915_wait_request() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
  *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
+ * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
+ * in via the flags; conversely, if the struct_mutex is not held, the caller
+ * must not specify that the wait is locked.
  *
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
+ * Returns the remaining time (in jiffies, possibly zero) if the request
+ * completed, or -ETIME if the request is still unfinished after the timeout
+ * expires. May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a
+ * signal is pending before the request completes.
  */
-int i915_wait_request(struct drm_i915_gem_request *req,
-                     unsigned int flags,
-                     s64 *timeout,
-                     struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+                      unsigned int flags,
+                      long timeout)
 {
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
-       unsigned long timeout_remain;
-       int ret = 0;
 
        might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
-       GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+       GEM_BUG_ON(debug_locks &&
+                  !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
 #endif
+       GEM_BUG_ON(timeout < 0);
 
        if (i915_gem_request_completed(req))
-               return 0;
-
-       timeout_remain = MAX_SCHEDULE_TIMEOUT;
-       if (timeout) {
-               if (WARN_ON(*timeout < 0))
-                       return -EINVAL;
+               return timeout;
 
-               if (*timeout == 0)
-                       return -ETIME;
-
-               /* Record current time in case interrupted, or wedged */
-               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-               *timeout += ktime_get_raw_ns();
-       }
+       if (!timeout)
+               return -ETIME;
 
        trace_i915_gem_request_wait_begin(req);
 
-       /* This client is about to stall waiting for the GPU. In many cases
-        * this is undesirable and limits the throughput of the system, as
-        * many clients cannot continue processing user input/output whilst
-        * blocked. RPS autotuning may take tens of milliseconds to respond
-        * to the GPU load and thus incurs additional latency for the client.
-        * We can circumvent that by promoting the GPU frequency to maximum
-        * before we wait. This makes the GPU throttle up much more quickly
-        * (good for benchmarks and user experience, e.g. window animations),
-        * but at a cost of spending more power processing the workload
-        * (bad for battery). Not all clients even want their results
-        * immediately and for them we should just let the GPU select its own
-        * frequency to maximise efficiency. To prevent a single client from
-        * forcing the clocks too high for the whole system, we only allow
-        * each client to waitboost once in a busy period.
-        */
-       if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
-               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+       if (!i915_sw_fence_done(&req->submit)) {
+               timeout = __i915_request_wait_for_submit(req, flags, timeout);
+               if (timeout < 0)
+                       goto complete;
+
+               GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+       }
 
        /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
@@ -696,16 +902,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 
        for (;;) {
                if (signal_pending_state(state, current)) {
-                       ret = -ERESTARTSYS;
+                       timeout = -ERESTARTSYS;
                        break;
                }
 
-               timeout_remain = io_schedule_timeout(timeout_remain);
-               if (timeout_remain == 0) {
-                       ret = -ETIME;
+               if (!timeout) {
+                       timeout = -ETIME;
                        break;
                }
 
+               timeout = io_schedule_timeout(timeout);
+
                if (intel_wait_complete(&wait))
                        break;
 
@@ -752,47 +959,15 @@ wakeup:
 complete:
        trace_i915_gem_request_wait_end(req);
 
-       if (timeout) {
-               *timeout -= ktime_get_raw_ns();
-               if (*timeout < 0)
-                       *timeout = 0;
-
-               /*
-                * Apparently ktime isn't accurate enough and occasionally has a
-                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-                * things up to make the test happy. We allow up to 1 jiffy.
-                *
-                * This is a regrssion from the timespec->ktime conversion.
-                */
-               if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-                       *timeout = 0;
-       }
-
-       if (IS_RPS_USER(rps) &&
-           req->fence.seqno == req->engine->last_submitted_seqno) {
-               /* The GPU is now idle and this client has stalled.
-                * Since no other client has submitted a request in the
-                * meantime, assume that this client is the only one
-                * supplying work to the GPU but is unable to keep that
-                * work supplied because it is waiting. Since the GPU is
-                * then never kept fully busy, RPS autoclocking will
-                * keep the clocks relatively low, causing further delays.
-                * Compensate by giving the synchronous client credit for
-                * a waitboost next time.
-                */
-               spin_lock(&req->i915->rps.client_lock);
-               list_del_init(&rps->link);
-               spin_unlock(&req->i915->rps.client_lock);
-       }
-
-       return ret;
+       return timeout;
 }
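
Since the nanosecond out-parameter is gone, a caller that still thinks in nanoseconds (as the old i915_fence_wait() above did) now converts to jiffies itself and interprets the jiffies-or-errno return value. Roughly (a sketch reusing the nsecs_to_jiffies_timeout()/jiffies_to_nsecs() helpers seen in the removed code, with negative/zero timeouts left out for brevity):

	long remaining;

	remaining = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE,
				      nsecs_to_jiffies_timeout(timeout_ns));
	if (remaining < 0)
		return remaining;	/* -ETIME or -ERESTARTSYS */

	/* Report how much of the budget is left to the caller. */
	timeout_ns = jiffies_to_nsecs(remaining);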
 
 static bool engine_retire_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request, *next;
 
-       list_for_each_entry_safe(request, next, &engine->request_list, link) {
+       list_for_each_entry_safe(request, next,
+                                &engine->timeline->requests, link) {
                if (!i915_gem_request_completed(request))
                        return false;