git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
drm/i915: Derive GEM requests from dma-fence
authorChris Wilson <chris@chris-wilson.co.uk>
Wed, 20 Jul 2016 08:21:11 +0000 (09:21 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Wed, 20 Jul 2016 08:29:53 +0000 (09:29 +0100)
dma-buf provides a generic fence class for interoperation between
drivers. Internally we use the request structure as a fence, and so with
only a little bit of interfacing we can rebase those requests on top of
dma-buf fences. This will allow us, in the future, to pass those fences
back to userspace or between drivers.

v2: The fence_context needs to be globally unique, not just unique to
this device.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1469002875-2335-4-git-send-email-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 90aef454019393b05a1474c4f5af7ea43992a530..55fd3d9cc44893d77626d78c9b5c4d105a2cdea0 100644 (file)
@@ -768,7 +768,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
                        if (req->pid)
                                task = pid_task(req->pid, PIDTYPE_PID);
                        seq_printf(m, "    %x @ %d: %s [%d]\n",
-                                  req->seqno,
+                                  req->fence.seqno,
                                   (int) (jiffies - req->emitted_jiffies),
                                   task ? task->comm : "<unknown>",
                                   task ? task->pid : -1);
index 5cbb11ece60a07d598ec238f0fe7fc9af1cfc0a2..6528536878f2995645431f407d3bbaadc3eefd43 100644 (file)
 
 #include "i915_drv.h"
 
+static const char *i915_fence_get_driver_name(struct fence *fence)
+{
+       return "i915";
+}
+
+static const char *i915_fence_get_timeline_name(struct fence *fence)
+{
+       /* Timelines are bound by eviction to a VM. However, since
+        * we only have a global seqno at the moment, we only have
+        * a single timeline. Note that each timeline will have
+        * multiple execution contexts (fence contexts) as we allow
+        * engines within a single timeline to execute in parallel.
+        */
+       return "global";
+}
+
+static bool i915_fence_signaled(struct fence *fence)
+{
+       return i915_gem_request_completed(to_request(fence));
+}
+
+static bool i915_fence_enable_signaling(struct fence *fence)
+{
+       if (i915_fence_signaled(fence))
+               return false;
+
+       intel_engine_enable_signaling(to_request(fence));
+       return true;
+}
+
+static signed long i915_fence_wait(struct fence *fence,
+                                  bool interruptible,
+                                  signed long timeout_jiffies)
+{
+       s64 timeout_ns, *timeout;
+       int ret;
+
+       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+               timeout_ns = jiffies_to_nsecs(timeout_jiffies);
+               timeout = &timeout_ns;
+       } else {
+               timeout = NULL;
+       }
+
+       ret = __i915_wait_request(to_request(fence),
+                                 interruptible, timeout,
+                                 NULL);
+       if (ret == -ETIME)
+               return 0;
+
+       if (ret < 0)
+               return ret;
+
+       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
+               timeout_jiffies = nsecs_to_jiffies(timeout_ns);
+
+       return timeout_jiffies;
+}
+
+static void i915_fence_value_str(struct fence *fence, char *str, int size)
+{
+       snprintf(str, size, "%u", fence->seqno);
+}
+
+static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+                                         int size)
+{
+       snprintf(str, size, "%u",
+                intel_engine_get_seqno(to_request(fence)->engine));
+}
+
+static void i915_fence_release(struct fence *fence)
+{
+       struct drm_i915_gem_request *req = to_request(fence);
+
+       kmem_cache_free(req->i915->requests, req);
+}
+
+const struct fence_ops i915_fence_ops = {
+       .get_driver_name = i915_fence_get_driver_name,
+       .get_timeline_name = i915_fence_get_timeline_name,
+       .enable_signaling = i915_fence_enable_signaling,
+       .signaled = i915_fence_signaled,
+       .wait = i915_fence_wait,
+       .release = i915_fence_release,
+       .fence_value_str = i915_fence_value_str,
+       .timeline_value_str = i915_fence_timeline_value_str,
+};
+
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
 {
@@ -211,6 +300,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        struct drm_i915_private *dev_priv = engine->i915;
        unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
+       u32 seqno;
        int ret;
 
        if (!req_out)
@@ -238,11 +328,17 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (!req)
                return -ENOMEM;
 
-       ret = i915_gem_get_seqno(dev_priv, &req->seqno);
+       ret = i915_gem_get_seqno(dev_priv, &seqno);
        if (ret)
                goto err;
 
-       kref_init(&req->ref);
+       spin_lock_init(&req->lock);
+       fence_init(&req->fence,
+                  &i915_fence_ops,
+                  &req->lock,
+                  engine->fence_context,
+                  seqno);
+
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = ctx;
@@ -385,7 +481,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
         */
        request->emitted_jiffies = jiffies;
        request->previous_seqno = engine->last_submitted_seqno;
-       smp_store_mb(engine->last_submitted_seqno, request->seqno);
+       smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
        list_add_tail(&request->list, &engine->request_list);
 
        /* Record the position of the start of the request so that
@@ -556,7 +652,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        set_current_state(state);
        add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-       intel_wait_init(&wait, req->seqno);
+       intel_wait_init(&wait, req->fence.seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
@@ -617,7 +713,7 @@ complete:
                        *timeout = 0;
        }
 
-       if (rps && req->seqno == req->engine->last_submitted_seqno) {
+       if (rps && req->fence.seqno == req->engine->last_submitted_seqno) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
@@ -657,10 +753,3 @@ int i915_wait_request(struct drm_i915_gem_request *req)
 
        return 0;
 }
-
-void i915_gem_request_free(struct kref *req_ref)
-{
-       struct drm_i915_gem_request *req =
-               container_of(req_ref, typeof(*req), ref);
-       kmem_cache_free(req->i915->requests, req);
-}
index ea700befcc28fdd5552e2a8c9b188222b283b54e..6f2c820785f3a6522ca64eceaf026fdb4ac30b6f 100644 (file)
 #ifndef I915_GEM_REQUEST_H
 #define I915_GEM_REQUEST_H
 
+#include <linux/fence.h>
+
+#include "i915_gem.h"
+
 /**
  * Request queue structure.
  *
  * emission time to be associated with the request for tracking how far ahead
  * of the GPU the submission is.
  *
- * The requests are reference counted, so upon creation they should have an
- * initial reference taken using kref_init
+ * The requests are reference counted.
  */
 struct drm_i915_gem_request {
-       struct kref ref;
+       struct fence fence;
+       spinlock_t lock;
 
        /** On Which ring this request was generated */
        struct drm_i915_private *i915;
@@ -66,12 +70,6 @@ struct drm_i915_gem_request {
         */
        u32 previous_seqno;
 
-       /** GEM sequence number associated with this request,
-        * when the HWS breadcrumb is equal or greater than this the GPU
-        * has finished processing this request.
-        */
-       u32 seqno;
-
        /** Position in the ringbuffer of the start of the request */
        u32 head;
 
@@ -140,10 +138,16 @@ struct drm_i915_gem_request {
        unsigned int ctx_hw_id;
 };
 
+extern const struct fence_ops i915_fence_ops;
+
+static inline bool fence_is_i915(struct fence *fence)
+{
+       return fence->ops == &i915_fence_ops;
+}
+
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx);
-void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file);
 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
@@ -151,7 +155,7 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
 static inline u32
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 {
-       return req ? req->seqno : 0;
+       return req ? req->fence.seqno : 0;
 }
 
 static inline struct intel_engine_cs *
@@ -160,18 +164,25 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req)
        return req ? req->engine : NULL;
 }
 
+static inline struct drm_i915_gem_request *
+to_request(struct fence *fence)
+{
+       /* We assume that NULL fence/request are interoperable */
+       BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+       GEM_BUG_ON(fence && !fence_is_i915(fence));
+       return container_of(fence, struct drm_i915_gem_request, fence);
+}
+
 static inline struct drm_i915_gem_request *
 i915_gem_request_reference(struct drm_i915_gem_request *req)
 {
-       if (req)
-               kref_get(&req->ref);
-       return req;
+       return to_request(fence_get(&req->fence));
 }
 
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-       kref_put(&req->ref, i915_gem_request_free);
+       fence_put(&req->fence);
 }
 
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -223,7 +234,7 @@ static inline bool
 i915_gem_request_completed(const struct drm_i915_gem_request *req)
 {
        return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-                                req->seqno);
+                                req->fence.seqno);
 }
 
 bool __i915_spin_request(const struct drm_i915_gem_request *request,
index 9d73d2216adc6925f09b97f76a71690daf213374..6daaf4ecd2da501827a203c255dc4882d316e5fc 100644 (file)
@@ -1182,7 +1182,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                        }
 
                        erq = &error->ring[i].requests[count++];
-                       erq->seqno = request->seqno;
+                       erq->seqno = request->fence.seqno;
                        erq->jiffies = request->emitted_jiffies;
                        erq->tail = request->postfix;
                }
index 2112e029db6a929ea7f55c4266ffbe24af860d83..1cc5de129e769670396b255a64b781c0a5b8184d 100644 (file)
@@ -506,7 +506,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
                                                             rq->engine);
 
        wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
-       wqi->fence_id = rq->seqno;
+       wqi->fence_id = rq->fence.seqno;
 
        kunmap_atomic(base);
 }
@@ -601,7 +601,7 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
                client->b_fail += 1;
 
        guc->submissions[engine_id] += 1;
-       guc->last_seqno[engine_id] = rq->seqno;
+       guc->last_seqno[engine_id] = rq->fence.seqno;
 
        return b_ret;
 }
index 534154e05fbe47b70870e4149e8265caf4a89572..007112d1e04991c775c6208087639b7823555732 100644 (file)
@@ -465,7 +465,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
                           __entry->dev = from->i915->drm.primary->index;
                           __entry->sync_from = from->id;
                           __entry->sync_to = to_req->engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->seqno = req->fence.seqno;
                           ),
 
            TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -488,9 +488,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
            TP_fast_assign(
                           __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
-                          __entry->seqno = req->seqno;
+                          __entry->seqno = req->fence.seqno;
                           __entry->flags = flags;
-                          intel_engine_enable_signaling(req);
+                          fence_enable_sw_signaling(&req->fence);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -533,7 +533,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
            TP_fast_assign(
                           __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
-                          __entry->seqno = req->seqno;
+                          __entry->seqno = req->fence.seqno;
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -595,7 +595,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
            TP_fast_assign(
                           __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
-                          __entry->seqno = req->seqno;
+                          __entry->seqno = req->fence.seqno;
                           __entry->blocking =
                                     mutex_is_locked(&req->i915->drm.struct_mutex);
                           ),
index b074f3d6d127fded56cdc49ea9326ced2407d51a..32ada41dedfb70dea13d0988fd618aab259bfbc8 100644 (file)
@@ -436,6 +436,7 @@ static int intel_breadcrumbs_signaler(void *arg)
                         */
                        intel_engine_remove_wait(engine,
                                                 &request->signaling.wait);
+                       fence_signal(&request->fence);
 
                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
@@ -482,7 +483,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
        }
 
        request->signaling.wait.tsk = b->signaler;
-       request->signaling.wait.seqno = request->seqno;
+       request->signaling.wait.seqno = request->fence.seqno;
        i915_gem_request_reference(request);
 
        /* First add ourselves into the list of waiters, but register our
@@ -504,8 +505,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
-               if (i915_seqno_passed(request->seqno,
-                                     to_signaler(parent)->seqno)) {
+               if (i915_seqno_passed(request->fence.seqno,
+                                     to_signaler(parent)->fence.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
index e3c9f04ea51ed2315bb139b3d0b5820e6994aa39..f4a35ec78481f850564b20d1db215aeb87dc2b09 100644 (file)
@@ -182,6 +182,8 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
        INIT_LIST_HEAD(&engine->execlist_queue);
        spin_lock_init(&engine->execlist_lock);
 
+       engine->fence_context = fence_context_alloc(1);
+
        intel_engine_init_hangcheck(engine);
        i915_gem_batch_pool_init(&engine->i915->drm, &engine->batch_pool);
 }
index 2e670f15881c0910812da2f432d11469a5c8194c..860dba2cf7b3d96b346c20cb43f7c5fbf49107c3 100644 (file)
@@ -1803,7 +1803,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
                                intel_hws_seqno_address(request->engine) |
                                MI_FLUSH_DW_USE_GTT);
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, request->seqno);
+       intel_logical_ring_emit(ringbuf, request->fence.seqno);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
index 94c8ef461721b8a9860529a91e18d81c06724464..af0bd71e3a36c270b59c20e2c253de0da06cd47f 100644 (file)
@@ -1348,7 +1348,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                                           PIPE_CONTROL_CS_STALL);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller_req->seqno);
+               intel_ring_emit(signaller, signaller_req->fence.seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1386,7 +1386,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller_req->seqno);
+               intel_ring_emit(signaller, signaller_req->fence.seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
@@ -1419,7 +1419,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                if (i915_mmio_reg_valid(mbox_reg)) {
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, signaller_req->seqno);
+                       intel_ring_emit(signaller, signaller_req->fence.seqno);
                }
        }
 
@@ -1455,7 +1455,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, req->seqno);
+       intel_ring_emit(engine, req->fence.seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
@@ -1704,7 +1704,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, req->seqno);
+       intel_ring_emit(engine, req->fence.seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
index df7587ab1ba78f288c918f69694553792297fc74..5cbafc00bfd5b42b55250061693669b1b0ab10b2 100644 (file)
@@ -146,6 +146,7 @@ struct intel_engine_cs {
        unsigned int exec_id;
        unsigned int hw_id;
        unsigned int guc_id; /* XXX same as hw_id? */
+       u64 fence_context;
        u32             mmio_base;
        unsigned int irq_shift;
        struct intel_ringbuffer *buffer;