git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/gpu/drm/i915/i915_gem.c
drm/i915: Preallocate next seqno before touching the ring
[karo-tx-linux.git] / drivers / gpu / drm / i915 / i915_gem.c
index 9be450e7c54fbc70ae5bebe31dd7a04c321b2d36..3b9b250ceac44f3cf8d49d7a6eed758e79edb27f 100644 (file)
@@ -1857,11 +1857,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                              struct intel_ring_buffer *ring,
-                              u32 seqno)
+                              struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
        obj->ring = ring;
@@ -1922,26 +1922,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        WARN_ON(i915_verify_lists(dev));
 }
 
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 seqno = dev_priv->next_seqno;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int ret, i, j;
 
-       /* reserve 0 for non-seqno */
-       if (++dev_priv->next_seqno == 0)
-               dev_priv->next_seqno = 1;
+       /* The hardware uses various monotonic 32-bit counters, if we
+        * detect that they will wraparound we need to idle the GPU
+        * and reset those counters.
+        */
+       ret = 0;
+       for_each_ring(ring, dev_priv, i) {
+               for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+                       ret |= ring->sync_seqno[j] != 0;
+       }
+       if (ret == 0)
+               return ret;
 
-       return seqno;
+       ret = i915_gpu_idle(dev);
+       if (ret)
+               return ret;
+
+       i915_gem_retire_requests(dev);
+       for_each_ring(ring, dev_priv, i) {
+               for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+                       ring->sync_seqno[j] = 0;
+       }
+
+       return 0;
 }
 
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
-       if (ring->outstanding_lazy_request == 0)
-               ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* reserve 0 for non-seqno */
+       if (dev_priv->next_seqno == 0) {
+               int ret = i915_gem_handle_seqno_wrap(dev);
+               if (ret)
+                       return ret;
 
-       return ring->outstanding_lazy_request;
+               dev_priv->next_seqno = 1;
+       }
+
+       *seqno = dev_priv->next_seqno++;
+       return 0;
 }
 
 int
@@ -1952,7 +1980,6 @@ i915_add_request(struct intel_ring_buffer *ring,
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        u32 request_ring_position;
-       u32 seqno;
        int was_empty;
        int ret;
 
@@ -1971,7 +1998,6 @@ i915_add_request(struct intel_ring_buffer *ring,
        if (request == NULL)
                return -ENOMEM;
 
-       seqno = i915_gem_next_request_seqno(ring);
 
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
@@ -1980,15 +2006,13 @@ i915_add_request(struct intel_ring_buffer *ring,
         */
        request_ring_position = intel_ring_get_tail(ring);
 
-       ret = ring->add_request(ring, &seqno);
+       ret = ring->add_request(ring);
        if (ret) {
                kfree(request);
                return ret;
        }
 
-       trace_i915_gem_request_add(ring, seqno);
-
-       request->seqno = seqno;
+       request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
        request->tail = request_ring_position;
        request->emitted_jiffies = jiffies;
@@ -2006,6 +2030,7 @@ i915_add_request(struct intel_ring_buffer *ring,
                spin_unlock(&file_priv->mm.lock);
        }
 
+       trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_request = 0;
 
        if (!dev_priv->mm.suspended) {
@@ -2022,7 +2047,7 @@ i915_add_request(struct intel_ring_buffer *ring,
        }
 
        if (out_seqno)
-               *out_seqno = seqno;
+               *out_seqno = request->seqno;
        return 0;
 }
 
@@ -2120,7 +2145,6 @@ void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
        uint32_t seqno;
-       int i;
 
        if (list_empty(&ring->request_list))
                return;
@@ -2129,10 +2153,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
        seqno = ring->get_seqno(ring, true);
 
-       for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
-               if (seqno >= ring->sync_seqno[i])
-                       ring->sync_seqno[i] = 0;
-
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;