pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
ret = 0;
if (seqno == ring->outstanding_lazy_request)
- ret = i915_add_request(ring, NULL, NULL);
+ ret = i915_add_request(ring, NULL);
return ret;
}
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
- list_del(&obj->gtt_list);
+ list_del(&obj->global_list);
ops->put_pages(obj);
obj->pages = NULL;
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
- gtt_list) {
+ global_list) {
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
i915_gem_evict_everything(dev_priv->dev);
- list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+ global_list)
i915_gem_object_put_pages(obj);
}
if (ret)
return ret;
- list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
return 0;
}
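+/* Add a request to the ring, recording the batch object and current context
+ * so that a later GPU hang can be attributed to them.
+ */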
-int
-i915_add_request(struct intel_ring_buffer *ring,
- struct drm_file *file,
- u32 *out_seqno)
+int __i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_object *obj,
+ u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
- u32 request_ring_position;
+ u32 request_ring_position, request_start;
int was_empty;
int ret;
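+ /* Record where this request's commands will begin in the ring; together
+ * with request->tail this lets hang handling attribute a hang to it. */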
+ request_start = intel_ring_get_tail(ring);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
+ request->head = request_start;
request->tail = request_ring_position;
+ request->ctx = ring->last_context;
+ request->batch_obj = obj;
+
+ /* Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when this
+ * request is retired will the batch_obj be moved onto the
+ * inactive_list and lose its active reference. Hence we do not need
+ * to explicitly hold another reference here.
+ */
+
+ if (request->ctx)
+ i915_gem_context_reference(request->ctx);
+
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
spin_unlock(&file_priv->mm.lock);
}
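+/* Hang "blame" helpers: determine whether the GPU's active head (ACTHD)
+ * was executing a given batch object or a given request's span of the ring
+ * when the hang was detected.
+ */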
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+{
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return true;
+
+ return false;
+}
+
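+/* A request's commands occupy the ring from request->head to request->tail;
+ * the range may wrap past the end of the ring, hence the two-way comparison.
+ */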
+static bool i915_head_inside_request(const u32 acthd_unmasked,
+ const u32 request_start,
+ const u32 request_end)
+{
+ const u32 acthd = acthd_unmasked & HEAD_ADDR;
+
+ if (request_start < request_end) {
+ if (acthd >= request_start && acthd < request_end)
+ return true;
+ } else if (request_start > request_end) {
+ if (acthd >= request_start || acthd < request_end)
+ return true;
+ }
+
+ return false;
+}
+
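+/* A request is judged guilty if the head was inside its batch buffer
+ * ("inside") or still within its ring commands ("flushing").
+ */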
+static bool i915_request_guilty(struct drm_i915_gem_request *request,
+ const u32 acthd, bool *inside)
+{
+ /* There is a possibility that the unmasked head address, while pointing
+ * inside the ring, also matches the batch_obj address range. However
+ * this is extremely unlikely.
+ */
+
+ if (request->batch_obj) {
+ if (i915_head_inside_object(acthd, request->batch_obj)) {
+ *inside = true;
+ return true;
+ }
+ }
+
+ if (i915_head_inside_request(acthd, request->head, request->tail)) {
+ *inside = false;
+ return true;
+ }
+
+ return false;
+}
+
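+/* Record the hang in the owning context's (or file's) hang statistics:
+ * the guilty request bumps batch_active, the remainder bump batch_pending.
+ */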
+static void i915_set_reset_status(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_request *request,
+ u32 acthd)
+{
+ struct i915_ctx_hang_stats *hs = NULL;
+ bool inside, guilty;
+
+ /* Innocent until proven guilty */
+ guilty = false;
+
+ if (ring->hangcheck.action != wait &&
+ i915_request_guilty(request, acthd, &inside)) {
+ DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+ ring->name,
+ inside ? "inside" : "flushing",
+ request->batch_obj ?
+ request->batch_obj->gtt_offset : 0,
+ request->ctx ? request->ctx->id : 0,
+ acthd);
+
+ guilty = true;
+ }
+
+ /* If contexts are disabled or this is the default context, use
+ * the per-file file_priv->hang_stats instead.
+ */
+ if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
+ hs = &request->ctx->hang_stats;
+ else if (request->file_priv)
+ hs = &request->file_priv->hang_stats;
+
+ if (hs) {
+ if (guilty)
+ hs->batch_active++;
+ else
+ hs->batch_pending++;
+ }
+}
+
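+/* Release a request, dropping the context reference taken in
+ * __i915_add_request().
+ */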
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
+{
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+
+ if (request->ctx)
+ i915_gem_context_unreference(request->ctx);
+
+ kfree(request);
+}
+
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
+ u32 completed_seqno;
+ u32 acthd;
+
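+ /* Sample the GPU head and the last completed seqno so that every request
+ * still outstanding at reset time gets its status recorded. */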
+ acthd = intel_ring_get_active_head(ring);
+ completed_seqno = ring->get_seqno(ring, false);
+
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
struct drm_i915_gem_request,
list);
- list_del(&request->list);
- i915_gem_request_remove_from_client(request);
- kfree(request);
+ if (request->seqno > completed_seqno)
+ i915_set_reset_status(ring, request, acthd);
+
+ i915_gem_free_request(request);
}
while (!list_empty(&ring->active_list)) {
*/
ring->last_retired_head = request->tail;
- list_del(&request->list);
- i915_gem_request_remove_from_client(request);
- kfree(request);
+ i915_gem_free_request(request);
}
/* Move any buffers on the active list that are no longer referenced
idle = true;
for_each_ring(ring, dev_priv, i) {
if (ring->gpu_caches_dirty)
- i915_add_request(ring, NULL, NULL);
+ i915_add_request(ring, NULL);
idle &= list_empty(&ring->request_list);
}
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
+ i915_gem_object_unpin_pages(obj);
list_del(&obj->mm_list);
- list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
return fence - dev_priv->fence_regs;
}
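+/* Arguments for i915_gem_write_fence__ipi(), packed so the fence update can
+ * be performed from the IPI on each CPU.
+ */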
+struct write_fence {
+ struct drm_device *dev;
+ struct drm_i915_gem_object *obj;
+ int fence;
+};
+
static void i915_gem_write_fence__ipi(void *data)
{
+ struct write_fence *args = data;
+
+ /* Required for SNB+ with LLC */
wbinvd();
+
+ /* Required for VLV */
+ i915_gem_write_fence(args->dev, args->fence, args->obj);
}
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fence_reg = fence_number(dev_priv, fence);
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct write_fence args = {
+ .dev = obj->base.dev,
+ .fence = fence_number(dev_priv, fence),
+ .obj = enable ? obj : NULL,
+ };
/* In order to fully serialize access to the fenced region and
* the update to the fence register we need to take extreme
* SNB+ we need to take a step further and emit an explicit wbinvd()
* on each processor in order to manually flush all memory
* transactions before updating the fence register.
+ *
+ * However, Valleyview complicates matters. There the wbinvd is
+ * insufficient and, unlike SNB/IVB, requires the serialising
+ * register write. (Note that the register write by itself is
+ * conversely not sufficient for SNB+.) To compromise, we do both.
*/
- if (HAS_LLC(obj->base.dev))
- on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
- i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
+ if (INTEL_INFO(args.dev)->gen >= 6)
+ on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
+ else
+ i915_gem_write_fence(args.dev, args.fence, args.obj);
if (enable) {
- obj->fence_reg = fence_reg;
+ obj->fence_reg = args.fence;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
struct drm_i915_gem_object *obj;
int err = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
if (obj->gtt_space == NULL) {
printk(KERN_ERR "object found on GTT list with no space reserved\n");
err++;
struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
+ size_t gtt_max = map_and_fenceable ?
+ dev_priv->gtt.mappable_end : dev_priv->gtt.total;
int ret;
fence_size = i915_gem_get_gtt_size(dev,
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->base.size >
- (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
- DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ if (obj->base.size > gtt_max) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%ld\n",
+ obj->base.size,
+ map_and_fenceable ? "mappable" : "total",
+ gtt_max);
return -E2BIG;
}
return -ENOMEM;
}
- search_free:
- if (map_and_fenceable)
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
- size, alignment, obj->cache_level,
- 0, dev_priv->gtt.mappable_end);
- else
- ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
- size, alignment, obj->cache_level);
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment,
+ obj->cache_level, 0, gtt_max);
if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
return ret;
}
- list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+ list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->gtt_space = node;
obj->map_and_fenceable = mappable && fenceable;
- i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
const struct drm_i915_gem_object_ops *ops)
{
INIT_LIST_HEAD(&obj->mm_list);
- INIT_LIST_HEAD(&obj->gtt_list);
+ INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
dev_priv->mm.interruptible = was_interruptible;
}
- obj->pages_pin_count = 0;
+ /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
+ * before progressing. */
+ if (obj->stolen)
+ i915_gem_object_unpin_pages(obj);
+
+ if (WARN_ON(obj->pages_pin_count))
+ obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
goto cleanup_bsd_ring;
}
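+ /* Initialise the video enhancement (VEBOX) ring on hardware that has one. */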
+ if (HAS_VEBOX(dev)) {
+ ret = intel_init_vebox_ring_buffer(dev);
+ if (ret)
+ goto cleanup_blt_ring;
+ }
+
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
- goto cleanup_blt_ring;
+ goto cleanup_vebox_ring;
return 0;
+cleanup_vebox_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
}
cnt = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;