diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..8527ea05124b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(ring, obj->gtt_offset);
+	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
 	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-		     I915_READ_START(ring) == obj->gtt_offset &&
+		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
 		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x head %08x tail %08x start %08x\n",
@@ -505,7 +505,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = obj->gtt_offset;
+	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL) {
 		ret = -ENOMEM;
@@ -836,7 +836,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -854,7 +854,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -873,7 +873,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -891,7 +891,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -910,7 +910,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -928,7 +928,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -1021,7 +1021,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
@@ -1045,7 +1045,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@ -1070,14 +1070,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (ring->irq_refcount.pm++ == 0) {
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
 		POSTING_READ(GEN6_PMIMR);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -1092,14 +1092,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (--ring->irq_refcount.pm == 0) {
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~0);
 		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
 		POSTING_READ(GEN6_PMIMR);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
@@ -1144,7 +1144,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		intel_ring_advance(ring);
 	} else {
 		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = obj->gtt_offset;
+		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
 
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1229,7 +1229,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
@@ -1316,7 +1316,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto err_unpin;
 
 	ring->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   ring->size);
 	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-		PM_VEBOX_CS_ERROR_INTERRUPT;
+	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 	ring->irq_get = hsw_vebox_get_irq;
 	ring->irq_put = hsw_vebox_put_irq;
 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;