Merge branch 'drm-radeon-testing' of ../drm-radeon-next into drm-core-next
author Dave Airlie <airlied@redhat.com>
Tue, 3 Jan 2012 09:43:28 +0000 (09:43 +0000)
committer Dave Airlie <airlied@redhat.com>
Tue, 3 Jan 2012 09:45:12 +0000 (09:45 +0000)
This merges the evergreen HDMI audio support.

* 'drm-radeon-testing' of ../drm-radeon-next:
  drm/radeon/kms: define TMDS/LVTM HDMI enabling bits
  drm/radeon/kms: workaround invalid AVI infoframe checksum issue
  drm/radeon/kms: setup HDMI mode on Evergreen encoders
  drm/radeon/kms: support for audio on Evergreen
  drm/radeon/kms: minor HDMI audio cleanups
  drm/radeon/kms: do not force DVI mode on DCE4 if audio is on

Conflicts:
drivers/gpu/drm/radeon/evergreen.c
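
For orientation, the register-level surface of the new audio support is visible in the drivers/gpu/drm/radeon/evergreen_reg.h hunk at the bottom of this diff: six HDMI blocks plus the EVERGREEN_AUDIO_* PLL and enable registers. Below is a minimal illustrative sketch (not code from this series; the helper name and block indexing are assumptions) of how those per-block offsets compose:

	#include <stdint.h>

	/* Base addresses of the six HDMI blocks, per the comment added in
	 * evergreen_reg.h: 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30. */
	static const uint32_t hdmi_block_base[] = {
		0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30,
	};

	/* Illustrative only: address of the HDMI config register for one block,
	 * i.e. base + EVERGREEN_HDMI_CONFIG_OFFSET (0xf0). The driver's own
	 * per-encoder lookup is not shown here. */
	static uint32_t hdmi_config_reg(unsigned int block)
	{
		return hdmi_block_base[block] + 0xf0;
	}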

drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_reg.h

index 1934728e2465b0e281e644d03d22221e0eb8f86d,a7da8d43716190774f2b6e37e3a7dc812b313b6c..ccde2c9540e29f871fecbbbe5dd0ad2337d46a2d
@@@ -40,8 -40,6 +40,8 @@@
  static void evergreen_gpu_init(struct radeon_device *rdev);
  void evergreen_fini(struct radeon_device *rdev);
  void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 +extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
 +                                   int ring, u32 cp_int_cntl);
  
  void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
  {
@@@ -84,7 -82,6 +84,7 @@@ u32 evergreen_page_flip(struct radeon_d
  {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
 +      int i;
  
        /* Lock the graphics update lock */
        tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
               (u32)crtc_base);
  
        /* Wait for update_pending to go high. */
 -      while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
 +      for (i = 0; i < rdev->usec_timeout; i++) {
 +              if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
 +                      break;
 +              udelay(1);
 +      }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
  
        /* Unlock the lock, so double-buffering can take place inside vblank */
@@@ -164,57 -157,6 +164,57 @@@ int sumo_get_temp(struct radeon_device 
        return actual_temp * 1000;
  }
  
 +void sumo_pm_init_profile(struct radeon_device *rdev)
 +{
 +      int idx;
 +
 +      /* default */
 +      rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 +      rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 +      rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
 +
 +      /* low,mid sh/mh */
 +      if (rdev->flags & RADEON_IS_MOBILITY)
 +              idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
 +      else
 +              idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 +
 +      rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +
 +      rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
 +
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
 +
 +      /* high sh/mh */
 +      idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 +      rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
 +              rdev->pm.power_state[idx].num_clock_modes - 1;
 +
 +      rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
 +      rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
 +              rdev->pm.power_state[idx].num_clock_modes - 1;
 +}
 +
  void evergreen_pm_misc(struct radeon_device *rdev)
  {
        int req_ps_idx = rdev->pm.requested_power_state_index;
@@@ -1277,7 -1219,7 +1277,7 @@@ void evergreen_mc_program(struct radeon
                WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                        rdev->mc.vram_end >> 12);
        }
 -      WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
 +      WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
        if (rdev->flags & RADEON_IS_IGP) {
                tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
                tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
   */
  void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
  {
 +      struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
 +
        /* set to DX10/11 mode */
 -      radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
 -      radeon_ring_write(rdev, 1);
 +      radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
 +      radeon_ring_write(ring, 1);
        /* FIXME: implement */
 -      radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 -      radeon_ring_write(rdev,
 +      radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 +      radeon_ring_write(ring,
  #ifdef __BIG_ENDIAN
                          (2 << 0) |
  #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
 -      radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 -      radeon_ring_write(rdev, ib->length_dw);
 +      radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
 +      radeon_ring_write(ring, ib->length_dw);
  }
  
  
@@@ -1364,73 -1304,71 +1364,73 @@@ static int evergreen_cp_load_microcode(
  
  static int evergreen_cp_start(struct radeon_device *rdev)
  {
 +      struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r, i;
        uint32_t cp_me;
  
 -      r = radeon_ring_lock(rdev, 7);
 +      r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
 -      radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
 -      radeon_ring_write(rdev, 0x1);
 -      radeon_ring_write(rdev, 0x0);
 -      radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
 -      radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 -      radeon_ring_write(rdev, 0);
 -      radeon_ring_write(rdev, 0);
 -      radeon_ring_unlock_commit(rdev);
 +      radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
 +      radeon_ring_write(ring, 0x1);
 +      radeon_ring_write(ring, 0x0);
 +      radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
 +      radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 +      radeon_ring_write(ring, 0);
 +      radeon_ring_write(ring, 0);
 +      radeon_ring_unlock_commit(rdev, ring);
  
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
  
 -      r = radeon_ring_lock(rdev, evergreen_default_size + 19);
 +      r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
  
        /* setup clear context state */
 -      radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 -      radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 +      radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 +      radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  
        for (i = 0; i < evergreen_default_size; i++)
 -              radeon_ring_write(rdev, evergreen_default_state[i]);
 +              radeon_ring_write(ring, evergreen_default_state[i]);
  
 -      radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 -      radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
 +      radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 +      radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
  
        /* set clear context state */
 -      radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
 -      radeon_ring_write(rdev, 0);
 +      radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
 +      radeon_ring_write(ring, 0);
  
        /* SQ_VTX_BASE_VTX_LOC */
 -      radeon_ring_write(rdev, 0xc0026f00);
 -      radeon_ring_write(rdev, 0x00000000);
 -      radeon_ring_write(rdev, 0x00000000);
 -      radeon_ring_write(rdev, 0x00000000);
 +      radeon_ring_write(ring, 0xc0026f00);
 +      radeon_ring_write(ring, 0x00000000);
 +      radeon_ring_write(ring, 0x00000000);
 +      radeon_ring_write(ring, 0x00000000);
  
        /* Clear consts */
 -      radeon_ring_write(rdev, 0xc0036f00);
 -      radeon_ring_write(rdev, 0x00000bc4);
 -      radeon_ring_write(rdev, 0xffffffff);
 -      radeon_ring_write(rdev, 0xffffffff);
 -      radeon_ring_write(rdev, 0xffffffff);
 +      radeon_ring_write(ring, 0xc0036f00);
 +      radeon_ring_write(ring, 0x00000bc4);
 +      radeon_ring_write(ring, 0xffffffff);
 +      radeon_ring_write(ring, 0xffffffff);
 +      radeon_ring_write(ring, 0xffffffff);
  
 -      radeon_ring_write(rdev, 0xc0026900);
 -      radeon_ring_write(rdev, 0x00000316);
 -      radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 -      radeon_ring_write(rdev, 0x00000010); /*  */
 +      radeon_ring_write(ring, 0xc0026900);
 +      radeon_ring_write(ring, 0x00000316);
 +      radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 +      radeon_ring_write(ring, 0x00000010); /*  */
  
 -      radeon_ring_unlock_commit(rdev);
 +      radeon_ring_unlock_commit(rdev, ring);
  
        return 0;
  }
  
  int evergreen_cp_resume(struct radeon_device *rdev)
  {
 +      struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 tmp;
        u32 rb_bufsz;
        int r;
        RREG32(GRBM_SOFT_RESET);
  
        /* Set ring buffer size */
 -      rb_bufsz = drm_order(rdev->cp.ring_size / 8);
 +      rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
  #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
  #endif
        WREG32(CP_RB_CNTL, tmp);
 -      WREG32(CP_SEM_WAIT_TIMER, 0x4);
 +      WREG32(CP_SEM_WAIT_TIMER, 0x0);
  
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
 -      rdev->cp.wptr = 0;
 -      WREG32(CP_RB_WPTR, rdev->cp.wptr);
 +      ring->wptr = 0;
 +      WREG32(CP_RB_WPTR, ring->wptr);
  
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
  
 -      WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
 +      WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
  
 -      rdev->cp.rptr = RREG32(CP_RB_RPTR);
 +      ring->rptr = RREG32(CP_RB_RPTR);
  
        evergreen_cp_start(rdev);
 -      rdev->cp.ready = true;
 -      r = radeon_ring_test(rdev);
 +      ring->ready = true;
 +      r = radeon_ring_test(rdev, ring);
        if (r) {
 -              rdev->cp.ready = false;
 +              ring->ready = false;
                return r;
        }
        return 0;
@@@ -2359,7 -2297,7 +2359,7 @@@ int evergreen_mc_init(struct radeon_dev
        return 0;
  }
  
 -bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 +bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  {
        u32 srbm_status;
        u32 grbm_status;
        grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
        grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
        if (!(grbm_status & GUI_ACTIVE)) {
 -              r100_gpu_lockup_update(lockup, &rdev->cp);
 +              r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
 -      r = radeon_ring_lock(rdev, 2);
 +      r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
 -              radeon_ring_write(rdev, 0x80000000);
 -              radeon_ring_write(rdev, 0x80000000);
 -              radeon_ring_unlock_commit(rdev);
 +              radeon_ring_write(ring, 0x80000000);
 +              radeon_ring_write(ring, 0x80000000);
 +              radeon_ring_unlock_commit(rdev, ring);
        }
 -      rdev->cp.rptr = RREG32(CP_RB_RPTR);
 -      return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
 +      ring->rptr = RREG32(CP_RB_RPTR);
 +      return r100_gpu_cp_is_lockup(rdev, lockup, ring);
  }
  
  static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@@ -2476,13 -2414,7 +2476,13 @@@ void evergreen_disable_interrupt_state(
  {
        u32 tmp;
  
 -      WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 +      if (rdev->family >= CHIP_CAYMAN) {
 +              cayman_cp_int_cntl_setup(rdev, 0,
 +                                       CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 +              cayman_cp_int_cntl_setup(rdev, 1, 0);
 +              cayman_cp_int_cntl_setup(rdev, 2, 0);
 +      } else
 +              WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
  int evergreen_irq_set(struct radeon_device *rdev)
  {
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
 +      u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
        hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
  
 -      if (rdev->irq.sw_int) {
 -              DRM_DEBUG("evergreen_irq_set: sw int\n");
 -              cp_int_cntl |= RB_INT_ENABLE;
 -              cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 +      if (rdev->family >= CHIP_CAYMAN) {
 +              /* enable CP interrupts on all rings */
 +              if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
 +                      DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
 +                      cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 +              }
 +              if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
 +                      DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
 +                      cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
 +              }
 +              if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
 +                      DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
 +                      cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
 +              }
 +      } else {
 +              if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
 +                      DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
 +                      cp_int_cntl |= RB_INT_ENABLE;
 +                      cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 +              }
        }
 +
        if (rdev->irq.crtc_vblank_int[0] ||
            rdev->irq.pflip[0]) {
                DRM_DEBUG("evergreen_irq_set: vblank 0\n");
                grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
        }
  
 -      WREG32(CP_INT_CNTL, cp_int_cntl);
 +      if (rdev->family >= CHIP_CAYMAN) {
 +              cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
 +              cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
 +              cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
 +      } else
 +              WREG32(CP_INT_CNTL, cp_int_cntl);
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
  
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@@ -3053,24 -2962,11 +3053,24 @@@ restart_ih
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
                        DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
 -                      radeon_fence_process(rdev);
 +                      radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
 -                      radeon_fence_process(rdev);
 +                      if (rdev->family >= CHIP_CAYMAN) {
 +                              switch (src_data) {
 +                              case 0:
 +                                      radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 +                                      break;
 +                              case 1:
 +                                      radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
 +                                      break;
 +                              case 2:
 +                                      radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
 +                                      break;
 +                              }
 +                      } else
 +                              radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
  
  static int evergreen_startup(struct radeon_device *rdev)
  {
 +      struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
  
        /* enable pcie gen2 link */
        if (r)
                return r;
  
 +      r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
 +      if (r) {
 +              dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
 +              return r;
 +      }
 +
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
        }
        evergreen_irq_set(rdev);
  
 -      r = radeon_ring_init(rdev, rdev->cp.ring_size);
 +      r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 +                           R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 +                           0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = evergreen_cp_load_microcode(rdev);
        if (r)
                return r;
  
 +      r = radeon_ib_pool_start(rdev);
 +      if (r)
 +              return r;
 +
 +      r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
 +      if (r) {
 +              DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 +              rdev->accel_working = false;
++      }
++
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
                return r;
        }
  
@@@ -3212,29 -3094,38 +3217,30 @@@ int evergreen_resume(struct radeon_devi
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
  
 +      rdev->accel_working = true;
        r = evergreen_startup(rdev);
        if (r) {
                DRM_ERROR("evergreen startup failed on resume\n");
                return r;
        }
  
 -      r = r600_ib_test(rdev);
 -      if (r) {
 -              DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 -              return r;
 -      }
 -
 -      r = r600_audio_init(rdev);
 -      if (r) {
 -              DRM_ERROR("radeon: audio resume failed\n");
 -              return r;
 -      }
 -
        return r;
  
  }
  
  int evergreen_suspend(struct radeon_device *rdev)
  {
 +      struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 +
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
 +      radeon_ib_pool_suspend(rdev);
 +      r600_blit_suspend(rdev);
        r700_cp_stop(rdev);
 -      rdev->cp.ready = false;
 +      ring->ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
 -      r600_blit_suspend(rdev);
  
        return 0;
  }
@@@ -3309,8 -3200,8 +3315,8 @@@ int evergreen_init(struct radeon_devic
        if (r)
                return r;
  
 -      rdev->cp.ring_obj = NULL;
 -      r600_ring_init(rdev, 1024 * 1024);
 +      rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 +      r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
  
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
        if (r)
                return r;
  
 +      r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
 +      if (r) {
 +              dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 +              rdev->accel_working = false;
 +      }
 +
        r = evergreen_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
 +              r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                evergreen_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
 -      if (rdev->accel_working) {
 -              r = radeon_ib_pool_init(rdev);
 -              if (r) {
 -                      DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
 -                      rdev->accel_working = false;
 -              }
 -              r = r600_ib_test(rdev);
 -              if (r) {
 -                      DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 -                      rdev->accel_working = false;
 -              }
 -      }
        return 0;
  }
  
  void evergreen_fini(struct radeon_device *rdev)
  {
+       r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
 -      radeon_ib_pool_fini(rdev);
 +      r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
 +      radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
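
One behavioral change worth noting before the register header below: the evergreen_page_flip() hunk near the top replaces an unbounded busy-wait on EVERGREEN_GRPH_SURFACE_UPDATE_PENDING with a poll capped at rdev->usec_timeout iterations. A generic, self-contained sketch of that pattern follows, with read_update_pending()/delay_us() as hypothetical stand-ins for the driver's RREG32()/udelay() calls:

	#include <stdbool.h>

	/* Hypothetical stand-ins for the driver's register read and udelay(). */
	extern bool read_update_pending(void);
	extern void delay_us(unsigned int usec);

	/* Poll a status flag with an upper bound instead of spinning forever,
	 * mirroring the loop evergreen_page_flip() adopts in this diff. */
	static bool wait_update_pending(unsigned int timeout_us)
	{
		unsigned int i;

		for (i = 0; i < timeout_us; i++) {
			if (read_update_pending())
				return true;
			delay_us(1);
		}
		return false; /* timed out */
	}
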
index 7d7f2155e34c305729f8487c55fc8c2c741ccc44,fd38ba405a5d8d99bf0ccebd0e34e7474e175c4f..4215de95477e4a863c83183d90133c9d07358ee4
  #define EVERGREEN_P1PLL_SS_CNTL                         0x414
  #define EVERGREEN_P2PLL_SS_CNTL                         0x454
  #       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
+ #define EVERGREEN_AUDIO_PLL1_MUL                      0x5b0
+ #define EVERGREEN_AUDIO_PLL1_DIV                      0x5b4
+ #define EVERGREEN_AUDIO_PLL1_UNK                      0x5bc
+ #define EVERGREEN_AUDIO_ENABLE                                0x5e78
+ #define EVERGREEN_AUDIO_VENDOR_ID                     0x5ec0
  /* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
  #define EVERGREEN_GRPH_ENABLE                           0x6800
  #define EVERGREEN_GRPH_CONTROL                          0x6804
  #       define EVERGREEN_GRPH_DEPTH_8BPP                0
  #       define EVERGREEN_GRPH_DEPTH_16BPP               1
  #       define EVERGREEN_GRPH_DEPTH_32BPP               2
 +#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
 +#       define EVERGREEN_ADDR_SURF_2_BANK               0
 +#       define EVERGREEN_ADDR_SURF_4_BANK               1
 +#       define EVERGREEN_ADDR_SURF_8_BANK               2
 +#       define EVERGREEN_ADDR_SURF_16_BANK              3
 +#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
 +#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
 +#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
 +#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
 +#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
 +#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
  #       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
  /* 8 BPP */
  #       define EVERGREEN_GRPH_FORMAT_INDEXED            0
  #       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
  #       define EVERGREEN_GRPH_FORMAT_RGB111110          6
  #       define EVERGREEN_GRPH_FORMAT_BGR101111          7
 +#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
 +#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
 +#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
 +#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
 +#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
 +#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
 +#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
 +#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
 +#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
 +#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
 +#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
 +#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
 +#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
 +#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
 +#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
 +#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
 +#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
 +#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
  #       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
  #       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
  #       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
  #define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
  #define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
  
+ /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+ #define EVERGREEN_HDMI_BASE                           0x7030
+ #define EVERGREEN_HDMI_CONFIG_OFFSET                  0xf0
  #endif
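
As a usage note on the tiling fields added to EVERGREEN_GRPH_CONTROL in the hunk above: they are plain shift-and-mask bitfields and simply OR together into one register value. A minimal sketch, assuming the EVERGREEN_GRPH_*/EVERGREEN_ADDR_SURF_* macros above are in scope; the function and the chosen field values are arbitrary and only demonstrate the encoding:

	#include <stdint.h>

	static uint32_t example_grph_tiling_bits(void)
	{
		uint32_t ctl = 0;

		ctl |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);           /* bits 3:2   */
		ctl |= EVERGREEN_GRPH_BANK_WIDTH(EVERGREEN_ADDR_SURF_BANK_WIDTH_1);    /* bits 7:6   */
		ctl |= EVERGREEN_GRPH_BANK_HEIGHT(EVERGREEN_ADDR_SURF_BANK_HEIGHT_2);  /* bits 12:11 */
		ctl |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_256B); /* bits 15:13 */
		ctl |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1); /* bits 19:18 */

		return ctl;
	}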