git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge tag 'topic/drm-misc-2016-10-24' of git://anongit.freedesktop.org/drm-intel...
author      Dave Airlie <airlied@redhat.com>
            Tue, 25 Oct 2016 06:35:20 +0000 (16:35 +1000)
committer   Dave Airlie <airlied@redhat.com>
            Tue, 25 Oct 2016 06:35:20 +0000 (16:35 +1000)
First -misc pull for 4.10:
- drm_format rework from Laurent
- reservation patches from Chris that missed 4.9.
- aspect ratio support in infoframe helpers and drm mode/edid code
  (Shashank Sharma)
- rotation rework from Ville (first parts at least)
- another attempt at the CRC debugfs interface from Tomeu
- piles and piles of misc patches all over

* tag 'topic/drm-misc-2016-10-24' of git://anongit.freedesktop.org/drm-intel: (55 commits)
  drm: Use u64 for intermediate dotclock calculations
  drm/i915: Use the per-plane rotation property
  drm/omap: Use per-plane rotation property
  drm/omap: Set rotation property initial value to BIT(DRM_ROTATE_0) instead of 0
  drm/atmel-hlcdc: Use per-plane rotation property
  drm/arm: Use per-plane rotation property
  drm: Add support for optional per-plane rotation property
  drm/atomic: Reject attempts to use multiple rotation angles at once
  drm: Add drm_rotation_90_or_270()
  dma-buf/sync_file: hold reference to fence when creating sync_file
  drm/virtio: kconfig: Fixup white space.
  drm/fence: release fence reference when canceling event
  drm/i915: Handle early failure during intel_get_load_detect_pipe
  drm/fb_cma_helper: do not free fbdev if there is none
  drm: fix sparse warnings on undeclared symbols in crc debugfs
  gpu: Remove depends on RESET_CONTROLLER when not a provider
  i915: don't call drm_atomic_state_put on invalid pointer
  drm: Don't export the drm_fb_get_bpp_depth() function
  drm/arm: mali-dp: Replace drm_fb_get_bpp_depth() with drm_format_plane_cpp()
  drm: vmwgfx: Replace drm_fb_get_bpp_depth() with drm_format_info()
  ...
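The per-plane rotation rework in this pull replaces the old per-device dev->mode_config.rotation_property with drm_plane_create_rotation_property() and adds the drm_rotation_90_or_270() helper; the i915 hunks further down show the conversion. Below is a minimal sketch of how a driver might use the two new calls. Everything named foo_* is hypothetical, and the header locations are assumptions for this series; only the two DRM helpers visible in the diff are taken as given.

#include <linux/kernel.h>	/* swap() */
#include <drm/drm_blend.h>	/* DRM_ROTATE_*, drm_plane_create_rotation_property() */
#include <drm/drm_plane.h>
#include <drm/drm_rect.h>

/* Hypothetical plane init: expose a per-plane rotation property instead of
 * attaching the shared dev->mode_config.rotation_property. */
static int foo_plane_init_rotation(struct drm_plane *plane, bool has_90_270)
{
	unsigned int supported = DRM_ROTATE_0 | DRM_ROTATE_180;

	if (has_90_270)
		supported |= DRM_ROTATE_90 | DRM_ROTATE_270;

	return drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
						  supported);
}

/* Hypothetical atomic helper: the new drm_rotation_90_or_270() replaces
 * driver-local copies such as i915's intel_rotation_90_or_270(). */
static void foo_src_size(const struct drm_plane_state *state,
			 unsigned int *w, unsigned int *h)
{
	*w = drm_rect_width(&state->src) >> 16;
	*h = drm_rect_height(&state->src) >> 16;

	if (drm_rotation_90_or_270(state->rotation))
		swap(*w, *h);
}

The series also makes the atomic core reject rotation values with more than one angle bit set ("drm/atomic: Reject attempts to use multiple rotation angles at once"), so drivers no longer need to validate that combination themselves.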

drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_pm.c

index 1205790ed960c755780e2efa32300ac775c15fa1,8ea72d821525ecb73046899d2a6cf09b4774c875..800055c39cdbbe86366e12bbccc2d9f1d131e556
@@@ -104,8 -104,8 +104,8 @@@ int drm_debugfs_create_files(const stru
                ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
                                          root, tmp, &drm_debugfs_fops);
                if (!ent) {
 -                      DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
 -                                root->d_name.name, files[i].name);
 +                      DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
 +                                root, files[i].name);
                        kfree(tmp);
                        ret = -1;
                        goto fail;
@@@ -415,5 -415,37 +415,37 @@@ void drm_debugfs_connector_remove(struc
        connector->debugfs_entry = NULL;
  }
  
- #endif /* CONFIG_DEBUG_FS */
+ int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+ {
+       struct drm_minor *minor = crtc->dev->primary;
+       struct dentry *root;
+       char *name;
+
+       name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
+       if (!name)
+               return -ENOMEM;
+
+       root = debugfs_create_dir(name, minor->debugfs_root);
+       kfree(name);
+       if (!root)
+               return -ENOMEM;
+
+       crtc->debugfs_entry = root;
+
+       if (drm_debugfs_crtc_crc_add(crtc))
+               goto error;
  
+       return 0;
+
+ error:
+       drm_debugfs_crtc_remove(crtc);
+       return -ENOMEM;
+ }
+
+ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+ {
+       debugfs_remove_recursive(crtc->debugfs_entry);
+       crtc->debugfs_entry = NULL;
+ }
+ #endif /* CONFIG_DEBUG_FS */
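drm_debugfs_crtc_add() above gives every CRTC its own debugfs directory, crtc-<index>, under the DRM minor, and drm_debugfs_crtc_crc_add() populates it with the CRC interface mentioned in the pull summary. A rough userspace sketch of reading one CRC sample follows; the crc/control and crc/data file names and the "auto" source string are assumptions about that interface rather than something spelled out in this hunk, and dri/0 with crtc-0 is only an example minor/CRTC.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	/* Select a CRC source for CRTC 0 (path and "auto" value assumed). */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "auto", strlen("auto")) < 0)
		return 1;
	close(fd);

	/* Opening the data file starts CRC capture; each read returns one
	 * line containing a frame counter followed by the CRC words. */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return 1;

	buf[n] = '\0';
	printf("crc sample: %s", buf);
	return 0;
}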
index 0370b842d9cc20c2fdae37406c82a9546106e69b,9ffca2478e029e80bf53c0774a595d36ec25171b..3755ef935af49df30cc573cee27868340cb8f312
@@@ -409,20 -409,16 +409,16 @@@ int etnaviv_gem_cpu_prep(struct drm_gem
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
-       int ret;
-       if (op & ETNA_PREP_NOSYNC) {
-               if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
-                                                         write))
-                       return -EBUSY;
-       } else {
-               unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
-               ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
-                                                         write, true, remain);
-               if (ret <= 0)
-                       return ret == 0 ? -ETIMEDOUT : ret;
-       }
+       unsigned long remain =
+               op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
+       long lret;
+       lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+                                                  write, true, remain);
+       if (lret < 0)
+               return lret;
+       else if (lret == 0)
+               return remain == 0 ? -EBUSY : -ETIMEDOUT;
  
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                if (!etnaviv_obj->sgt) {
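The etnaviv_gem_cpu_prep() change above folds the ETNA_PREP_NOSYNC special case into a single reservation_object_wait_timeout_rcu() call: a zero timeout turns the wait into a non-blocking probe. A small restatement of the resulting return-value mapping, with a hypothetical helper name, purely for readability:

#include <linux/errno.h>

/* Hypothetical helper mirroring the mapping in the hunk above. */
static int etna_prep_wait_status(long lret, unsigned long remain)
{
	if (lret < 0)
		return lret;		/* interrupted or other error */
	if (lret == 0)
		return remain == 0 ?
			-EBUSY :	/* NOSYNC probe: fences still busy */
			-ETIMEDOUT;	/* timed wait ran out */
	return 0;			/* all fences signalled in time */
}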
@@@ -748,22 -744,19 +744,22 @@@ static struct page **etnaviv_gem_userpt
        int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        uintptr_t ptr;
 +      unsigned int flags = 0;
  
        pvec = drm_malloc_ab(npages, sizeof(struct page *));
        if (!pvec)
                return ERR_PTR(-ENOMEM);
  
 +      if (!etnaviv_obj->userptr.ro)
 +              flags |= FOLL_WRITE;
 +
        pinned = 0;
        ptr = etnaviv_obj->userptr.ptr;
  
        down_read(&mm->mmap_sem);
        while (pinned < npages) {
                ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
 -                                          !etnaviv_obj->userptr.ro, 0,
 -                                          pvec + pinned, NULL);
 +                                          flags, pvec + pinned, NULL);
                if (ret < 0)
                        break;
  
index fbcfed63a76e16ec59f465c96e9b5c7c8d37ffe2,6c5c36eba6cbb2f998d5c3446d31d10b1f1d615a..4c21d2ec2c51b86206963fd56e63935d6bf70f22
@@@ -2139,7 -2139,7 +2139,7 @@@ intel_fill_fb_ggtt_view(struct i915_ggt
                        const struct drm_framebuffer *fb,
                        unsigned int rotation)
  {
-       if (intel_rotation_90_or_270(rotation)) {
+       if (drm_rotation_90_or_270(rotation)) {
                *view = i915_ggtt_view_rotated;
                view->params.rotated = to_intel_framebuffer(fb)->rot_info;
        } else {
@@@ -2260,7 -2260,7 +2260,7 @@@ void intel_unpin_fb_obj(struct drm_fram
  static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
                          unsigned int rotation)
  {
-       if (intel_rotation_90_or_270(rotation))
+       if (drm_rotation_90_or_270(rotation))
                return to_intel_framebuffer(fb)->rotated[plane].pitch;
        else
                return fb->pitches[plane];
@@@ -2296,7 -2296,7 +2296,7 @@@ void intel_add_fb_offsets(int *x, int *
        const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
        unsigned int rotation = state->base.rotation;
  
-       if (intel_rotation_90_or_270(rotation)) {
+       if (drm_rotation_90_or_270(rotation)) {
                *x += intel_fb->rotated[plane].x;
                *y += intel_fb->rotated[plane].y;
        } else {
@@@ -2360,7 -2360,7 +2360,7 @@@ static u32 intel_adjust_tile_offset(in
                intel_tile_dims(dev_priv, &tile_width, &tile_height,
                                fb->modifier[plane], cpp);
  
-               if (intel_rotation_90_or_270(rotation)) {
+               if (drm_rotation_90_or_270(rotation)) {
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
@@@ -2416,7 -2416,7 +2416,7 @@@ static u32 _intel_compute_tile_offset(c
                intel_tile_dims(dev_priv, &tile_width, &tile_height,
                                fb_modifier, cpp);
  
-               if (intel_rotation_90_or_270(rotation)) {
+               if (drm_rotation_90_or_270(rotation)) {
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
@@@ -2976,7 -2976,7 +2976,7 @@@ int skl_check_plane_surface(struct inte
        int ret;
  
        /* Rotate src coordinates to match rotated GTT view */
-       if (intel_rotation_90_or_270(rotation))
+       if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                fb->width, fb->height, DRM_ROTATE_270);
  
@@@ -3276,7 -3276,7 +3276,7 @@@ u32 skl_plane_stride(const struct drm_f
         * The stride is either expressed as a multiple of 64 bytes chunks for
         * linear buffers or in number of tiles for tiled buffers.
         */
-       if (intel_rotation_90_or_270(rotation)) {
+       if (drm_rotation_90_or_270(rotation)) {
                int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  
                stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
@@@ -3408,8 -3408,6 +3408,8 @@@ static void skylake_update_primary_plan
        dst_w--;
        dst_h--;
  
 +      intel_crtc->dspaddr_offset = surf_addr;
 +
        intel_crtc->adjusted_x = src_x;
        intel_crtc->adjusted_y = src_y;
  
@@@ -3584,7 -3582,7 +3584,7 @@@ void intel_prepare_reset(struct drm_i91
        return;
  
  err:
-       drm_atomic_state_free(state);
+       drm_atomic_state_put(state);
  }
  
  void intel_finish_reset(struct drm_i915_private *dev_priv)
                intel_runtime_pm_disable_interrupts(dev_priv);
                intel_runtime_pm_enable_interrupts(dev_priv);
  
 +              intel_pps_unlock_regs_wa(dev_priv);
                intel_modeset_init_hw(dev);
  
                spin_lock_irq(&dev_priv->irq_lock);
                intel_hpd_init(dev_priv);
        }
  
+       if (state)
+               drm_atomic_state_put(state);
        drm_modeset_drop_locks(ctx);
        drm_modeset_acquire_fini(ctx);
        mutex_unlock(&dev->mode_config.mutex);
@@@ -4667,7 -4666,7 +4669,7 @@@ skl_update_scaler(struct intel_crtc_sta
                to_intel_crtc(crtc_state->base.crtc);
        int need_scaling;
  
-       need_scaling = intel_rotation_90_or_270(rotation) ?
+       need_scaling = drm_rotation_90_or_270(rotation) ?
                (src_h != dst_w || src_w != dst_h):
                (src_w != dst_w || src_h != dst_h);
  
@@@ -6885,7 -6884,7 +6887,7 @@@ static void intel_crtc_disable_noatomic
  
        dev_priv->display.crtc_disable(crtc_state, state);
  
-       drm_atomic_state_free(state);
+       drm_atomic_state_put(state);
  
        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                      crtc->base.id, crtc->name);
@@@ -9512,24 -9511,6 +9514,24 @@@ static void ironlake_compute_dpll(struc
        if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;
  
 +      /*
 +       * The high speed IO clock is only really required for
 +       * SDVO/HDMI/DP, but we also enable it for CRT to make it
 +       * possible to share the DPLL between CRT and HDMI. Enabling
 +       * the clock needlessly does no real harm, except use up a
 +       * bit of power potentially.
 +       *
 +       * We'll limit this to IVB with 3 pipes, since it has only two
 +       * DPLLs and so DPLL sharing is the only way to get three pipes
 +       * driving PCH ports at the same time. On SNB we could do this,
 +       * and potentially avoid enabling the second DPLL, but it's not
 +       * clear if it's a win or loss power wise. No point in doing
 +       * this on ILK at all since it has a fixed DPLL<->pipe mapping.
 +       */
 +      if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
 +          intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
 +              dpll |= DPLL_SDVO_HIGH_SPEED;
 +
        /* compute bitmask from p1 value */
        dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        /* also FPA1 */
        return true;
  
  fail:
-       drm_atomic_state_free(state);
-       drm_atomic_state_free(restore_state);
-       restore_state = state = NULL;
+       if (state) {
+               drm_atomic_state_put(state);
+               state = NULL;
+       }
+       if (restore_state) {
+               drm_atomic_state_put(restore_state);
+               restore_state = NULL;
+       }
  
        if (ret == -EDEADLK) {
                drm_modeset_backoff(ctx);
@@@ -11300,10 -11286,9 +11307,9 @@@ void intel_release_load_detect_pipe(str
                return;
  
        ret = drm_atomic_commit(state);
-       if (ret) {
+       if (ret)
                DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
-               drm_atomic_state_free(state);
-       }
+       drm_atomic_state_put(state);
  }
  
  static int i9xx_pll_refclk(struct drm_device *dev,
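Several hunks in this file convert drm_atomic_state_free() to the reference-counted drm_atomic_state_put(), dropping the caller's reference on both the success and the failure path (and taking an extra reference with drm_atomic_state_get() before queueing the nonblocking commit worker, further down). A minimal sketch of the resulting idiom, with a hypothetical wrapper name:

#include <drm/drmP.h>		/* DRM_DEBUG_KMS() */
#include <drm/drm_atomic.h>

/* Hypothetical caller: commit a constructed state and always drop our
 * reference, whether or not the commit succeeded. */
static void foo_commit_and_put(struct drm_atomic_state *state)
{
	int ret = drm_atomic_commit(state);

	if (ret)
		DRM_DEBUG_KMS("commit failed: %d\n", ret);

	drm_atomic_state_put(state);	/* unconditional, no more conditional free */
}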
@@@ -12371,8 -12356,7 +12377,7 @@@ retry
                        goto retry;
                }
  
-               if (ret)
-                       drm_atomic_state_free(state);
+               drm_atomic_state_put(state);
  
                if (ret == 0 && event) {
                        spin_lock_irq(&dev->event_lock);
@@@ -14385,8 -14369,8 +14390,8 @@@ static void intel_atomic_commit_tail(st
                 * SKL workaround: bspec recommends we disable the SAGV when we
                 * have more than one pipe enabled
                 */
 -              if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
 -                      skl_disable_sagv(dev_priv);
 +              if (!intel_can_enable_sagv(state))
 +                      intel_disable_sagv(dev_priv);
  
                intel_modeset_verify_disabled(dev);
        }
                intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
        }
  
 -      if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
 -          skl_can_enable_sagv(state))
 -              skl_enable_sagv(dev_priv);
 +      if (intel_state->modeset && intel_can_enable_sagv(state))
 +              intel_enable_sagv(dev_priv);
  
        drm_atomic_helper_commit_hw_done(state);
  
  
        drm_atomic_helper_commit_cleanup_done(state);
  
-       drm_atomic_state_free(state);
+       drm_atomic_state_put(state);
  
        /* As one of the primary mmio accessors, KMS has a high likelihood
         * of triggering bugs in unclaimed access. After we finish
@@@ -14540,6 -14525,7 +14545,7 @@@ static int intel_atomic_commit(struct d
        intel_shared_dpll_commit(state);
        intel_atomic_track_fbs(state);
  
+       drm_atomic_state_get(state);
        if (nonblock)
                queue_work(system_unbound_wq, &state->commit_work);
        else
@@@ -14581,9 -14567,8 +14587,8 @@@ retry
                goto retry;
        }
  
-       if (ret)
  out:
-               drm_atomic_state_free(state);
+       drm_atomic_state_put(state);
  }
  
  /*
@@@ -14901,6 -14886,7 +14906,7 @@@ static struct drm_plane *intel_primary_
        struct intel_plane *primary = NULL;
        struct intel_plane_state *state = NULL;
        const uint32_t *intel_primary_formats;
+       unsigned int supported_rotations;
        unsigned int num_formats;
        int ret;
  
        if (ret)
                goto fail;
  
-       if (INTEL_INFO(dev)->gen >= 4)
-               intel_create_rotation_property(dev, primary);
+       if (INTEL_GEN(dev) >= 9) {
+               supported_rotations =
+                       DRM_ROTATE_0 | DRM_ROTATE_90 |
+                       DRM_ROTATE_180 | DRM_ROTATE_270;
+       } else if (INTEL_GEN(dev) >= 4) {
+               supported_rotations =
+                       DRM_ROTATE_0 | DRM_ROTATE_180;
+       } else {
+               supported_rotations = DRM_ROTATE_0;
+       }
+       if (INTEL_GEN(dev) >= 4)
+               drm_plane_create_rotation_property(&primary->base,
+                                                  DRM_ROTATE_0,
+                                                  supported_rotations);
  
        drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
  
        return NULL;
  }
  
- void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
- {
-       if (!dev->mode_config.rotation_property) {
-               unsigned long flags = DRM_ROTATE_0 |
-                       DRM_ROTATE_180;
-               if (INTEL_INFO(dev)->gen >= 9)
-                       flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
-               dev->mode_config.rotation_property =
-                       drm_mode_create_rotation_property(dev, flags);
-       }
-       if (dev->mode_config.rotation_property)
-               drm_object_attach_property(&plane->base.base,
-                               dev->mode_config.rotation_property,
-                               plane->base.state->rotation);
- }
  static int
  intel_check_cursor_plane(struct drm_plane *plane,
                         struct intel_crtc_state *crtc_state,
@@@ -15131,17 -15112,11 +15132,11 @@@ static struct drm_plane *intel_cursor_p
        if (ret)
                goto fail;
  
-       if (INTEL_INFO(dev)->gen >= 4) {
-               if (!dev->mode_config.rotation_property)
-                       dev->mode_config.rotation_property =
-                               drm_mode_create_rotation_property(dev,
-                                                       DRM_ROTATE_0 |
-                                                       DRM_ROTATE_180);
-               if (dev->mode_config.rotation_property)
-                       drm_object_attach_property(&cursor->base.base,
-                               dev->mode_config.rotation_property,
-                               state->base.rotation);
-       }
+       if (INTEL_GEN(dev) >= 4)
+               drm_plane_create_rotation_property(&cursor->base,
+                                                  DRM_ROTATE_0,
+                                                  DRM_ROTATE_0 |
+                                                  DRM_ROTATE_180);
  
        if (INTEL_INFO(dev)->gen >=9)
                state->scaler_id = -1;
@@@ -16314,7 -16289,7 +16309,7 @@@ retry
                 * BIOS-programmed watermarks untouched and hope for the best.
                 */
                WARN(true, "Could not determine valid watermarks for inherited state\n");
-               goto fail;
+               goto put_state;
        }
  
        /* Write calculated watermark values back */
                dev_priv->display.optimize_watermarks(cs);
        }
  
-       drm_atomic_state_free(state);
+ put_state:
+       drm_atomic_state_put(state);
  fail:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
@@@ -16963,10 -16939,9 +16959,9 @@@ void intel_display_resume(struct drm_de
        drm_modeset_acquire_fini(&ctx);
        mutex_unlock(&dev->mode_config.mutex);
  
-       if (ret) {
+       if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
-               drm_atomic_state_free(state);
-       }
+       drm_atomic_state_put(state);
  }
  
  void intel_modeset_gem_init(struct drm_device *dev)
index a19ec06f9e4281e5cecadb7e960080b853eb5139,5145ff264c8ebfdd69ed3e03519019aae8233be6..3ffba2def09cdb33f43ed2f627d9b852ffb9a937
@@@ -263,7 -263,6 +263,7 @@@ struct intel_panel 
                bool enabled;
                bool combination_mode;  /* gen 2/4 only */
                bool active_low_pwm;
 +              bool alternate_pwm_increment;   /* lpt+ */
  
                /* PWM chip */
                bool util_pin_active_low;       /* bxt+ */
@@@ -1285,15 -1284,6 +1285,6 @@@ int intel_plane_atomic_calc_changes(str
  unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
                               uint64_t fb_modifier, unsigned int cpp);
  
- static inline bool
- intel_rotation_90_or_270(unsigned int rotation)
- {
-       return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
- }
- void intel_create_rotation_property(struct drm_device *dev,
-                                       struct intel_plane *plane);
  void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe);
  
@@@ -1742,9 -1732,9 +1733,9 @@@ void ilk_wm_get_hw_state(struct drm_dev
  void skl_wm_get_hw_state(struct drm_device *dev);
  void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                          struct skl_ddb_allocation *ddb /* out */);
 -bool skl_can_enable_sagv(struct drm_atomic_state *state);
 -int skl_enable_sagv(struct drm_i915_private *dev_priv);
 -int skl_disable_sagv(struct drm_i915_private *dev_priv);
 +bool intel_can_enable_sagv(struct drm_atomic_state *state);
 +int intel_enable_sagv(struct drm_i915_private *dev_priv);
 +int intel_disable_sagv(struct drm_i915_private *dev_priv);
  bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
                               const struct skl_ddb_allocation *new,
                               enum pipe pipe);
index a2f751cd187a2fe2d8552758b71326439aa205de,1472400ddce362e9a182661a8b6e1aabd67b4aea..e2f0a32279e70a4a02b5c659538e7b8e25cbb7bb
@@@ -2126,35 -2126,33 +2126,35 @@@ static void intel_read_wm_latency(struc
                wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
                                GEN9_MEM_LATENCY_LEVEL_MASK;
  
 +              /*
 +               * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
 +               * need to be disabled. We make sure to sanitize the values out
 +               * of the punit to satisfy this requirement.
 +               */
 +              for (level = 1; level <= max_level; level++) {
 +                      if (wm[level] == 0) {
 +                              for (i = level + 1; i <= max_level; i++)
 +                                      wm[i] = 0;
 +                              break;
 +                      }
 +              }
 +
                /*
                 * WaWmMemoryReadLatency:skl
                 *
                 * punit doesn't take into account the read latency so we need
 -               * to add 2us to the various latency levels we retrieve from
 -               * the punit.
 -               *   - W0 is a bit special in that it's the only level that
 -               *   can't be disabled if we want to have display working, so
 -               *   we always add 2us there.
 -               *   - For levels >=1, punit returns 0us latency when they are
 -               *   disabled, so we respect that and don't add 2us then
 -               *
 -               * Additionally, if a level n (n > 1) has a 0us latency, all
 -               * levels m (m >= n) need to be disabled. We make sure to
 -               * sanitize the values out of the punit to satisfy this
 -               * requirement.
 +               * to add 2us to the various latency levels we retrieve from the
 +               * punit when level 0 response data is 0us.
                 */
 -              wm[0] += 2;
 -              for (level = 1; level <= max_level; level++)
 -                      if (wm[level] != 0)
 +              if (wm[0] == 0) {
 +                      wm[0] += 2;
 +                      for (level = 1; level <= max_level; level++) {
 +                              if (wm[level] == 0)
 +                                      break;
                                wm[level] += 2;
 -                      else {
 -                              for (i = level + 1; i <= max_level; i++)
 -                                      wm[i] = 0;
 -
 -                              break;
                        }
 +              }
 +
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                uint64_t sskpd = I915_READ64(MCH_SSKPD);
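The reordered intel_read_wm_latency() logic above first zeroes every level at or above the first level that reports 0us, and only then applies the 2us WaWmMemoryReadLatency bump, and only when level 0 itself read back as 0us. A standalone worked example with made-up punit values:

#include <stdio.h>

int main(void)
{
	unsigned int wm[8] = { 0, 4, 0, 7, 9, 3, 2, 1 };	/* raw punit reads */
	int max_level = 7, level, i;

	/* Disable every level at or above the first one reporting 0us. */
	for (level = 1; level <= max_level; level++) {
		if (wm[level] == 0) {
			for (i = level + 1; i <= max_level; i++)
				wm[i] = 0;
			break;
		}
	}

	/* WaWmMemoryReadLatency: only when level 0 itself reads back 0us. */
	if (wm[0] == 0) {
		wm[0] += 2;
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0)
				break;
			wm[level] += 2;
		}
	}

	for (level = 0; level <= max_level; level++)
		printf("%u ", wm[level]);	/* prints: 2 6 0 0 0 0 0 0 */
	printf("\n");
	return 0;
}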
  
@@@ -2879,19 -2877,6 +2879,19 @@@ skl_wm_plane_id(const struct intel_plan
        }
  }
  
 +static bool
 +intel_has_sagv(struct drm_i915_private *dev_priv)
 +{
 +      if (IS_KABYLAKE(dev_priv))
 +              return true;
 +
 +      if (IS_SKYLAKE(dev_priv) &&
 +          dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
 +              return true;
 +
 +      return false;
 +}
 +
  /*
   * SAGV dynamically adjusts the system agent voltage and clock frequencies
   * depending on power and performance requirements. The display engine access
   *  - We're not using an interlaced display configuration
   */
  int
 -skl_enable_sagv(struct drm_i915_private *dev_priv)
 +intel_enable_sagv(struct drm_i915_private *dev_priv)
  {
        int ret;
  
 -      if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
 -          dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
 +      if (!intel_has_sagv(dev_priv))
 +              return 0;
 +
 +      if (dev_priv->sagv_status == I915_SAGV_ENABLED)
                return 0;
  
        DRM_DEBUG_KMS("Enabling the SAGV\n");
         * Some skl systems, pre-release machines in particular,
         * don't actually have an SAGV.
         */
 -      if (ret == -ENXIO) {
 +      if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
 -              dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
 +              dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                return 0;
        } else if (ret < 0) {
                DRM_ERROR("Failed to enable the SAGV\n");
                return ret;
        }
  
 -      dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
 +      dev_priv->sagv_status = I915_SAGV_ENABLED;
        return 0;
  }
  
  static int
 -skl_do_sagv_disable(struct drm_i915_private *dev_priv)
 +intel_do_sagv_disable(struct drm_i915_private *dev_priv)
  {
        int ret;
        uint32_t temp = GEN9_SAGV_DISABLE;
  }
  
  int
 -skl_disable_sagv(struct drm_i915_private *dev_priv)
 +intel_disable_sagv(struct drm_i915_private *dev_priv)
  {
        int ret, result;
  
 -      if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
 -          dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
 +      if (!intel_has_sagv(dev_priv))
 +              return 0;
 +
 +      if (dev_priv->sagv_status == I915_SAGV_DISABLED)
                return 0;
  
        DRM_DEBUG_KMS("Disabling the SAGV\n");
        mutex_lock(&dev_priv->rps.hw_lock);
  
        /* bspec says to keep retrying for at least 1 ms */
 -      ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
 +      ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
        mutex_unlock(&dev_priv->rps.hw_lock);
  
        if (ret == -ETIMEDOUT) {
         * Some skl systems, pre-release machines in particular,
         * don't actually have an SAGV.
         */
 -      if (result == -ENXIO) {
 +      if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
                DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
 -              dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
 +              dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                return 0;
        } else if (result < 0) {
                DRM_ERROR("Failed to disable the SAGV\n");
                return result;
        }
  
 -      dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
 +      dev_priv->sagv_status = I915_SAGV_DISABLED;
        return 0;
  }
  
 -bool skl_can_enable_sagv(struct drm_atomic_state *state)
 +bool intel_can_enable_sagv(struct drm_atomic_state *state)
  {
        struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        int level, plane;
  
 +      if (!intel_has_sagv(dev_priv))
 +              return false;
 +
        /*
         * SKL workaround: bspec recommends we disable the SAGV when we have
         * more than one pipe enabled
@@@ -3173,7 -3151,7 +3173,7 @@@ skl_plane_downscale_amount(const struc
        src_h = drm_rect_height(&pstate->base.src);
        dst_w = drm_rect_width(&pstate->base.dst);
        dst_h = drm_rect_height(&pstate->base.dst);
-       if (intel_rotation_90_or_270(pstate->base.rotation))
+       if (drm_rotation_90_or_270(pstate->base.rotation))
                swap(dst_w, dst_h);
  
        downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@@ -3204,7 -3182,7 +3204,7 @@@ skl_plane_relative_data_rate(const stru
        width = drm_rect_width(&intel_pstate->base.src) >> 16;
        height = drm_rect_height(&intel_pstate->base.src) >> 16;
  
-       if (intel_rotation_90_or_270(pstate->rotation))
+       if (drm_rotation_90_or_270(pstate->rotation))
                swap(width, height);
  
        /* for planar format */
@@@ -3304,7 -3282,7 +3304,7 @@@ skl_ddb_min_alloc(const struct drm_plan
        src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
        src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
  
-       if (intel_rotation_90_or_270(pstate->rotation))
+       if (drm_rotation_90_or_270(pstate->rotation))
                swap(src_w, src_h);
  
        /* Halve UV plane width and height for NV12 */
        else
                plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
  
-       if (intel_rotation_90_or_270(pstate->rotation)) {
+       if (drm_rotation_90_or_270(pstate->rotation)) {
                switch (plane_bpp) {
                case 1:
                        min_scanlines = 32;
@@@ -3494,14 -3472,29 +3494,14 @@@ static uint32_t skl_wm_method1(uint32_
  }
  
  static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
 -                             uint32_t horiz_pixels, uint8_t cpp,
 -                             uint64_t tiling, uint32_t latency)
 +                             uint32_t latency, uint32_t plane_blocks_per_line)
  {
        uint32_t ret;
 -      uint32_t plane_bytes_per_line, plane_blocks_per_line;
        uint32_t wm_intermediate_val;
  
        if (latency == 0)
                return UINT_MAX;
  
 -      plane_bytes_per_line = horiz_pixels * cpp;
 -
 -      if (tiling == I915_FORMAT_MOD_Y_TILED ||
 -          tiling == I915_FORMAT_MOD_Yf_TILED) {
 -              plane_bytes_per_line *= 4;
 -              plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 -              plane_blocks_per_line /= 4;
 -      } else if (tiling == DRM_FORMAT_MOD_NONE) {
 -              plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
 -      } else {
 -              plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 -      }
 -
        wm_intermediate_val = latency * pixel_rate;
        ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
                                plane_blocks_per_line;
@@@ -3552,7 -3545,6 +3552,7 @@@ static int skl_compute_plane_wm(const s
        uint8_t cpp;
        uint32_t width = 0, height = 0;
        uint32_t plane_pixel_rate;
 +      uint32_t y_tile_minimum, y_min_scanlines;
  
        if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
                *enabled = false;
        width = drm_rect_width(&intel_pstate->base.src) >> 16;
        height = drm_rect_height(&intel_pstate->base.src) >> 16;
  
-       if (intel_rotation_90_or_270(pstate->rotation))
+       if (drm_rotation_90_or_270(pstate->rotation))
                swap(width, height);
  
        cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
  
-       if (intel_rotation_90_or_270(pstate->rotation)) {
++      if (drm_rotation_90_or_270(pstate->rotation)) {
 +              int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
 +                      drm_format_plane_cpp(fb->pixel_format, 1) :
 +                      drm_format_plane_cpp(fb->pixel_format, 0);
 +
 +              switch (cpp) {
 +              case 1:
 +                      y_min_scanlines = 16;
 +                      break;
 +              case 2:
 +                      y_min_scanlines = 8;
 +                      break;
 +              default:
 +                      WARN(1, "Unsupported pixel depth for rotation");
 +              case 4:
 +                      y_min_scanlines = 4;
 +                      break;
 +              }
 +      } else {
 +              y_min_scanlines = 4;
 +      }
 +
 +      plane_bytes_per_line = width * cpp;
 +      if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 +          fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
 +              plane_blocks_per_line =
 +                    DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
 +              plane_blocks_per_line /= y_min_scanlines;
 +      } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
 +              plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
 +                                      + 1;
 +      } else {
 +              plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 +      }
 +
        method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
        method2 = skl_wm_method2(plane_pixel_rate,
                                 cstate->base.adjusted_mode.crtc_htotal,
 -                               width,
 -                               cpp,
 -                               fb->modifier[0],
 -                               latency);
 +                               latency,
 +                               plane_blocks_per_line);
  
 -      plane_bytes_per_line = width * cpp;
 -      plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 +      y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
  
        if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
            fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
 -              uint32_t min_scanlines = 4;
 -              uint32_t y_tile_minimum;
 -              if (drm_rotation_90_or_270(pstate->rotation)) {
 -                      int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
 -                              drm_format_plane_cpp(fb->pixel_format, 1) :
 -                              drm_format_plane_cpp(fb->pixel_format, 0);
 -
 -                      switch (cpp) {
 -                      case 1:
 -                              min_scanlines = 16;
 -                              break;
 -                      case 2:
 -                              min_scanlines = 8;
 -                              break;
 -                      case 8:
 -                              WARN(1, "Unsupported pixel depth for rotation");
 -                      }
 -              }
 -              y_tile_minimum = plane_blocks_per_line * min_scanlines;
                selected_result = max(method2, y_tile_minimum);
        } else {
                if ((ddb_allocation / plane_blocks_per_line) >= 1)
  
        if (level >= 1 && level <= 7) {
                if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 -                  fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
 -                      res_lines += 4;
 -              else
 +                  fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
 +                      res_blocks += y_tile_minimum;
 +                      res_lines += y_min_scanlines;
 +              } else {
                        res_blocks++;
 +              }
        }
  
        if (res_blocks >= ddb_allocation || res_lines > 31) {
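The reworked skl_compute_plane_wm() above hoists the rotation-dependent y_min_scanlines and computes plane_blocks_per_line once, passing it to skl_wm_method2() instead of recomputing it there. A standalone arithmetic check with assumed numbers (3840 pixels wide, 4 bytes per pixel, unrotated so y_min_scanlines = 4) showing the new Y-tiled expression reproduces the old "multiply bytes by 4, then divide blocks by 4" form:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 3840, cpp = 4, y_min_scanlines = 4;
	unsigned int plane_bytes_per_line = width * cpp;	/* 15360 */

	/* Old Y-tiled math: bytes * 4, 512-byte blocks, then / 4. */
	unsigned int old_blocks =
		DIV_ROUND_UP(plane_bytes_per_line * 4, 512) / 4;

	/* New math: same shape, parameterised so the 90/270 rotation cases
	 * can reuse it with 8 or 16 scanlines. */
	unsigned int new_blocks =
		DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512) /
		y_min_scanlines;

	/* prints: old=30 new=30 y_tile_minimum=120 */
	printf("old=%u new=%u y_tile_minimum=%u\n",
	       old_blocks, new_blocks, new_blocks * y_min_scanlines);
	return 0;
}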
@@@ -3962,41 -3939,6 +3962,41 @@@ pipes_modified(struct drm_atomic_state 
        return ret;
  }
  
 +int
 +skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 +{
 +      struct drm_atomic_state *state = cstate->base.state;
 +      struct drm_device *dev = state->dev;
 +      struct drm_crtc *crtc = cstate->base.crtc;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 +      struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
 +      struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
 +      struct drm_plane_state *plane_state;
 +      struct drm_plane *plane;
 +      enum pipe pipe = intel_crtc->pipe;
 +      int id;
 +
 +      WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
 +
 +      drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
 +              id = skl_wm_plane_id(to_intel_plane(plane));
 +
 +              if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
 +                                      &new_ddb->plane[pipe][id]) &&
 +                  skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
 +                                      &new_ddb->y_plane[pipe][id]))
 +                      continue;
 +
 +              plane_state = drm_atomic_get_plane_state(state, plane);
 +              if (IS_ERR(plane_state))
 +                      return PTR_ERR(plane_state);
 +      }
 +
 +      return 0;
 +}
 +
  static int
  skl_compute_ddb(struct drm_atomic_state *state)
  {
                if (ret)
                        return ret;
  
 -              ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
 +              ret = skl_ddb_add_affected_planes(cstate);
                if (ret)
                        return ret;
        }