diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1373552732e633e4c0ccd5a366f6ed3443485f44..49a188718f9da9297fed0b44c82f11708f92a153 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2335,7 +2335,8 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 static void hsw_compute_wm_parameters(struct drm_device *dev,
                                      struct hsw_pipe_wm_parameters *params,
                                      uint32_t *wm,
-                                     struct hsw_wm_maximums *lp_max_1_2)
+                                     struct hsw_wm_maximums *lp_max_1_2,
+                                     struct hsw_wm_maximums *lp_max_5_6)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
@@ -2391,15 +2392,17 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
        }
 
        if (pipes_active > 1) {
-               lp_max_1_2->pri = sprites_enabled ? 128 : 256;
-               lp_max_1_2->spr = 128;
-               lp_max_1_2->cur = 64;
+               lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
+               lp_max_1_2->spr = lp_max_5_6->spr = 128;
+               lp_max_1_2->cur = lp_max_5_6->cur = 64;
        } else {
                lp_max_1_2->pri = sprites_enabled ? 384 : 768;
+               lp_max_5_6->pri = sprites_enabled ? 128 : 768;
                lp_max_1_2->spr = 384;
-               lp_max_1_2->cur = 255;
+               lp_max_5_6->spr = 640;
+               lp_max_1_2->cur = lp_max_5_6->cur = 255;
        }
-       lp_max_1_2->fbc = 15;
+       lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
 }
 
 static void hsw_compute_wm_results(struct drm_device *dev,
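Worked out from the values above: with a single active pipe the display data buffer holds 768 blocks in total (pri = 768 when no sprites are enabled). Under the default 1/2 partitioning, primary and sprite planes split it evenly (384/384); under 5/6 partitioning the sprite plane gets 5/6 of it (640 = 5/6 * 768) and the primary plane keeps the remaining 1/6 (128). Cursor and FBC limits are the same either way, and with more than one active pipe both partitionings collapse to identical limits, which is why haswell_update_wm() further down only computes a second set of results when the two maximums actually differ.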
@@ -2457,6 +2460,32 @@ static void hsw_compute_wm_results(struct drm_device *dev,
        }
 }
 
+/* Find the result with the highest level enabled. Check for enable_fbc_wm in
+ * case both are at the same level. Prefer r1 in case they're the same. */
+struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+                                          struct hsw_wm_values *r2)
+{
+       int i, val_r1 = 0, val_r2 = 0;
+
+       for (i = 0; i < 3; i++) {
+               if (r1->wm_lp[i] & WM3_LP_EN)
+                       val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
+               if (r2->wm_lp[i] & WM3_LP_EN)
+                       val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+       }
+
+       if (val_r1 == val_r2) {
+               if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+                       return r2;
+               else
+                       return r1;
+       } else if (val_r1 > val_r2) {
+               return r1;
+       } else {
+               return r2;
+       }
+}
+
 /*
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
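The selection rule in hsw_find_best_result() above boils down to: compare the latency field of the highest LP watermark level each candidate managed to enable, break a tie in favour of the candidate whose FBC watermark is enabled, and fall back to r1. A minimal standalone sketch of that rule, using hypothetical types instead of the i915 structures:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the fields hsw_find_best_result() inspects. */
struct wm_candidate {
	uint32_t best_latency;	/* latency of the highest enabled LP level, 0 if none */
	bool fbc_wm;		/* FBC watermark enabled */
};

static const struct wm_candidate *
pick_best(const struct wm_candidate *r1, const struct wm_candidate *r2)
{
	if (r1->best_latency == r2->best_latency)
		return (r2->fbc_wm && !r1->fbc_wm) ? r2 : r1;

	return r1->best_latency > r2->best_latency ? r1 : r2;
}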
@@ -2557,14 +2586,27 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 static void haswell_update_wm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct hsw_wm_maximums lp_max_1_2;
+       struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
        struct hsw_pipe_wm_parameters params[3];
-       struct hsw_wm_values results;
+       struct hsw_wm_values results_1_2, results_5_6, *best_results;
        uint32_t wm[5];
+       enum hsw_data_buf_partitioning partitioning;
+
+       hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
 
-       hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2);
-       hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results);
-       hsw_write_wm_values(dev_priv, &results, HSW_DATA_BUF_PART_1_2);
+       hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
+       if (lp_max_1_2.pri != lp_max_5_6.pri) {
+               hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
+                                      &results_5_6);
+               best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+       } else {
+               best_results = &results_1_2;
+       }
+
+       partitioning = (best_results == &results_1_2) ?
+                      HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
+
+       hsw_write_wm_values(dev_priv, best_results, partitioning);
 }
 
 static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
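haswell_update_wm() now computes watermark results for both data buffer layouts, asks hsw_find_best_result() which one reaches the higher level, and hands the matching enum value to hsw_write_wm_values(). The body of hsw_write_wm_values() is not part of this diff; as a sketch only, and assuming the WM_MISC register and its WM_MISC_DATA_PARTITION_5_6 bit from this tree's i915_reg.h, programming the chosen layout would look roughly like:

	uint32_t val = I915_READ(WM_MISC);

	if (partitioning == HSW_DATA_BUF_PART_1_2)
		val &= ~WM_MISC_DATA_PARTITION_5_6;
	else
		val |= WM_MISC_DATA_PARTITION_5_6;

	I915_WRITE(WM_MISC, val);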
@@ -3074,7 +3116,7 @@ static void gen6_disable_rps(struct drm_device *dev)
        I915_WRITE(GEN6_RC_CONTROL, 0);
        I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
        I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-       I915_WRITE(GEN6_PMIER, 0);
+       I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
        /* Complete PM interrupt masking here doesn't race with the rps work
         * item again unmasking PM interrupts because that is using a different
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
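Instead of zeroing GEN6_PMIER outright, the disable path now clears only the RPS event bits, the gen6 enable path below ORs the same bits back in, and the stale-interrupt ack in the next hunk likewise clears only those bits in PMIIR; any enables that other users of the PM interrupt registers may have set are left untouched. The read-modify-write pattern, pulled out into a hypothetical helper (not a function in this tree) for illustration:

/* Hypothetical helper: touch only the RPS event bits in PMIER,
 * preserving whatever else is enabled in the register. */
static void gen6_rps_irq_set(struct drm_i915_private *dev_priv, bool enable)
{
	uint32_t pmier = I915_READ(GEN6_PMIER);

	if (enable)
		pmier |= GEN6_PM_RPS_EVENTS;
	else
		pmier &= ~GEN6_PM_RPS_EVENTS;

	I915_WRITE(GEN6_PMIER, pmier);
}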
@@ -3084,7 +3126,7 @@ static void gen6_disable_rps(struct drm_device *dev)
        dev_priv->rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->rps.lock);
 
-       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 }
 
 static void valleyview_disable_rps(struct drm_device *dev)
@@ -3265,12 +3307,15 @@ static void gen6_enable_rps(struct drm_device *dev)
        gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
        /* requires MSI enabled */
-       I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
+       I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
        spin_lock_irq(&dev_priv->rps.lock);
-       WARN_ON(dev_priv->rps.pm_iir != 0);
-       I915_WRITE(GEN6_PMIMR, 0);
+       /* FIXME: Our interrupt enabling sequence is bonghits.
+        * dev_priv->rps.pm_iir really should be 0 here. */
+       dev_priv->rps.pm_iir = 0;
+       I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->rps.lock);
-       /* enable all PM interrupts */
+       /* unmask all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
 
        rc6vids = 0;
@@ -3533,7 +3578,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
        valleyview_set_rps(dev_priv->dev, rpe);
 
        /* requires MSI enabled */
-       I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
+       I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
        spin_lock_irq(&dev_priv->rps.lock);
        WARN_ON(dev_priv->rps.pm_iir != 0);
        I915_WRITE(GEN6_PMIMR, 0);