cpuidle: Move dev->last_residency update to driver enter routine; remove dev->last_state
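This patch converts the driver to the new cpuidle ->enter() calling convention: the callback receives the index of the target state instead of a struct cpuidle_state pointer, records the measured residency in dev->last_residency itself, and returns the index of the state it actually entered, or a negative errno if no state was entered. Condensed from the acpi_idle_enter_c1() changes below, the success path looks roughly like this (sketch_enter is an illustrative name, not part of the patch):

static int sketch_enter(struct cpuidle_device *dev, int index)
{
        /* The target state is now identified by index, not by pointer. */
        struct cpuidle_state *state = &dev->states[index];
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        ktime_t kt1, kt2;

        dev->last_residency = 0;        /* what the caller sees if we bail out */

        local_irq_disable();
        kt1 = ktime_get_real();
        acpi_idle_do_entry(cx);         /* the actual idle entry, as before */
        kt2 = ktime_get_real();

        /* The driver, not the cpuidle core, now fills in the residency. */
        dev->last_residency = (int)ktime_to_us(ktime_sub(kt2, kt1));
        local_irq_enable();

        return index;                   /* index of the state actually entered */
}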
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 431ab11c8c1b6736aec984754c9f6002d3542ad8..9cd08cecb3479a8546976b0efca2a8a7ebca46dd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -741,22 +741,24 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 /**
  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
  * @dev: the target CPU
- * @state: the state data
+ * @index: index of target state
  *
  * This is equivalent to the HALT instruction.
  */
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-                             struct cpuidle_state *state)
+                               int index)
 {
        ktime_t  kt1, kt2;
        s64 idle_time;
        struct acpi_processor *pr;
+       struct cpuidle_state *state = &dev->states[index];
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
        pr = __this_cpu_read(processors);
+       dev->last_residency = 0;
 
        if (unlikely(!pr))
-               return 0;
+               return -EINVAL;
 
        local_irq_disable();
 
@@ -764,7 +766,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
        if (acpi_idle_suspend) {
                local_irq_enable();
                cpu_relax();
-               return 0;
+               return -EINVAL;
        }
 
        lapic_timer_state_broadcast(pr, cx, 1);
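The early-exit paths change convention as well: under the old scheme a failed entry returned an idle time of 0, while the new one leaves dev->last_residency at 0 and returns a negative errno, so the caller can tell "state not entered" apart from "entered but woke immediately". A sketch of the bail-out pattern, condensing the checks that appear in these hunks (missing processor object, acpi_idle_suspend); the function name is illustrative only:

static int sketch_enter_bailout(struct cpuidle_device *dev, int index)
{
        struct acpi_processor *pr = __this_cpu_read(processors);

        dev->last_residency = 0;        /* nothing is accounted unless we idle */

        if (unlikely(!pr) || acpi_idle_suspend)
                return -EINVAL;         /* caller: this state was not entered */

        /* ... otherwise enter the state and fill dev->last_residency ... */
        return index;
}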
@@ -773,37 +775,46 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
        kt2 = ktime_get_real();
        idle_time =  ktime_to_us(ktime_sub(kt2, kt1));
 
+       /* Update device last_residency*/
+       dev->last_residency = (int)idle_time;
+
        local_irq_enable();
        cx->usage++;
        lapic_timer_state_broadcast(pr, cx, 0);
 
-       return idle_time;
+       return index;
 }
 
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @index: the index of suggested state
  */
 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-                                 struct cpuidle_state *state)
+                               int index)
 {
        struct acpi_processor *pr;
+       struct cpuidle_state *state = &dev->states[index];
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        ktime_t  kt1, kt2;
        s64 idle_time_ns;
        s64 idle_time;
 
        pr = __this_cpu_read(processors);
+       dev->last_residency = 0;
 
        if (unlikely(!pr))
-               return 0;
-
-       if (acpi_idle_suspend)
-               return(acpi_idle_enter_c1(dev, state));
+               return -EINVAL;
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EINVAL;
+       }
+
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
@@ -815,7 +826,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
                if (unlikely(need_resched())) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
-                       return 0;
+                       return -EINVAL;
                }
        }
 
@@ -837,6 +848,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        idle_time = idle_time_ns;
        do_div(idle_time, NSEC_PER_USEC);
 
+       /* Update device last_residency*/
+       dev->last_residency = (int)idle_time;
+
        /* Tell the scheduler how much we idled: */
        sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -848,7 +862,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
        lapic_timer_state_broadcast(pr, cx, 0);
        cx->time += idle_time;
-       return idle_time;
+       return index;
 }
 
 static int c3_cpu_count;
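For context, with dev->last_state gone the caller is assumed to rely entirely on the returned index and on dev->last_residency. This is a hedged sketch of how a cpuidle-core call site might consume them, not code from this patch; next_state stands for the index chosen by the governor:

static void sketch_account_idle(struct cpuidle_device *dev, int next_state)
{
        int entered_state = dev->states[next_state].enter(dev, next_state);

        if (entered_state < 0)
                return;         /* the driver bailed out; nothing to account */

        /*
         * Charge the residency to the state that was actually entered, which
         * may be a shallower one if the driver demoted the request.
         */
        dev->states[entered_state].time += dev->last_residency;
        dev->states[entered_state].usage++;
}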
@@ -857,14 +871,15 @@ static DEFINE_SPINLOCK(c3_lock);
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @index: the index of suggested state
  *
  * If BM is detected, the deepest non-C3 idle state is entered instead.
  */
 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-                             struct cpuidle_state *state)
+                               int index)
 {
        struct acpi_processor *pr;
+       struct cpuidle_state *state = &dev->states[index];
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        ktime_t  kt1, kt2;
        s64 idle_time_ns;
@@ -872,22 +887,26 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 
        pr = __this_cpu_read(processors);
+       dev->last_residency = 0;
 
        if (unlikely(!pr))
-               return 0;
+               return -EINVAL;
 
-       if (acpi_idle_suspend)
-               return(acpi_idle_enter_c1(dev, state));
+
+       if (acpi_idle_suspend) {
+               cpu_relax();
+               return -EINVAL;
+       }
 
        if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-               if (dev->safe_state) {
-                       dev->last_state = dev->safe_state;
-                       return dev->safe_state->enter(dev, dev->safe_state);
+               if (dev->safe_state_index >= 0) {
+                       return dev->states[dev->safe_state_index].enter(dev,
+                                               dev->safe_state_index);
                } else {
                        local_irq_disable();
                        acpi_safe_halt();
                        local_irq_enable();
-                       return 0;
+                       return -EINVAL;
                }
        }
 
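The bus-master fallback shows why returning an index matters: instead of recording the demotion in dev->last_state, acpi_idle_enter_bm() now tail-calls the shallower state's own enter handler and propagates its return value. Condensed from the hunk above, with added comments:

        /* On detected bus-master activity, demote rather than enter C3. */
        if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
                if (dev->safe_state_index >= 0)
                        /* Tail-call the shallower state; its index flows back
                         * to the caller, so accounting follows the demotion. */
                        return dev->states[dev->safe_state_index].enter(dev,
                                                dev->safe_state_index);
                /* No fallback registered (safe_state_index stayed at -1). */
                local_irq_disable();
                acpi_safe_halt();
                local_irq_enable();
                return -EINVAL;
        }

Combined with the caller-side sketch earlier, the residency of a demoted entry is then charged to dev->states[dev->safe_state_index] rather than to the C3-type state the governor asked for. dev->safe_state_index is initialised to -1 and recorded per state by acpi_processor_setup_cpuidle(), as the final hunks show.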
@@ -904,7 +923,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                if (unlikely(need_resched())) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
-                       return 0;
+                       return -EINVAL;
                }
        }
 
@@ -954,6 +973,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        idle_time = idle_time_ns;
        do_div(idle_time, NSEC_PER_USEC);
 
+       /* Update device last_residency*/
+       dev->last_residency = (int)idle_time;
+
        /* Tell the scheduler how much we idled: */
        sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -965,7 +987,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
        lapic_timer_state_broadcast(pr, cx, 0);
        cx->time += idle_time;
-       return idle_time;
+       return index;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
@@ -992,6 +1014,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
        }
 
        dev->cpu = pr->id;
+       dev->safe_state_index = -1;
        for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
                dev->states[i].name[0] = '\0';
                dev->states[i].desc[0] = '\0';
@@ -1027,13 +1050,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
                                state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
                        state->enter = acpi_idle_enter_c1;
-                       dev->safe_state = state;
+                       dev->safe_state_index = count;
                        break;
 
                        case ACPI_STATE_C2:
                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
                        state->enter = acpi_idle_enter_simple;
-                       dev->safe_state = state;
+                       dev->safe_state_index = count;
                        break;
 
                        case ACPI_STATE_C3: