/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
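                /* ">> 10" approximates a divide by 1000 (ns -> usecs). */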
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If set, wait unconditionally; otherwise wait only if async PM is
 *         enabled and the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

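/*
 * Illustrative sketch (hypothetical driver code, not taken from this file):
 * a driver that fills in its struct dev_pm_ops as below would have
 * foo_suspend() returned by pm_op() for PM_EVENT_SUSPEND and foo_resume()
 * for PM_EVENT_RESUME; SET_SYSTEM_SLEEP_PM_OPS() also wires the same pair
 * up as the hibernation (freeze/thaw/poweroff/restore) callbacks.
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              return 0;       // quiesce the hardware here
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              return 0;       // bring the hardware back up here
 *      }
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *      };
 */
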
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the callback returned by this
 * function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the callback returned
 * by this function is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

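        /* Re-enable runtime PM, disabled at the end of __device_suspend(). */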
        pm_runtime_enable(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

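/*
 * Illustrative sketch (hypothetical driver code): a device is opted in to
 * asynchronous suspend/resume with device_enable_async_suspend(), typically
 * at probe time; user space can still veto async PM globally through
 * /sys/power/pm_async, which is what pm_async_enabled reflects.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              device_enable_async_suspend(&pdev->dev);
 *              return 0;
 *      }
 */
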
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put_sync(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

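/*
 * Illustrative sketch (simplified from the core suspend path in
 * kernel/power/suspend.c): dpm_suspend_end() and its siblings are paired
 * roughly as follows; each suspend entry point rolls itself back on failure,
 * and dpm_resume_end() handles whatever was left suspended or prepared.
 *
 *      error = dpm_suspend_start(PMSG_SUSPEND);
 *      if (!error) {
 *              error = dpm_suspend_end(PMSG_SUSPEND);
 *              if (!error) {
 *                      // ...enter the sleep state, then wake up...
 *                      dpm_resume_start(PMSG_RESUME);
 *              }
 *      }
 *      dpm_resume_end(PMSG_RESUME);
 */
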
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
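                /* Propagate the wakeup-path marker up to the parent. */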
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        if (error)
                async_error = error;
        else if (dev->power.is_suspended)
                __pm_runtime_disable(dev, false);

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
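                        /* Treat -EAGAIN from ->prepare() as nonfatal. */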
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
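
/*
 * Illustrative sketch (hypothetical driver code): a suspend callback whose
 * device depends on some other, otherwise unrelated device can synchronize
 * on that device's PM transition explicitly. The "companion" pointer below
 * is purely hypothetical.
 *
 *      static struct device *companion;
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              int error = device_pm_wait_for_dev(dev, companion);
 *
 *              if (error)
 *                      return error;
 *              // ...quiesce the hardware...
 *              return 0;
 *      }
 */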

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
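
/*
 * Illustrative sketch (hypothetical caller): counting the devices currently
 * on dpm_list with dpm_for_each_dev().
 *
 *      static void count_dev(struct device *dev, void *data)
 *      {
 *              int *count = data;
 *
 *              (*count)++;
 *      }
 *
 *      int n = 0;
 *      dpm_for_each_dev(&n, count_dev);
 */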