/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
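
/*
 * For instance (an illustrative sketch, not code quoted from any real
 * caller), this ordering would risk deadlock and must never appear:
 *
 *	mutex_lock(&dpm_list_mtx);
 *	device_lock(dev);
 *
 * whereas taking device_lock() first and then calling device_pm_add(),
 * which takes dpm_list_mtx internally, is fine.
 */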

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                /* ">> 10" cheaply approximates a division by NSEC_PER_USEC */
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
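
/*
 * For example (an illustrative sketch, not code from this file): a
 * hypothetical driver that fills its dev_pm_ops with
 * SET_SYSTEM_SLEEP_PM_OPS() will have pm_op() pick its two handlers for
 * all of the sleep transitions above:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With CONFIG_HIBERNATE_CALLBACKS set, that macro routes freeze/poweroff
 * to foo_suspend() and thaw/restore to foo_resume() as well.
 */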

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is
 * running.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
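
/*
 * Note on callback selection, which follows the same pattern in every
 * phase below: a power domain takes precedence over the device type,
 * which takes precedence over the class, which takes precedence over
 * the bus; the driver's own callback is consulted only when none of
 * those layers supplies one.
 */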

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        bool put = false;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        pm_runtime_enable(dev);
        put = true;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        if (put)
                pm_runtime_put_sync(dev);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
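
/*
 * Taken together, a full resume thus runs in four phases: "noirq" and
 * "early" (dpm_resume_start()), followed by the regular "resume"
 * callbacks and ->complete() (dpm_resume_end()).
 */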


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(state);
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
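
/*
 * Note the unwind asymmetry above: when the "noirq" phase fails,
 * dpm_suspend_noirq() has already resumed its own phase internally, so
 * only dpm_resume_early() remains to be run here.
 */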

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
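
/*
 * A legacy callback uses the two-argument form below (a hypothetical
 * example, for illustration only); dev->bus->suspend and
 * dev->class->suspend share this signature:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;
 *	}
 */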

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        if (error) {
                pm_runtime_put_sync(dev);
                async_error = error;
        } else if (dev->power.is_suspended) {
                __pm_runtime_disable(dev, false);
        }

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
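
/*
 * The complete suspend sequence is therefore dpm_prepare() plus
 * dpm_suspend() (via dpm_suspend_start()), followed by
 * dpm_suspend_late() plus dpm_suspend_noirq() (via dpm_suspend_end()),
 * mirroring the resume sequence in reverse.
 */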

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
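
/*
 * Typical use (an illustrative sketch with a hypothetical driver): an
 * asynchronously suspending device that depends on a device outside its
 * parent/child chain can serialize against it explicitly:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->partner);
 *		if (error)
 *			return error;
 *		return foo_quiesce(foo);
 *	}
 */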

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
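
/*
 * Example caller (hypothetical, for illustration only): counting the
 * devices currently known to the PM core.
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *	dpm_for_each_dev(&count, foo_count_dev);
 */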