/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are kept in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are inserted
 * at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                /* ns >> 10 is a cheap approximation of microseconds */
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

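/*
 * Editor's illustrative sketch, not part of this file: a driver usually
 * feeds pm_op() above through a dev_pm_ops instance.  With the hypothetical
 * "foo" callbacks below, pm_op() returns foo_suspend for PM_EVENT_SUSPEND
 * and foo_resume for PM_EVENT_RESUME; SIMPLE_DEV_PM_OPS() reuses the same
 * pair for the hibernation events as well.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce the hardware here
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// reinitialize the hardware here
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo", .pm = &foo_pm_ops },
 *	};
 */
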
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is
 * running.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is running.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

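/*
 * Editor's illustrative sketch, not part of this file: a single dev_pm_ops
 * may provide callbacks for all three suspend phases.  For
 * PM_EVENT_SUSPEND, pm_op(), pm_late_early_op() and pm_noirq_op() would
 * return .suspend, .suspend_late and .suspend_noirq from the structure
 * below, and the matching .resume* members on the way back.  The "foo"
 * functions are hypothetical.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.suspend_late	= foo_suspend_late,
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 *		.resume_early	= foo_resume_early,
 *		.resume		= foo_resume,
 *	};
 */
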
static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

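/*
 * Editor's note, not part of this file: between dpm_suspend_noirq() and
 * dpm_resume_noirq() ordinary interrupt handlers do not run, because
 * suspend_device_irqs() has disabled them.  An interrupt that must stay
 * alive across that window (a wakeup or power-controller line, say) is
 * requested with IRQF_NO_SUSPEND; the handler and name below are
 * hypothetical:
 *
 *	ret = request_irq(irq, foo_pmic_irq, IRQF_NO_SUSPEND,
 *			  "foo-pmic", foo);
 */
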
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

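/*
 * Editor's note, not part of this file: a driver opts a device into the
 * asynchronous path tested by is_async() by setting power.async_suspend,
 * normally once at probe time:
 *
 *	device_enable_async_suspend(&pdev->dev);
 *
 * User space can flip the same flag through the device's power/async
 * attribute in sysfs.  Even then, the device is only handled
 * asynchronously if pm_async_enabled is set and, on resume, if PM
 * tracing is disabled.
 */
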
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put_sync(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

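/*
 * Editor's sketch, not part of this file: one full system sleep cycle
 * strings the entry points of this file together roughly as follows
 * (error handling omitted):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	error = dpm_suspend_end(PMSG_SUSPEND);		// late + noirq
 *	// ...the platform enters the sleep state and wakes up...
 *	dpm_resume_start(PMSG_RESUME);			// noirq + early
 *	dpm_resume_end(PMSG_RESUME);			// resume + complete
 *
 * dpm_suspend_end() and dpm_resume_start() bracket the window in which
 * device interrupts are disabled.
 */
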
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

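/*
 * Editor's illustrative sketch, not part of this file: the callbacks fed
 * to legacy_suspend() take the pm_message_t directly rather than coming
 * from a per-phase dev_pm_ops slot, e.g. for a hypothetical legacy bus:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;
 *	}
 *
 *	static struct bus_type foo_bus_type = {
 *		.name		= "foo",
 *		.suspend	= foo_bus_suspend,
 *	};
 */
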
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

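/*
 * Editor's note, not part of this file: the wakeup handling above only
 * applies to devices registered as wakeup-capable, which a driver
 * typically does once at probe time:
 *
 *	device_init_wakeup(&pdev->dev, true);
 *
 * Such a driver reports wakeup events with pm_wakeup_event(dev, 0), and
 * pm_wakeup_pending() then aborts a suspend that is already in progress.
 */
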
static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

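/*
 * Editor's illustrative sketch, not part of this file: a driver that must
 * stop registering child devices for the duration of the transition can
 * pair ->prepare() with ->complete(); returning -EAGAIN from ->prepare()
 * makes dpm_prepare() below skip the device instead of failing.  The
 * "foo" helpers are hypothetical:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		foo_stop_scanning(dev_get_drvdata(dev));
 *		return 0;
 *	}
 *
 *	static void foo_complete(struct device *dev)
 *	{
 *		foo_restart_scanning(dev_get_drvdata(dev));
 *	}
 */
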
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

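/*
 * Editor's note, not part of this file: device_pm_wait_for_dev() lets an
 * asynchronously handled device serialize against a device that is neither
 * its parent nor its ancestor.  From a hypothetical resume callback:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error = device_pm_wait_for_dev(dev, foo->supplier);
 *
 *		if (error)
 *			return error;
 *		return foo_hw_init(foo);
 *	}
 */
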
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
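
/*
 * Editor's illustrative example, not part of this file: counting the
 * devices on dpm_list with dpm_for_each_dev().  The callback runs with
 * the PM core's list mutex held, so it must not register or unregister
 * devices:
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *	dpm_for_each_dev(&count, foo_count_dev);
 */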