/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
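
/*
 * Illustrative sketch (not part of this file): pm_op() dispatches to whatever
 * callbacks a driver, bus type, device type or class publishes through a
 * struct dev_pm_ops.  The foo_* names below are hypothetical.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *		.freeze		= foo_suspend,
 *		.thaw		= foo_resume,
 *		.poweroff	= foo_suspend,
 *		.restore	= foo_resume,
 *	};
 */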

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
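
/*
 * Illustrative note (not part of this file): the async_suspend flag checked
 * above is normally set by the device's driver or subsystem, typically from
 * probe, e.g. via the device_enable_async_suspend() helper declared in
 * <linux/pm.h> (assuming that helper is available in this tree):
 *
 *	device_enable_async_suspend(dev);
 */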

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
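
/*
 * Illustrative sketch (not part of this file): a "legacy" callback run by
 * legacy_suspend() above receives the target pm_message_t directly, for
 * example a bus type's ->suspend() method.  The foo_* name is hypothetical;
 * a real implementation would quiesce the device for the requested state.
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;
 *	}
 */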

static int async_error;

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		pm_dev_err(dev, pm_transition, " async", error);
		async_error = error;
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
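
/*
 * Illustrative note (not part of this file): the system sleep core pairs the
 * exported entry points above roughly as follows; error handling and the
 * platform/arch steps in between are omitted here.
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	...
 *	error = dpm_suspend_noirq(PMSG_SUSPEND);
 *	...	(enter the sleep state)
 *	dpm_resume_noirq(PMSG_RESUME);
 *	...
 *	dpm_resume_end(PMSG_RESUME);
 */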

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
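
/*
 * Illustrative sketch (not part of this file): a driver whose device depends
 * on another, unrelated device can call device_pm_wait_for_dev() from its own
 * resume path to order the two.  The foo_* names and the supplier_dev field
 * are hypothetical.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, foo->supplier_dev);
 *		return 0;
 *	}
 */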