/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        } else {                                                \
                __routine = dev_gpd_data(dev)->ops.callback;    \
                if (__routine)                                  \
                        __ret = __routine(dev);                 \
        }                                                       \
        __ret;                                                  \
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})
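
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the two macros above dispatch first to a callback set for the whole domain
 * (genpd->dev_ops) and fall back to the per-device callback set
 * (dev_gpd_data(dev)->ops).  A platform could therefore provide domain-wide
 * stop/start hooks roughly like this; the foo_* identifiers are hypothetical:
 *
 *	static int foo_stop_dev(struct device *dev)
 *	{
 *		return pm_clk_suspend(dev);	// e.g. gate the device clocks
 *	}
 *
 *	static int foo_start_dev(struct device *dev)
 *	{
 *		return pm_clk_resume(dev);
 *	}
 *
 *	// during platform init, assuming "foo_domain" has been set up:
 *	foo_domain.dev_ops.stop = foo_stop_dev;
 *	foo_domain.dev_ops.start = foo_start_dev;
 *
 * GENPD_DEV_TIMED_CALLBACK() additionally measures how long the callback
 * took and records a new worst case in the device's gpd_timing_data, which
 * the governor consults when deciding whether powering off is worthwhile.
 */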

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        if (IS_ERR_OR_NULL(domain_name))
                return NULL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!strcmp(gpd->name, domain_name)) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);
        return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active or the
         * power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
        s64 usecs64;

        if (!genpd->cpu_data)
                return;

        usecs64 = genpd->power_on_latency_ns;
        do_div(usecs64, NSEC_PER_USEC);
        usecs64 += genpd->cpu_data->saved_exit_latency;
        genpd->cpu_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        if (genpd->cpu_data) {
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = true;
                cpuidle_resume_and_unlock();
                goto out;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);

                /*
                 * The "wait for master" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        if (genpd->power_on) {
                ktime_t time_start = ktime_get();
                s64 elapsed_ns;

                ret = genpd->power_on(genpd);
                if (ret)
                        goto err;

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_on_latency_ns) {
                        genpd->power_on_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        genpd_recalc_cpu_exit_latency(genpd);
                        if (genpd->name)
                                pr_warning("%s: Power-on latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

 out:
        genpd_set_active(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        mutex_unlock(&genpd->lock);
        return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
        struct generic_pm_domain *genpd;

        genpd = pm_genpd_lookup_name(domain_name);
        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
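
/*
 * Usage sketch (editor's illustration, not from the original file): platform
 * setup code can force a domain on before registering devices in it.  The
 * domain name "foo-core" is hypothetical:
 *
 *	ret = pm_genpd_name_poweron("foo-core");
 *	if (ret)
 *		pr_err("failed to power on foo-core: %d\n", ret);
 */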

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
                                     struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd && pdd->dev) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        if (gpd_data->need_restore)
                return 0;

        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        if (!ret)
                gpd_data->need_restore = true;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        bool need_restore = gpd_data->need_restore;

        gpd_data->need_restore = false;
        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        if (need_restore)
                genpd_restore_dev(genpd, dev);

        mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                enum pm_qos_flags_status stat;

                stat = dev_pm_qos_flags(pdd->dev,
                                        PM_QOS_FLAG_NO_POWER_OFF
                                                | PM_QOS_FLAG_REMOTE_WAKEUP);
                if (stat > PM_QOS_FLAGS_NONE)
                        return -EBUSY;

                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe))
                        not_suspended++;
        }

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->cpu_data) {
                /*
                 * If cpu_data is set, cpuidle should turn the domain off when
                 * the CPU in it is idle.  In that case we don't decrement the
                 * subdomain counts of the master domains, so that power is not
                 * removed from the current domain prematurely as a result of
                 * cutting off the masters' power.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = false;
                cpuidle_resume_and_unlock();
                goto out;
        }

        if (genpd->power_off) {
                ktime_t time_start;
                s64 elapsed_ns;

                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                time_start = ktime_get();

                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
                 * managed to call pm_genpd_poweron() for the master yet after
                 * incrementing it.  In that case pm_genpd_poweron() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * the pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
                ret = genpd->power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_off_latency_ns) {
                        genpd->power_off_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        if (genpd->name)
                                pr_warning("%s: Power-off latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        mutex_lock(&genpd->lock);
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                return genpd_start_dev_no_timing(genpd, dev);

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        return 0;
}
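
/*
 * Note (editor's illustration, not part of the original file): the two
 * callbacks above are not called directly by drivers.  They are invoked by
 * the runtime PM core through dev->pm_domain->ops, so a driver for a device
 * inside a domain only uses the usual runtime PM calls, e.g.:
 *
 *	pm_runtime_enable(dev);			// in probe()
 *
 *	ret = pm_runtime_get_sync(dev);		// -> pm_genpd_runtime_resume()
 *	if (ret < 0)
 *		return ret;
 *	// ... access the hardware ...
 *	pm_runtime_put(dev);			// may lead to pm_genpd_runtime_suspend()
 */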

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
        pd_ignore_unused = true;
        return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
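
/*
 * Example (editor's note): booting with "pd_ignore_unused" on the kernel
 * command line makes pm_genpd_poweroff_unused() below a no-op, leaving
 * otherwise-unused domains powered on, which can help during platform
 * bring-up and debugging.
 */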

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        if (pd_ignore_unused) {
                pr_warn("genpd: Not disabling unused power domains\n");
                return;
        }

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                            unsigned long val, void *ptr)
{
        return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend        NULL
#define pm_genpd_runtime_resume         NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *gpd;

        if (IS_ERR_OR_NULL(genpd))
                return false;

        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
                if (gpd == genpd)
                        return true;

        return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status != GPD_STATE_POWER_OFF)
                return;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                pm_genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
        }

        if (genpd->power_on)
                genpd->power_on(genpd);

        genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd_dev_active_wakeup(genpd, dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
                 * so make it appear as powered off to pm_genpd_sync_poweron(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        if (genpd->power_off)
                                genpd->power_off(genpd);

                        return 0;
                }
        }

        if (genpd->suspend_power_off)
                return 0;

        pm_genpd_sync_poweron(genpd);

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd(dev);
        if (!pm_genpd_present(genpd))
                return;

        if (suspend) {
                genpd->suspended_count++;
                pm_genpd_sync_poweroff(genpd);
        } else {
                pm_genpd_sync_poweron(genpd);
                genpd->suspended_count--;
        }
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
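
/*
 * Usage sketch (editor's illustration, not from the original file): a driver
 * for an "always on" device, e.g. a clock event device that must keep working
 * with interrupts disabled, can switch its domain from its syscore ops.  The
 * foo_* names are hypothetical:
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_switch(&foo_dev->dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_switch(&foo_dev->dev, false);
 *	}
 *
 *	static struct syscore_ops foo_syscore_ops = {
 *		.suspend = foo_syscore_suspend,
 *		.resume = foo_syscore_resume,
 *	};
 *
 *	// registered once at init time:
 *	register_syscore_ops(&foo_syscore_ops);
 */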

#else

#define pm_genpd_prepare                NULL
#define pm_genpd_suspend                NULL
#define pm_genpd_suspend_late           NULL
#define pm_genpd_suspend_noirq          NULL
#define pm_genpd_resume_early           NULL
#define pm_genpd_resume_noirq           NULL
#define pm_genpd_resume                 NULL
#define pm_genpd_freeze                 NULL
#define pm_genpd_freeze_late            NULL
#define pm_genpd_freeze_noirq           NULL
#define pm_genpd_thaw_early             NULL
#define pm_genpd_thaw_noirq             NULL
#define pm_genpd_thaw                   NULL
#define pm_genpd_restore_noirq          NULL
#define pm_genpd_complete               NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data)
                return NULL;

        mutex_init(&gpd_data->lock);
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
        dev_pm_qos_add_notifier(dev, &gpd_data->nb);
        return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
                                     struct generic_pm_domain_data *gpd_data)
{
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
        kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data_new = __pm_genpd_alloc_dev_data(dev);
        if (!gpd_data_new)
                return -ENOMEM;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                goto out;

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = &genpd->domain;
        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        } else {
                gpd_data = gpd_data_new;
                dev->power.subsys_data->domain_data = &gpd_data->base;
        }
        gpd_data->refcount++;
        if (td)
                gpd_data->td = *td;

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
        gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);

 out:
        genpd_release_lock(genpd);

        if (gpd_data != gpd_data_new)
                __pm_genpd_free_dev_data(dev, gpd_data_new);

        return ret;
}
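
/*
 * Usage sketch (editor's illustration, not from the original file): platform
 * code typically attaches a device together with measured worst-case latency
 * data so the governor can make sensible decisions.  All foo_* names and the
 * latency numbers are hypothetical:
 *
 *	static struct gpd_timing_data foo_uart_td = {
 *		.stop_latency_ns	= 250,
 *		.start_latency_ns	= 250,
 *		.save_state_latency_ns	= 1000,
 *		.restore_state_latency_ns = 1000,
 *	};
 *
 *	ret = __pm_genpd_add_device(&foo_domain, &foo_uart_device.dev,
 *				    &foo_uart_td);
 *
 * Passing td == NULL (as the pm_genpd_add_device() wrapper in pm_domain.h
 * does) keeps the zero-initialized defaults.
 */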

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing the PM domain to which
 *   the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
1493 int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1494                              struct gpd_timing_data *td)
1495 {
1496         struct generic_pm_domain *genpd = NULL, *gpd;
1497
1498         dev_dbg(dev, "%s()\n", __func__);
1499
1500         if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1501                 return -EINVAL;
1502
1503         mutex_lock(&gpd_list_lock);
1504         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1505                 if (gpd->of_node == genpd_node) {
1506                         genpd = gpd;
1507                         break;
1508                 }
1509         }
1510         mutex_unlock(&gpd_list_lock);
1511
1512         if (!genpd)
1513                 return -EINVAL;
1514
1515         return __pm_genpd_add_device(genpd, dev, td);
1516 }
1517
1518
1519 /**
1520  * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1521  * @domain_name: Name of the PM domain to add the device to.
1522  * @dev: Device to be added.
1523  * @td: Set of PM QoS timing parameters to attach to the device.
1524  */
1525 int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1526                                struct gpd_timing_data *td)
1527 {
1528         return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
1529 }
1530
1531 /**
1532  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1533  * @genpd: PM domain to remove the device from.
1534  * @dev: Device to be removed.
1535  */
1536 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1537                            struct device *dev)
1538 {
1539         struct generic_pm_domain_data *gpd_data;
1540         struct pm_domain_data *pdd;
1541         bool remove = false;
1542         int ret = 0;
1543
1544         dev_dbg(dev, "%s()\n", __func__);
1545
1546         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1547             ||  IS_ERR_OR_NULL(dev->pm_domain)
1548             ||  pd_to_genpd(dev->pm_domain) != genpd)
1549                 return -EINVAL;
1550
1551         genpd_acquire_lock(genpd);
1552
1553         if (genpd->prepared_count > 0) {
1554                 ret = -EAGAIN;
1555                 goto out;
1556         }
1557
1558         genpd->device_count--;
1559         genpd->max_off_time_changed = true;
1560
1561         spin_lock_irq(&dev->power.lock);
1562
1563         dev->pm_domain = NULL;
1564         pdd = dev->power.subsys_data->domain_data;
1565         list_del_init(&pdd->list_node);
1566         gpd_data = to_gpd_data(pdd);
1567         if (--gpd_data->refcount == 0) {
1568                 dev->power.subsys_data->domain_data = NULL;
1569                 remove = true;
1570         }
1571
1572         spin_unlock_irq(&dev->power.lock);
1573
1574         mutex_lock(&gpd_data->lock);
1575         pdd->dev = NULL;
1576         mutex_unlock(&gpd_data->lock);
1577
1578         genpd_release_lock(genpd);
1579
1580         dev_pm_put_subsys_data(dev);
1581         if (remove)
1582                 __pm_genpd_free_dev_data(dev, gpd_data);
1583
1584         return 0;
1585
1586  out:
1587         genpd_release_lock(genpd);
1588
1589         return ret;
1590 }
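
/*
 * Illustrative sketch (not compiled): detaching a device again.  Note the
 * -EAGAIN return while the domain has prepared devices, i.e. during a system
 * suspend transition, in which case the caller may retry later.
 * dev_to_genpd() is defined earlier in this file.
 */
#if 0
static void example_detach(struct device *dev)
{
        struct generic_pm_domain *genpd = dev_to_genpd(dev);
        int ret;

        ret = pm_genpd_remove_device(genpd, dev);
        if (ret == -EAGAIN)
                dev_warn(dev, "PM domain busy, retry removal later\n");
}
#endif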
1591
1592 /**
1593  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1594  * @dev: Device to set/unset the flag for.
1595  * @val: The new value of the device's "need restore" flag.
1596  */
1597 void pm_genpd_dev_need_restore(struct device *dev, bool val)
1598 {
1599         struct pm_subsys_data *psd;
1600         unsigned long flags;
1601
1602         spin_lock_irqsave(&dev->power.lock, flags);
1603
1604         psd = dev_to_psd(dev);
1605         if (psd && psd->domain_data)
1606                 to_gpd_data(psd->domain_data)->need_restore = val;
1607
1608         spin_unlock_irqrestore(&dev->power.lock, flags);
1609 }
1610 EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
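
/*
 * Illustrative sketch (not compiled): one possible use of the flag.  If a
 * device's hardware state is unknown, e.g. right after it has been added to
 * its domain, platform code can force the domain's restore_state callback to
 * run on the next resume.  The registration helper is an assumption.
 */
#if 0
static void example_register(struct device *dev)
{
        /* hardware state unknown: restore it on the first runtime resume */
        pm_genpd_dev_need_restore(dev, true);
}
#endif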
1611
1612 /**
1613  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1614  * @genpd: Master PM domain to add the subdomain to.
1615  * @subdomain: Subdomain to be added.
1616  */
1617 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1618                            struct generic_pm_domain *subdomain)
1619 {
1620         struct gpd_link *link;
1621         int ret = 0;
1622
1623         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1624             || genpd == subdomain)
1625                 return -EINVAL;
1626
1627  start:
1628         genpd_acquire_lock(genpd);
1629         mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1630
1631         if (subdomain->status != GPD_STATE_POWER_OFF
1632             && subdomain->status != GPD_STATE_ACTIVE) {
1633                 mutex_unlock(&subdomain->lock);
1634                 genpd_release_lock(genpd);
1635                 goto start;
1636         }
1637
1638         if (genpd->status == GPD_STATE_POWER_OFF
1639             && subdomain->status != GPD_STATE_POWER_OFF) {
1640                 ret = -EINVAL;
1641                 goto out;
1642         }
1643
1644         list_for_each_entry(link, &genpd->master_links, master_node) {
1645                 if (link->slave == subdomain && link->master == genpd) {
1646                         ret = -EINVAL;
1647                         goto out;
1648                 }
1649         }
1650
1651         link = kzalloc(sizeof(*link), GFP_KERNEL);
1652         if (!link) {
1653                 ret = -ENOMEM;
1654                 goto out;
1655         }
1656         link->master = genpd;
1657         list_add_tail(&link->master_node, &genpd->master_links);
1658         link->slave = subdomain;
1659         list_add_tail(&link->slave_node, &subdomain->slave_links);
1660         if (subdomain->status != GPD_STATE_POWER_OFF)
1661                 genpd_sd_counter_inc(genpd);
1662
1663  out:
1664         mutex_unlock(&subdomain->lock);
1665         genpd_release_lock(genpd);
1666
1667         return ret;
1668 }
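
/*
 * Illustrative sketch (not compiled): building a two-level hierarchy after
 * initializing both domains.  The domain objects and names are assumptions;
 * with the slave starting powered off, the master's sd_count stays zero
 * until the slave powers up.
 */
#if 0
static struct generic_pm_domain soc_domain = { .name = "SOC" };
static struct generic_pm_domain gpu_domain = { .name = "GPU" };

static void example_build_hierarchy(void)
{
        pm_genpd_init(&soc_domain, NULL, false);        /* master, on */
        pm_genpd_init(&gpu_domain, NULL, true);         /* slave, off */

        if (pm_genpd_add_subdomain(&soc_domain, &gpu_domain))
                pr_err("failed to link PM domains\n");
}
#endif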
1669
1670 /**
1671  * pm_genpd_add_subdomain_names - Find I/O PM domains and add a subdomain to a master.
1672  * @master_name: Name of the master PM domain to add the subdomain to.
1673  * @subdomain_name: Name of the subdomain to be added.
1674  */
1675 int pm_genpd_add_subdomain_names(const char *master_name,
1676                                  const char *subdomain_name)
1677 {
1678         struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1679
1680         if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1681                 return -EINVAL;
1682
1683         mutex_lock(&gpd_list_lock);
1684         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1685                 if (!master && !strcmp(gpd->name, master_name))
1686                         master = gpd;
1687
1688                 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1689                         subdomain = gpd;
1690
1691                 if (master && subdomain)
1692                         break;
1693         }
1694         mutex_unlock(&gpd_list_lock);
1695
1696         return pm_genpd_add_subdomain(master, subdomain);
1697 }
1698
1699 /**
1700  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1701  * @genpd: Master PM domain to remove the subdomain from.
1702  * @subdomain: Subdomain to be removed.
1703  */
1704 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1705                               struct generic_pm_domain *subdomain)
1706 {
1707         struct gpd_link *link;
1708         int ret = -EINVAL;
1709
1710         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1711                 return -EINVAL;
1712
1713  start:
1714         genpd_acquire_lock(genpd);
1715
1716         list_for_each_entry(link, &genpd->master_links, master_node) {
1717                 if (link->slave != subdomain)
1718                         continue;
1719
1720                 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1721
1722                 if (subdomain->status != GPD_STATE_POWER_OFF
1723                     && subdomain->status != GPD_STATE_ACTIVE) {
1724                         mutex_unlock(&subdomain->lock);
1725                         genpd_release_lock(genpd);
1726                         goto start;
1727                 }
1728
1729                 list_del(&link->master_node);
1730                 list_del(&link->slave_node);
1731                 kfree(link);
1732                 if (subdomain->status != GPD_STATE_POWER_OFF)
1733                         genpd_sd_counter_dec(genpd);
1734
1735                 mutex_unlock(&subdomain->lock);
1736
1737                 ret = 0;
1738                 break;
1739         }
1740
1741         genpd_release_lock(genpd);
1742
1743         return ret;
1744 }
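
/*
 * Illustrative sketch (not compiled), continuing the hierarchy example above:
 * severing the link again.  -EINVAL here means no such master/subdomain link
 * existed.
 */
#if 0
static void example_teardown_hierarchy(void)
{
        if (pm_genpd_remove_subdomain(&soc_domain, &gpu_domain))
                pr_warn("PM domain link was not present\n");
}
#endif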
1745
1746 /**
1747  * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1748  * @genpd: PM domain to be connected with cpuidle.
1749  * @state: cpuidle state this domain can disable/enable.
1750  *
1751  * Make a PM domain behave as though it contained a CPU core: instead of
1752  * calling its power-down routine, it will enable the given cpuidle state so
1753  * that the cpuidle subsystem can power it down (if possible and desirable).
1754  */
1755 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1756 {
1757         struct cpuidle_driver *cpuidle_drv;
1758         struct gpd_cpu_data *cpu_data;
1759         struct cpuidle_state *idle_state;
1760         int ret = 0;
1761
1762         if (IS_ERR_OR_NULL(genpd) || state < 0)
1763                 return -EINVAL;
1764
1765         genpd_acquire_lock(genpd);
1766
1767         if (genpd->cpu_data) {
1768                 ret = -EEXIST;
1769                 goto out;
1770         }
1771         cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
1772         if (!cpu_data) {
1773                 ret = -ENOMEM;
1774                 goto out;
1775         }
1776         cpuidle_drv = cpuidle_driver_ref();
1777         if (!cpuidle_drv) {
1778                 ret = -ENODEV;
1779                 goto err_drv;
1780         }
1781         if (cpuidle_drv->state_count <= state) {
1782                 ret = -EINVAL;
1783                 goto err;
1784         }
1785         idle_state = &cpuidle_drv->states[state];
1786         if (!idle_state->disabled) {
1787                 ret = -EAGAIN;
1788                 goto err;
1789         }
1790         cpu_data->idle_state = idle_state;
1791         cpu_data->saved_exit_latency = idle_state->exit_latency;
1792         genpd->cpu_data = cpu_data;
1793         genpd_recalc_cpu_exit_latency(genpd);
1794
1795  out:
1796         genpd_release_lock(genpd);
1797         return ret;
1798
1799  err:
1800         cpuidle_driver_unref();
1801
1802  err_drv:
1803         kfree(cpu_data);
1804         goto out;
1805 }
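
/*
 * Illustrative sketch (not compiled): coupling a CPU domain to cpuidle
 * state 1 and undoing it later.  Note that the target state must be disabled
 * when attaching (and again when detaching), otherwise -EAGAIN is returned;
 * the state index is an assumption for the example.
 */
#if 0
static int example_couple_cpuidle(struct generic_pm_domain *cpu_domain)
{
        int ret;

        ret = pm_genpd_attach_cpuidle(cpu_domain, 1);
        if (ret)
                return ret;

        /* ... domain power-down is now driven by cpuidle ... */

        return pm_genpd_detach_cpuidle(cpu_domain);
}
#endif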
1806
1807 /**
1808  * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1809  * @name: Name of the domain to connect to cpuidle.
1810  * @state: cpuidle state this domain can manipulate.
1811  */
1812 int pm_genpd_name_attach_cpuidle(const char *name, int state)
1813 {
1814         return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1815 }
1816
1817 /**
1818  * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1819  * @genpd: PM domain to remove the cpuidle connection from.
1820  *
1821  * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1822  * given PM domain.
1823  */
1824 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1825 {
1826         struct gpd_cpu_data *cpu_data;
1827         struct cpuidle_state *idle_state;
1828         int ret = 0;
1829
1830         if (IS_ERR_OR_NULL(genpd))
1831                 return -EINVAL;
1832
1833         genpd_acquire_lock(genpd);
1834
1835         cpu_data = genpd->cpu_data;
1836         if (!cpu_data) {
1837                 ret = -ENODEV;
1838                 goto out;
1839         }
1840         idle_state = cpu_data->idle_state;
1841         if (!idle_state->disabled) {
1842                 ret = -EAGAIN;
1843                 goto out;
1844         }
1845         idle_state->exit_latency = cpu_data->saved_exit_latency;
1846         cpuidle_driver_unref();
1847         genpd->cpu_data = NULL;
1848         kfree(cpu_data);
1849
1850  out:
1851         genpd_release_lock(genpd);
1852         return ret;
1853 }
1854
1855 /**
1856  * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1857  * @name: Name of the domain to disconnect cpuidle from.
1858  */
1859 int pm_genpd_name_detach_cpuidle(const char *name)
1860 {
1861         return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1862 }
1863
1864 /* Default device callbacks for generic PM domains. */
1865
1866 /**
1867  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1868  * @dev: Device to handle.
1869  */
1870 static int pm_genpd_default_save_state(struct device *dev)
1871 {
1872         int (*cb)(struct device *__dev);
1873
1874         cb = dev_gpd_data(dev)->ops.save_state;
1875         if (cb)
1876                 return cb(dev);
1877
1878         if (dev->type && dev->type->pm)
1879                 cb = dev->type->pm->runtime_suspend;
1880         else if (dev->class && dev->class->pm)
1881                 cb = dev->class->pm->runtime_suspend;
1882         else if (dev->bus && dev->bus->pm)
1883                 cb = dev->bus->pm->runtime_suspend;
1884         else
1885                 cb = NULL;
1886
1887         if (!cb && dev->driver && dev->driver->pm)
1888                 cb = dev->driver->pm->runtime_suspend;
1889
1890         return cb ? cb(dev) : 0;
1891 }
1892
1893 /**
1894  * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1895  * @dev: Device to handle.
1896  */
1897 static int pm_genpd_default_restore_state(struct device *dev)
1898 {
1899         int (*cb)(struct device *__dev);
1900
1901         cb = dev_gpd_data(dev)->ops.restore_state;
1902         if (cb)
1903                 return cb(dev);
1904
1905         if (dev->type && dev->type->pm)
1906                 cb = dev->type->pm->runtime_resume;
1907         else if (dev->class && dev->class->pm)
1908                 cb = dev->class->pm->runtime_resume;
1909         else if (dev->bus && dev->bus->pm)
1910                 cb = dev->bus->pm->runtime_resume;
1911         else
1912                 cb = NULL;
1913
1914         if (!cb && dev->driver && dev->driver->pm)
1915                 cb = dev->driver->pm->runtime_resume;
1916
1917         return cb ? cb(dev) : 0;
1918 }
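
/*
 * Illustrative sketch (not compiled): a platform that wants its own
 * save/restore pair instead of these defaults can override the dev_ops that
 * pm_genpd_init() installs.  The my_* names are assumptions.
 */
#if 0
static int my_save_state(struct device *dev)
{
        /* stash device context in platform-specific storage */
        return 0;
}

static int my_restore_state(struct device *dev)
{
        /* reload the stashed context */
        return 0;
}

static void example_override_ops(struct generic_pm_domain *genpd)
{
        pm_genpd_init(genpd, NULL, false);
        genpd->dev_ops.save_state = my_save_state;
        genpd->dev_ops.restore_state = my_restore_state;
}
#endif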
1919
1920 #ifdef CONFIG_PM_SLEEP
1921
1922 /**
1923  * pm_genpd_default_suspend - Default "device suspend" for PM domains.
1924  * @dev: Device to handle.
1925  */
1926 static int pm_genpd_default_suspend(struct device *dev)
1927 {
1928         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1929
1930         return cb ? cb(dev) : pm_generic_suspend(dev);
1931 }
1932
1933 /**
1934  * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
1935  * @dev: Device to handle.
1936  */
1937 static int pm_genpd_default_suspend_late(struct device *dev)
1938 {
1939         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1940
1941         return cb ? cb(dev) : pm_generic_suspend_late(dev);
1942 }
1943
1944 /**
1945  * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
1946  * @dev: Device to handle.
1947  */
1948 static int pm_genpd_default_resume_early(struct device *dev)
1949 {
1950         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1951
1952         return cb ? cb(dev) : pm_generic_resume_early(dev);
1953 }
1954
1955 /**
1956  * pm_genpd_default_resume - Default "device resume" for PM domains.
1957  * @dev: Device to handle.
1958  */
1959 static int pm_genpd_default_resume(struct device *dev)
1960 {
1961         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1962
1963         return cb ? cb(dev) : pm_generic_resume(dev);
1964 }
1965
1966 /**
1967  * pm_genpd_default_freeze - Default "device freeze" for PM domains.
1968  * @dev: Device to handle.
1969  */
1970 static int pm_genpd_default_freeze(struct device *dev)
1971 {
1972         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1973
1974         return cb ? cb(dev) : pm_generic_freeze(dev);
1975 }
1976
1977 /**
1978  * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
1979  * @dev: Device to handle.
1980  */
1981 static int pm_genpd_default_freeze_late(struct device *dev)
1982 {
1983         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1984
1985         return cb ? cb(dev) : pm_generic_freeze_late(dev);
1986 }
1987
1988 /**
1989  * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
1990  * @dev: Device to handle.
1991  */
1992 static int pm_genpd_default_thaw_early(struct device *dev)
1993 {
1994         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1995
1996         return cb ? cb(dev) : pm_generic_thaw_early(dev);
1997 }
1998
1999 /**
2000  * pm_genpd_default_thaw - Default "device thaw" for PM domains.
2001  * @dev: Device to handle.
2002  */
2003 static int pm_genpd_default_thaw(struct device *dev)
2004 {
2005         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
2006
2007         return cb ? cb(dev) : pm_generic_thaw(dev);
2008 }
2009
2010 #else /* !CONFIG_PM_SLEEP */
2011
2012 #define pm_genpd_default_suspend        NULL
2013 #define pm_genpd_default_suspend_late   NULL
2014 #define pm_genpd_default_resume_early   NULL
2015 #define pm_genpd_default_resume         NULL
2016 #define pm_genpd_default_freeze         NULL
2017 #define pm_genpd_default_freeze_late    NULL
2018 #define pm_genpd_default_thaw_early     NULL
2019 #define pm_genpd_default_thaw           NULL
2020
2021 #endif /* !CONFIG_PM_SLEEP */
2022
2023 /**
2024  * pm_genpd_init - Initialize a generic I/O PM domain object.
2025  * @genpd: PM domain object to initialize.
2026  * @gov: PM domain governor to associate with the domain (may be NULL).
2027  * @is_off: Initial power state of the domain (true if the domain is off).
2028  */
2029 void pm_genpd_init(struct generic_pm_domain *genpd,
2030                    struct dev_power_governor *gov, bool is_off)
2031 {
2032         if (IS_ERR_OR_NULL(genpd))
2033                 return;
2034
2035         INIT_LIST_HEAD(&genpd->master_links);
2036         INIT_LIST_HEAD(&genpd->slave_links);
2037         INIT_LIST_HEAD(&genpd->dev_list);
2038         mutex_init(&genpd->lock);
2039         genpd->gov = gov;
2040         INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2041         genpd->in_progress = 0;
2042         atomic_set(&genpd->sd_count, 0);
2043         genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
2044         init_waitqueue_head(&genpd->status_wait_queue);
2045         genpd->poweroff_task = NULL;
2046         genpd->resume_count = 0;
2047         genpd->device_count = 0;
2048         genpd->max_off_time_ns = -1;
2049         genpd->max_off_time_changed = true;
2050         genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
2051         genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
2052         genpd->domain.ops.prepare = pm_genpd_prepare;
2053         genpd->domain.ops.suspend = pm_genpd_suspend;
2054         genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
2055         genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
2056         genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
2057         genpd->domain.ops.resume_early = pm_genpd_resume_early;
2058         genpd->domain.ops.resume = pm_genpd_resume;
2059         genpd->domain.ops.freeze = pm_genpd_freeze;
2060         genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
2061         genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
2062         genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
2063         genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
2064         genpd->domain.ops.thaw = pm_genpd_thaw;
2065         genpd->domain.ops.poweroff = pm_genpd_suspend;
2066         genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
2067         genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
2068         genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
2069         genpd->domain.ops.restore_early = pm_genpd_resume_early;
2070         genpd->domain.ops.restore = pm_genpd_resume;
2071         genpd->domain.ops.complete = pm_genpd_complete;
2072         genpd->dev_ops.save_state = pm_genpd_default_save_state;
2073         genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
2074         genpd->dev_ops.suspend = pm_genpd_default_suspend;
2075         genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
2076         genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
2077         genpd->dev_ops.resume = pm_genpd_default_resume;
2078         genpd->dev_ops.freeze = pm_genpd_default_freeze;
2079         genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
2080         genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
2081         genpd->dev_ops.thaw = pm_genpd_default_thaw;
2082         mutex_lock(&gpd_list_lock);
2083         list_add(&genpd->gpd_list_node, &gpd_list);
2084         mutex_unlock(&gpd_list_lock);
2085 }
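
/*
 * Illustrative sketch (not compiled): a typical bring-up using
 * pm_genpd_init().  The power_off/power_on callbacks are assumptions, as is
 * the choice of simple_qos_governor (declared in <linux/pm_domain.h>).
 */
#if 0
static int board_power_off(struct generic_pm_domain *domain)
{
        /* gate clocks / cut power to the island here */
        return 0;
}

static int board_power_on(struct generic_pm_domain *domain)
{
        /* restore power and wait for the island to settle */
        return 0;
}

static struct generic_pm_domain board_domain = {
        .name           = "BOARD-PD",
        .power_off      = board_power_off,
        .power_on       = board_power_on,
};

static void __init board_pm_domain_setup(void)
{
        /* starts powered on; the governor may veto power-off on QoS grounds */
        pm_genpd_init(&board_domain, &simple_qos_governor, false);
}
#endif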