]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/base/power/domain.c
regulator: max8973: Fix up control flag option for bias control
[karo-tx-linux.git] / drivers / base / power / domain.c
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8
9 #include <linux/kernel.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_qos.h>
15 #include <linux/pm_clock.h>
16 #include <linux/slab.h>
17 #include <linux/err.h>
18 #include <linux/sched.h>
19 #include <linux/suspend.h>
20 #include <linux/export.h>
21
/*
 * Invoke the device-specific @callback from @genpd's dev_ops for @dev.
 * Evaluates to the callback's return value, or to (type)0 when the domain
 * does not implement the callback.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        }                                                       \
        __ret;                                                  \
})
33
/*
 * Like GENPD_DEV_CALLBACK(), but additionally measure how long the callback
 * took.  If the callback succeeded and the measured time exceeds the
 * worst-case latency recorded in the device's timing data (@field), record
 * the new worst case and mark both the device's constraints and the
 * domain's max_off_time as changed so they get re-evaluated.
 */
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})
49
50 static LIST_HEAD(gpd_list);
51 static DEFINE_MUTEX(gpd_list_lock);
52
53 static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
54 {
55         struct generic_pm_domain *genpd = NULL, *gpd;
56
57         if (IS_ERR_OR_NULL(domain_name))
58                 return NULL;
59
60         mutex_lock(&gpd_list_lock);
61         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
62                 if (!strcmp(gpd->name, domain_name)) {
63                         genpd = gpd;
64                         break;
65                 }
66         }
67         mutex_unlock(&gpd_list_lock);
68         return genpd;
69 }
70
71 /*
72  * Get the generic PM domain for a particular struct device.
73  * This validates the struct device pointer, the PM domain pointer,
74  * and checks that the PM domain pointer is a real generic PM domain.
75  * Any failure results in NULL being returned.
76  */
77 struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
78 {
79         struct generic_pm_domain *genpd = NULL, *gpd;
80
81         if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
82                 return NULL;
83
84         mutex_lock(&gpd_list_lock);
85         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
86                 if (&gpd->domain == dev->pm_domain) {
87                         genpd = gpd;
88                         break;
89                 }
90         }
91         mutex_unlock(&gpd_list_lock);
92
93         return genpd;
94 }
95
96 /*
97  * This should only be used where we are certain that the pm_domain
98  * attached to the device is a genpd domain.
99  */
100 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
101 {
102         if (IS_ERR_OR_NULL(dev->pm_domain))
103                 return ERR_PTR(-EINVAL);
104
105         return pd_to_genpd(dev->pm_domain);
106 }
107
/* Invoke and time the domain's ->stop() callback for @dev. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}
113
/* Invoke and time the domain's ->start() callback for @dev. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}
119
120 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
121 {
122         bool ret = false;
123
124         if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
125                 ret = !!atomic_dec_and_test(&genpd->sd_count);
126
127         return ret;
128 }
129
/*
 * Increment @genpd's active-subdomain count.  The barrier orders the
 * increment before subsequent memory accesses; the pairing read side is
 * the sd_count checks elsewhere (NOTE(review): confirm pairing against
 * the readers in the poweroff paths).
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
135
/*
 * Acquire @genpd's mutex and wait until the domain settles into a stable
 * state (GPD_STATE_ACTIVE or GPD_STATE_POWER_OFF).  The mutex is dropped
 * around each schedule() so the task performing the transition can make
 * progress; it is held again on return.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
159
/* Counterpart of genpd_acquire_lock(). */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}
164
165 static void genpd_set_active(struct generic_pm_domain *genpd)
166 {
167         if (genpd->resume_count == 0)
168                 genpd->status = GPD_STATE_ACTIVE;
169 }
170
/*
 * Recompute the exit latency of the cpuidle state tied to @genpd: the
 * domain's power-on latency converted from ns to us, plus the idle
 * state's original (saved) exit latency.  No-op when the domain has no
 * cpuidle data.
 */
static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpuidle_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	/*
	 * NOTE(review): do_div() expects an unsigned 64-bit dividend;
	 * power_on_latency_ns is presumably non-negative here — confirm.
	 */
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpuidle_data->saved_exit_latency;
	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}
183
184 static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
185 {
186         ktime_t time_start;
187         s64 elapsed_ns;
188         int ret;
189
190         if (!genpd->power_on)
191                 return 0;
192
193         if (!timed)
194                 return genpd->power_on(genpd);
195
196         time_start = ktime_get();
197         ret = genpd->power_on(genpd);
198         if (ret)
199                 return ret;
200
201         elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
202         if (elapsed_ns <= genpd->power_on_latency_ns)
203                 return ret;
204
205         genpd->power_on_latency_ns = elapsed_ns;
206         genpd->max_off_time_changed = true;
207         genpd_recalc_cpu_exit_latency(genpd);
208         pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
209                  genpd->name, "on", elapsed_ns);
210
211         return ret;
212 }
213
214 static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
215 {
216         ktime_t time_start;
217         s64 elapsed_ns;
218         int ret;
219
220         if (!genpd->power_off)
221                 return 0;
222
223         if (!timed)
224                 return genpd->power_off(genpd);
225
226         time_start = ktime_get();
227         ret = genpd->power_off(genpd);
228         if (ret == -EBUSY)
229                 return ret;
230
231         elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
232         if (elapsed_ns <= genpd->power_off_latency_ns)
233                 return ret;
234
235         genpd->power_off_latency_ns = elapsed_ns;
236         genpd->max_off_time_changed = true;
237         pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
238                  genpd->name, "off", elapsed_ns);
239
240         return ret;
241 }
242
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Must be called with @genpd's lock held; the lock is dropped and re-taken
 * around sleeps and around powering on the masters.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	/* Nothing to do if already active or system suspend holds it off. */
	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/* Transitional (busy) state: just mark active if possible. */
	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpuidle_data) {
		/* Domain power is handled via cpuidle; just disable the state. */
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		/* Drop our lock while the master powers on. */
		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	/* Roll back the subdomain counts taken for masters powered so far. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}
329
/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Takes and releases @genpd's lock around __pm_genpd_poweron().
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
343
344 /**
345  * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
346  * @domain_name: Name of the PM domain to power up.
347  */
348 int pm_genpd_name_poweron(const char *domain_name)
349 {
350         struct generic_pm_domain *genpd;
351
352         genpd = pm_genpd_lookup_name(domain_name);
353         return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
354 }
355
/* Invoke the domain's ->start() callback for @dev without latency tracking. */
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
361
/* Invoke and time the domain's ->save_state() callback for @dev. */
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}
367
/* Invoke and time the domain's ->restore_state() callback for @dev. */
static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}
374
/*
 * PM QoS notifier callback: a device's QoS constraints changed.
 *
 * Starting at the device the notifier is registered for, walk up the
 * parent chain (stopping at a parent with power.ignore_children set),
 * marking each device's timing data and each associated domain as having
 * changed constraints so the governor re-evaluates them.  The per-device
 * power lock protects the subsys_data inspection; the domain mutex
 * protects max_off_time_changed.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
414
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * need_restore is a tri-state: > 0 means state already saved (nothing to
 * do), < 0 means "unknown" (no runtime PM callback has run yet), and 0
 * means the state is active and must be saved here.  Called with @genpd's
 * lock held; the lock is dropped around the device callbacks.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore > 0)
		return 0;

	/*
	 * If the value of the need_restore flag is still unknown at this point,
	 * we trust that pm_genpd_poweroff() has verified that the device is
	 * already runtime PM suspended.
	 */
	if (gpd_data->need_restore < 0) {
		gpd_data->need_restore = 1;
		return 0;
	}

	mutex_unlock(&genpd->lock);

	/* Start the device so its state can be read, save it, stop again. */
	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = 1;

	return ret;
}
454
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Called with @genpd's lock held; the lock is dropped around the device
 * callbacks.  need_restore is cleared before the unlock so a concurrent
 * save does not redo work.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int need_restore = gpd_data->need_restore;

	gpd_data->need_restore = 0;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);

	/*
	 * Call genpd_restore_dev() for recently added devices too (need_restore
	 * is negative then).
	 */
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}
482
/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}
497
/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	/* queue_work() itself is a no-op if the work is already pending. */
	queue_work(pm_wq, &genpd->power_off_work);
}
509
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Called with @genpd's lock held; the lock is dropped and re-taken inside
 * the per-device save path.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	/* Active subdomains keep the domain powered. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/* Count devices that are not runtime suspended (or are irq-safe). */
	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	/* Let the governor veto the power off. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	/* Save device states in reverse registration order. */
	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			/* Another task asked us to start over. */
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpuidle_data) {
		/*
		 * If cpuidle_data is set, cpuidle should turn the domain off
		 * when the CPU in it is idle.  In that case we don't decrement
		 * the subdomain counts of the master domains, so that power is
		 * not removed from the current domain prematurely as a result
		 * of cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	/* Release the masters and let them consider powering off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
643
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	/* Wait for a stable domain state before attempting the power off. */
	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}
658
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Let the governor veto stopping the device. */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);

	/*
	 * If we have an unknown state of the need_restore flag, it means none
	 * of the runtime PM callbacks has been invoked yet. Let's update the
	 * flag to reflect that the current state is active.
	 */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->need_restore < 0)
		gpd_data->need_restore = 0;

	/* in_progress tells pm_genpd_poweroff() this device counts as idle. */
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
713
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	/* Wait for any concurrent power-off to yield before restoring. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}
771
/* "pd_ignore_unused" kernel command line flag: keep unused domains powered. */
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
779
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 *
 * Queues a power-off attempt for every registered domain unless the
 * "pd_ignore_unused" command line flag was given.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
799
/* Power off all unused domains once the system has finished booting. */
static int __init genpd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(genpd_poweroff_unused);
806
807 #ifdef CONFIG_PM_SLEEP
808
809 /**
810  * pm_genpd_present - Check if the given PM domain has been initialized.
811  * @genpd: PM domain to check.
812  */
813 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
814 {
815         const struct generic_pm_domain *gpd;
816
817         if (IS_ERR_OR_NULL(genpd))
818                 return false;
819
820         list_for_each_entry(gpd, &gpd_list, gpd_list_node)
821                 if (gpd == genpd)
822                         return true;
823
824         return false;
825 }
826
/* Ask the domain whether @dev must stay active to provide system wakeup. */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
832
/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	/* Only power off when every device is suspended and no subdomain is on. */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	/* Recurse into the masters after releasing their subdomain refs. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}
867
/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	/* Power on the masters first, taking a subdomain ref on each. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}
895
896 /**
897  * resume_needed - Check whether to resume a device before system suspend.
898  * @dev: Device to check.
899  * @genpd: PM domain the device belongs to.
900  *
901  * There are two cases in which a device that can wake up the system from sleep
902  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
903  * to wake up the system and it has to remain active for this purpose while the
904  * system is in the sleep state and (2) if the device is not enabled to wake up
905  * the system from sleep states and it generally doesn't generate wakeup signals
906  * by itself (those signals are generated on its behalf by other parts of the
907  * system).  In the latter case it may be necessary to reconfigure the device's
908  * wakeup settings during system suspend, because it may have been set up to
909  * signal remote wakeup from the system's working state as needed by runtime PM.
910  * Return 'true' in either of the above cases.
911  */
912 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
913 {
914         bool active_wakeup;
915
916         if (!device_can_wakeup(dev))
917                 return false;
918
919         active_wakeup = genpd_dev_active_wakeup(genpd, dev);
920         return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
921 }
922
923 /**
924  * pm_genpd_prepare - Start power transition of a device in a PM domain.
925  * @dev: Device to start the transition of.
926  *
927  * Start a power transition of a device (during a system-wide power transition)
928  * under the assumption that its pm_domain field points to the domain member of
929  * an object of type struct generic_pm_domain representing a PM domain
930  * consisting of I/O devices.
931  */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        /* Abort the transition if a system wakeup is already pending. */
        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        /*
         * The first device prepared in this domain latches whether the whole
         * domain was powered off at the start of the transition; all of the
         * "suspend_power_off" short-circuits below depend on this.
         */
        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        /* Domain was off before suspend: leave the device alone. */
        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                /* Roll back the prepared_count increment taken above. */
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}
997
998 /**
999  * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
1000  * @dev: Device to suspend.
1001  *
1002  * Suspend a device under the assumption that its pm_domain field points to the
1003  * domain member of an object of type struct generic_pm_domain representing
1004  * a PM domain consisting of I/O devices.
1005  */
1006 static int pm_genpd_suspend(struct device *dev)
1007 {
1008         struct generic_pm_domain *genpd;
1009
1010         dev_dbg(dev, "%s()\n", __func__);
1011
1012         genpd = dev_to_genpd(dev);
1013         if (IS_ERR(genpd))
1014                 return -EINVAL;
1015
1016         return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
1017 }
1018
1019 /**
1020  * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
1021  * @dev: Device to suspend.
1022  *
1023  * Carry out a late suspend of a device under the assumption that its
1024  * pm_domain field points to the domain member of an object of type
1025  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1026  */
1027 static int pm_genpd_suspend_late(struct device *dev)
1028 {
1029         struct generic_pm_domain *genpd;
1030
1031         dev_dbg(dev, "%s()\n", __func__);
1032
1033         genpd = dev_to_genpd(dev);
1034         if (IS_ERR(genpd))
1035                 return -EINVAL;
1036
1037         return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
1038 }
1039
1040 /**
1041  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1042  * @dev: Device to suspend.
1043  *
1044  * Stop the device and remove power from the domain if all devices in it have
1045  * been stopped.
1046  */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Leave the device running if the domain stays powered off for the
         * whole transition, or if the device is in the wakeup path and has
         * to remain active to wake up the system.
         */
        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd, true);

        return 0;
}
1073
1074 /**
1075  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1076  * @dev: Device to resume.
1077  *
1078  * Restore power to the device's PM domain, if necessary, and start the device.
1079  */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Mirror of pm_genpd_suspend_noirq(): devices that were left running
         * there (domain off for the whole transition, or device in the
         * wakeup path) need nothing done here either.
         */
        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd, true);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}
1104
1105 /**
1106  * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
1107  * @dev: Device to resume.
1108  *
1109  * Carry out an early resume of a device under the assumption that its
1110  * pm_domain field points to the domain member of an object of type
1111  * struct generic_pm_domain representing a power domain consisting of I/O
1112  * devices.
1113  */
1114 static int pm_genpd_resume_early(struct device *dev)
1115 {
1116         struct generic_pm_domain *genpd;
1117
1118         dev_dbg(dev, "%s()\n", __func__);
1119
1120         genpd = dev_to_genpd(dev);
1121         if (IS_ERR(genpd))
1122                 return -EINVAL;
1123
1124         return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
1125 }
1126
1127 /**
1128  * pm_genpd_resume - Resume of device in an I/O PM domain.
1129  * @dev: Device to resume.
1130  *
1131  * Resume a device under the assumption that its pm_domain field points to the
1132  * domain member of an object of type struct generic_pm_domain representing
1133  * a power domain consisting of I/O devices.
1134  */
1135 static int pm_genpd_resume(struct device *dev)
1136 {
1137         struct generic_pm_domain *genpd;
1138
1139         dev_dbg(dev, "%s()\n", __func__);
1140
1141         genpd = dev_to_genpd(dev);
1142         if (IS_ERR(genpd))
1143                 return -EINVAL;
1144
1145         return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
1146 }
1147
1148 /**
1149  * pm_genpd_freeze - Freezing a device in an I/O PM domain.
1150  * @dev: Device to freeze.
1151  *
1152  * Freeze a device under the assumption that its pm_domain field points to the
1153  * domain member of an object of type struct generic_pm_domain representing
1154  * a power domain consisting of I/O devices.
1155  */
1156 static int pm_genpd_freeze(struct device *dev)
1157 {
1158         struct generic_pm_domain *genpd;
1159
1160         dev_dbg(dev, "%s()\n", __func__);
1161
1162         genpd = dev_to_genpd(dev);
1163         if (IS_ERR(genpd))
1164                 return -EINVAL;
1165
1166         return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
1167 }
1168
1169 /**
1170  * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1171  * @dev: Device to freeze.
1172  *
1173  * Carry out a late freeze of a device under the assumption that its
1174  * pm_domain field points to the domain member of an object of type
1175  * struct generic_pm_domain representing a power domain consisting of I/O
1176  * devices.
1177  */
1178 static int pm_genpd_freeze_late(struct device *dev)
1179 {
1180         struct generic_pm_domain *genpd;
1181
1182         dev_dbg(dev, "%s()\n", __func__);
1183
1184         genpd = dev_to_genpd(dev);
1185         if (IS_ERR(genpd))
1186                 return -EINVAL;
1187
1188         return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
1189 }
1190
1191 /**
1192  * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1193  * @dev: Device to freeze.
1194  *
 * Complete the freeze of a device under the assumption that its pm_domain
 * field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
1199  */
1200 static int pm_genpd_freeze_noirq(struct device *dev)
1201 {
1202         struct generic_pm_domain *genpd;
1203
1204         dev_dbg(dev, "%s()\n", __func__);
1205
1206         genpd = dev_to_genpd(dev);
1207         if (IS_ERR(genpd))
1208                 return -EINVAL;
1209
1210         return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
1211 }
1212
1213 /**
1214  * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1215  * @dev: Device to thaw.
1216  *
1217  * Start the device, unless power has been removed from the domain already
1218  * before the system transition.
1219  */
1220 static int pm_genpd_thaw_noirq(struct device *dev)
1221 {
1222         struct generic_pm_domain *genpd;
1223
1224         dev_dbg(dev, "%s()\n", __func__);
1225
1226         genpd = dev_to_genpd(dev);
1227         if (IS_ERR(genpd))
1228                 return -EINVAL;
1229
1230         return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
1231 }
1232
1233 /**
1234  * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
1235  * @dev: Device to thaw.
1236  *
1237  * Carry out an early thaw of a device under the assumption that its
1238  * pm_domain field points to the domain member of an object of type
1239  * struct generic_pm_domain representing a power domain consisting of I/O
1240  * devices.
1241  */
1242 static int pm_genpd_thaw_early(struct device *dev)
1243 {
1244         struct generic_pm_domain *genpd;
1245
1246         dev_dbg(dev, "%s()\n", __func__);
1247
1248         genpd = dev_to_genpd(dev);
1249         if (IS_ERR(genpd))
1250                 return -EINVAL;
1251
1252         return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
1253 }
1254
1255 /**
1256  * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1257  * @dev: Device to thaw.
1258  *
1259  * Thaw a device under the assumption that its pm_domain field points to the
1260  * domain member of an object of type struct generic_pm_domain representing
1261  * a power domain consisting of I/O devices.
1262  */
1263 static int pm_genpd_thaw(struct device *dev)
1264 {
1265         struct generic_pm_domain *genpd;
1266
1267         dev_dbg(dev, "%s()\n", __func__);
1268
1269         genpd = dev_to_genpd(dev);
1270         if (IS_ERR(genpd))
1271                 return -EINVAL;
1272
1273         return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
1274 }
1275
1276 /**
1277  * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1278  * @dev: Device to resume.
1279  *
1280  * Make sure the domain will be in the same power state as before the
1281  * hibernation the system is resuming from and start the device if necessary.
1282  */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
                 * so make it appear as powered off to pm_genpd_sync_poweron(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        genpd_power_off(genpd, true);

                        return 0;
                }
        }

        /* Domains that were off before hibernation are left off. */
        if (genpd->suspend_power_off)
                return 0;

        /* Power the domain (and its masters) back on and restart the device. */
        pm_genpd_sync_poweron(genpd, true);

        return genpd_start_dev(genpd, dev);
}
1326
1327 /**
1328  * pm_genpd_complete - Complete power transition of a device in a power domain.
1329  * @dev: Device to complete the transition of.
1330  *
1331  * Complete a power transition of a device (during a system-wide power
1332  * transition) under the assumption that its pm_domain field points to the
1333  * domain member of an object of type struct generic_pm_domain representing
1334  * a power domain consisting of I/O devices.
1335  */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        /*
         * Skip the completion work if the domain stayed powered off for the
         * whole transition; the last device to complete clears
         * suspend_power_off for the next cycle.
         */
        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                /* Undo the __pm_runtime_disable() done in pm_genpd_prepare(). */
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}
1362
1363 /**
1364  * genpd_syscore_switch - Switch power during system core suspend or resume.
1365  * @dev: Device that normally is marked as "always on" to switch power for.
1366  *
1367  * This routine may only be called during the system core (syscore) suspend or
1368  * resume phase for devices whose "always on" flags are set.
1369  */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd(dev);
        if (!pm_genpd_present(genpd))
                return;

        if (suspend) {
                /* Mirror pm_genpd_suspend_noirq(): count first, then power off. */
                genpd->suspended_count++;
                pm_genpd_sync_poweroff(genpd, false);
        } else {
                /* Mirror pm_genpd_resume_noirq(): power on first, then uncount. */
                pm_genpd_sync_poweron(genpd, false);
                genpd->suspended_count--;
        }
}
1386
/**
 * pm_genpd_syscore_poweroff - Power off the PM domain of a syscore device.
 * @dev: Device whose PM domain is to be powered off.
 */
void pm_genpd_syscore_poweroff(struct device *dev)
{
        genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1392
/**
 * pm_genpd_syscore_poweron - Power on the PM domain of a syscore device.
 * @dev: Device whose PM domain is to be powered on.
 */
void pm_genpd_syscore_poweron(struct device *dev)
{
        genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1398
1399 #else /* !CONFIG_PM_SLEEP */
1400
1401 #define pm_genpd_prepare                NULL
1402 #define pm_genpd_suspend                NULL
1403 #define pm_genpd_suspend_late           NULL
1404 #define pm_genpd_suspend_noirq          NULL
1405 #define pm_genpd_resume_early           NULL
1406 #define pm_genpd_resume_noirq           NULL
1407 #define pm_genpd_resume                 NULL
1408 #define pm_genpd_freeze                 NULL
1409 #define pm_genpd_freeze_late            NULL
1410 #define pm_genpd_freeze_noirq           NULL
1411 #define pm_genpd_thaw_early             NULL
1412 #define pm_genpd_thaw_noirq             NULL
1413 #define pm_genpd_thaw                   NULL
1414 #define pm_genpd_restore_noirq          NULL
1415 #define pm_genpd_complete               NULL
1416
1417 #endif /* CONFIG_PM_SLEEP */
1418
/*
 * genpd_alloc_dev_data - Allocate per-device genpd data and attach @dev.
 *
 * Takes a reference on dev->power.subsys_data, allocates the domain data,
 * and, under dev->power.lock, links the device to @genpd.  Returns the new
 * data on success or an ERR_PTR() value on failure (-EINVAL if the device
 * already has domain data).  On error, everything acquired here is released.
 */
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
                                        struct generic_pm_domain *genpd,
                                        struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data;
        int ret;

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                return ERR_PTR(ret);

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data) {
                ret = -ENOMEM;
                goto err_put;
        }

        /* Start from the caller-supplied timing data, if any. */
        if (td)
                gpd_data->td = *td;

        gpd_data->base.dev = dev;
        gpd_data->need_restore = -1;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

        spin_lock_irq(&dev->power.lock);

        /* Refuse devices that already belong to a PM domain. */
        if (dev->power.subsys_data->domain_data) {
                ret = -EINVAL;
                goto err_free;
        }

        dev->power.subsys_data->domain_data = &gpd_data->base;
        dev->pm_domain = &genpd->domain;

        spin_unlock_irq(&dev->power.lock);

        return gpd_data;

 err_free:
        spin_unlock_irq(&dev->power.lock);
        kfree(gpd_data);
 err_put:
        dev_pm_put_subsys_data(dev);
        return ERR_PTR(ret);
}
1466
/*
 * genpd_free_dev_data - Undo genpd_alloc_dev_data().
 *
 * Detaches @dev from its PM domain under dev->power.lock, then frees the
 * domain data and drops the subsys_data reference taken at allocation time.
 */
static void genpd_free_dev_data(struct device *dev,
                                struct generic_pm_domain_data *gpd_data)
{
        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = NULL;
        dev->power.subsys_data->domain_data = NULL;

        spin_unlock_irq(&dev->power.lock);

        /* Only free after the pointers above have been cleared under the lock. */
        kfree(gpd_data);
        dev_pm_put_subsys_data(dev);
}
1480
1481 /**
1482  * __pm_genpd_add_device - Add a device to an I/O PM domain.
1483  * @genpd: PM domain to add the device to.
1484  * @dev: Device to be added.
1485  * @td: Set of PM QoS timing parameters to attach to the device.
1486  */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data = genpd_alloc_dev_data(dev, genpd, td);
        if (IS_ERR(gpd_data))
                return PTR_ERR(gpd_data);

        genpd_acquire_lock(genpd);

        /* No membership changes while a system transition is in progress. */
        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        /* The ->attach_dev() callback is optional. */
        ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
        if (ret)
                goto out;

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
        genpd_release_lock(genpd);

        /*
         * On failure undo genpd_alloc_dev_data(); on success register the
         * PM QoS notifier outside of the domain lock.
         */
        if (ret)
                genpd_free_dev_data(dev, gpd_data);
        else
                dev_pm_qos_add_notifier(dev, &gpd_data->nb);

        return ret;
}
1528
1529 /**
1530  * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1531  * @domain_name: Name of the PM domain to add the device to.
1532  * @dev: Device to be added.
1533  * @td: Set of PM QoS timing parameters to attach to the device.
1534  */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
                               struct gpd_timing_data *td)
{
        struct generic_pm_domain *genpd = pm_genpd_lookup_name(domain_name);

        /* __pm_genpd_add_device() validates a failed lookup itself. */
        return __pm_genpd_add_device(genpd, dev, td);
}
1540
1541 /**
1542  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1543  * @genpd: PM domain to remove the device from.
1544  * @dev: Device to be removed.
1545  */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                           struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        /* The device must actually belong to the given domain. */
        if (!genpd || genpd != pm_genpd_lookup_dev(dev))
                return -EINVAL;

        /* The above validation also means we have existing domain_data. */
        pdd = dev->power.subsys_data->domain_data;
        gpd_data = to_gpd_data(pdd);
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

        genpd_acquire_lock(genpd);

        /* No membership changes while a system transition is in progress. */
        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        genpd->device_count--;
        genpd->max_off_time_changed = true;

        /* The ->detach_dev() callback is optional. */
        if (genpd->detach_dev)
                genpd->detach_dev(genpd, dev);

        list_del_init(&pdd->list_node);

        genpd_release_lock(genpd);

        genpd_free_dev_data(dev, gpd_data);

        return 0;

 out:
        /* Re-register the notifier removed above, since the device stays. */
        genpd_release_lock(genpd);
        dev_pm_qos_add_notifier(dev, &gpd_data->nb);

        return ret;
}
1590
1591 /**
1592  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1593  * @genpd: Master PM domain to add the subdomain to.
1594  * @subdomain: Subdomain to be added.
1595  */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                           struct generic_pm_domain *subdomain)
{
        struct gpd_link *link;
        int ret = 0;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
            || genpd == subdomain)
                return -EINVAL;

 start:
        genpd_acquire_lock(genpd);
        mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

        /*
         * The subdomain is mid power transition (neither fully off nor fully
         * active); drop both locks and retry until it settles.
         */
        if (subdomain->status != GPD_STATE_POWER_OFF
            && subdomain->status != GPD_STATE_ACTIVE) {
                mutex_unlock(&subdomain->lock);
                genpd_release_lock(genpd);
                goto start;
        }

        /* A powered-off master cannot take a powered-on subdomain. */
        if (genpd->status == GPD_STATE_POWER_OFF
            &&  subdomain->status != GPD_STATE_POWER_OFF) {
                ret = -EINVAL;
                goto out;
        }

        /* Reject duplicate master/subdomain links. */
        list_for_each_entry(link, &genpd->master_links, master_node) {
                if (link->slave == subdomain && link->master == genpd) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link) {
                ret = -ENOMEM;
                goto out;
        }
        link->master = genpd;
        list_add_tail(&link->master_node, &genpd->master_links);
        link->slave = subdomain;
        list_add_tail(&link->slave_node, &subdomain->slave_links);
        /* An already-on subdomain counts against the master right away. */
        if (subdomain->status != GPD_STATE_POWER_OFF)
                genpd_sd_counter_inc(genpd);

 out:
        mutex_unlock(&subdomain->lock);
        genpd_release_lock(genpd);

        return ret;
}
1648
1649 /**
1650  * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1651  * @master_name: Name of the master PM domain to add the subdomain to.
1652  * @subdomain_name: Name of the subdomain to be added.
1653  */
1654 int pm_genpd_add_subdomain_names(const char *master_name,
1655                                  const char *subdomain_name)
1656 {
1657         struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1658
1659         if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1660                 return -EINVAL;
1661
1662         mutex_lock(&gpd_list_lock);
1663         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1664                 if (!master && !strcmp(gpd->name, master_name))
1665                         master = gpd;
1666
1667                 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1668                         subdomain = gpd;
1669
1670                 if (master && subdomain)
1671                         break;
1672         }
1673         mutex_unlock(&gpd_list_lock);
1674
1675         return pm_genpd_add_subdomain(master, subdomain);
1676 }
1677
1678 /**
1679  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1680  * @genpd: Master PM domain to remove the subdomain from.
1681  * @subdomain: Subdomain to be removed.
1682  */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                              struct generic_pm_domain *subdomain)
{
        struct gpd_link *link;
        int ret = -EINVAL;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
                return -EINVAL;

 start:
        genpd_acquire_lock(genpd);

        /* Find the link connecting @subdomain to @genpd, if any. */
        list_for_each_entry(link, &genpd->master_links, master_node) {
                if (link->slave != subdomain)
                        continue;

                mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

                /*
                 * The subdomain is mid power transition; drop the locks and
                 * restart the search once it has settled.
                 */
                if (subdomain->status != GPD_STATE_POWER_OFF
                    && subdomain->status != GPD_STATE_ACTIVE) {
                        mutex_unlock(&subdomain->lock);
                        genpd_release_lock(genpd);
                        goto start;
                }

                list_del(&link->master_node);
                list_del(&link->slave_node);
                kfree(link);
                /* A powered-on subdomain held a reference on the master. */
                if (subdomain->status != GPD_STATE_POWER_OFF)
                        genpd_sd_counter_dec(genpd);

                mutex_unlock(&subdomain->lock);

                ret = 0;
                break;
        }

        genpd_release_lock(genpd);

        return ret;
}
1724
1725 /**
1726  * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1727  * @genpd: PM domain to be connected with cpuidle.
1728  * @state: cpuidle state this domain can disable/enable.
1729  *
1730  * Make a PM domain behave as though it contained a CPU core, that is, instead
1731  * of calling its power down routine it will enable the given cpuidle state so
1732  * that the cpuidle subsystem can power it down (if possible and desirable).
1733  */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
        struct cpuidle_driver *cpuidle_drv;
        struct gpd_cpuidle_data *cpuidle_data;
        struct cpuidle_state *idle_state;
        int ret = 0;

        if (IS_ERR_OR_NULL(genpd) || state < 0)
                return -EINVAL;

        genpd_acquire_lock(genpd);

        /* Only one cpuidle connection per domain. */
        if (genpd->cpuidle_data) {
                ret = -EEXIST;
                goto out;
        }
        cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
        if (!cpuidle_data) {
                ret = -ENOMEM;
                goto out;
        }
        /* Take a reference on the current cpuidle driver. */
        cpuidle_drv = cpuidle_driver_ref();
        if (!cpuidle_drv) {
                ret = -ENODEV;
                goto err_drv;
        }
        if (cpuidle_drv->state_count <= state) {
                ret = -EINVAL;
                goto err;
        }
        idle_state = &cpuidle_drv->states[state];
        /* The state must currently be disabled so the domain can own it. */
        if (!idle_state->disabled) {
                ret = -EAGAIN;
                goto err;
        }
        /* Save the original exit latency so detach can restore it. */
        cpuidle_data->idle_state = idle_state;
        cpuidle_data->saved_exit_latency = idle_state->exit_latency;
        genpd->cpuidle_data = cpuidle_data;
        genpd_recalc_cpu_exit_latency(genpd);

 out:
        genpd_release_lock(genpd);
        return ret;

 err:
        cpuidle_driver_unref();

 err_drv:
        kfree(cpuidle_data);
        goto out;
}
1785
1786 /**
1787  * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1788  * @name: Name of the domain to connect to cpuidle.
1789  * @state: cpuidle state this domain can manipulate.
1790  */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
        struct generic_pm_domain *genpd = pm_genpd_lookup_name(name);

        /* pm_genpd_attach_cpuidle() handles a failed lookup itself. */
        return pm_genpd_attach_cpuidle(genpd, state);
}
1795
1796 /**
1797  * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1798  * @genpd: PM domain to remove the cpuidle connection from.
1799  *
1800  * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1801  * given PM domain.
1802  */
1803 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1804 {
1805         struct gpd_cpuidle_data *cpuidle_data;
1806         struct cpuidle_state *idle_state;
1807         int ret = 0;
1808
1809         if (IS_ERR_OR_NULL(genpd))
1810                 return -EINVAL;
1811
1812         genpd_acquire_lock(genpd);
1813
1814         cpuidle_data = genpd->cpuidle_data;
1815         if (!cpuidle_data) {
1816                 ret = -ENODEV;
1817                 goto out;
1818         }
1819         idle_state = cpuidle_data->idle_state;
1820         if (!idle_state->disabled) {
1821                 ret = -EAGAIN;
1822                 goto out;
1823         }
1824         idle_state->exit_latency = cpuidle_data->saved_exit_latency;
1825         cpuidle_driver_unref();
1826         genpd->cpuidle_data = NULL;
1827         kfree(cpuidle_data);
1828
1829  out:
1830         genpd_release_lock(genpd);
1831         return ret;
1832 }
1833
/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 *
 * Convenience wrapper resolving the domain by name before delegating to
 * pm_genpd_detach_cpuidle().
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	struct generic_pm_domain *genpd = pm_genpd_lookup_name(name);

	return pm_genpd_detach_cpuidle(genpd);
}
1842
1843 /* Default device callbacks for generic PM domains. */
1844
1845 /**
1846  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1847  * @dev: Device to handle.
1848  */
1849 static int pm_genpd_default_save_state(struct device *dev)
1850 {
1851         int (*cb)(struct device *__dev);
1852
1853         if (dev->type && dev->type->pm)
1854                 cb = dev->type->pm->runtime_suspend;
1855         else if (dev->class && dev->class->pm)
1856                 cb = dev->class->pm->runtime_suspend;
1857         else if (dev->bus && dev->bus->pm)
1858                 cb = dev->bus->pm->runtime_suspend;
1859         else
1860                 cb = NULL;
1861
1862         if (!cb && dev->driver && dev->driver->pm)
1863                 cb = dev->driver->pm->runtime_suspend;
1864
1865         return cb ? cb(dev) : 0;
1866 }
1867
1868 /**
1869  * pm_genpd_default_restore_state - Default PM domains "restore device state".
1870  * @dev: Device to handle.
1871  */
1872 static int pm_genpd_default_restore_state(struct device *dev)
1873 {
1874         int (*cb)(struct device *__dev);
1875
1876         if (dev->type && dev->type->pm)
1877                 cb = dev->type->pm->runtime_resume;
1878         else if (dev->class && dev->class->pm)
1879                 cb = dev->class->pm->runtime_resume;
1880         else if (dev->bus && dev->bus->pm)
1881                 cb = dev->bus->pm->runtime_resume;
1882         else
1883                 cb = NULL;
1884
1885         if (!cb && dev->driver && dev->driver->pm)
1886                 cb = dev->driver->pm->runtime_resume;
1887
1888         return cb ? cb(dev) : 0;
1889 }
1890
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Sets up the domain's lists, lock, work item and state, installs the
 * standard genpd system sleep and runtime PM callbacks into
 * genpd->domain.ops, and links the domain onto the global gpd_list.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	/* Internal bookkeeping: link lists, lock and power-off work item. */
	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	/* Runtime PM callbacks. */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	/* System suspend/resume callbacks. */
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	/* Hibernation: freeze/thaw callbacks. */
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	/* Hibernation: poweroff reuses the suspend callbacks. */
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	/* Optionally manage device clocks via the PM clock framework. */
	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Publish the domain on the global list. */
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
1952
1953 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1954 /*
1955  * Device Tree based PM domain providers.
1956  *
1957  * The code below implements generic device tree based PM domain providers that
1958  * bind device tree nodes with generic PM domains registered in the system.
1959  *
1960  * Any driver that registers generic PM domains and needs to support binding of
1961  * devices to these domains is supposed to register a PM domain provider, which
1962  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1963  *
1964  * Two simple mapping functions have been provided for convenience:
1965  *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1966  *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1967  *    index.
1968  */
1969
1970 /**
1971  * struct of_genpd_provider - PM domain provider registration structure
1972  * @link: Entry in global list of PM domain providers
1973  * @node: Pointer to device tree node of PM domain provider
1974  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1975  *         into a PM domain.
1976  * @data: context pointer to be passed into @xlate callback
1977  */
1978 struct of_genpd_provider {
1979         struct list_head link;
1980         struct device_node *node;
1981         genpd_xlate_t xlate;
1982         void *data;
1983 };
1984
1985 /* List of registered PM domain providers. */
1986 static LIST_HEAD(of_genpd_providers);
1987 /* Mutex to protect the list above. */
1988 static DEFINE_MUTEX(of_genpd_mutex);
1989
1990 /**
1991  * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1992  * @genpdspec: OF phandle args to map into a PM domain
1993  * @data: xlate function private data - pointer to struct generic_pm_domain
1994  *
1995  * This is a generic xlate function that can be used to model PM domains that
1996  * have their own device tree nodes. The private data of xlate function needs
1997  * to be a valid pointer to struct generic_pm_domain.
1998  */
1999 struct generic_pm_domain *__of_genpd_xlate_simple(
2000                                         struct of_phandle_args *genpdspec,
2001                                         void *data)
2002 {
2003         if (genpdspec->args_count != 0)
2004                 return ERR_PTR(-EINVAL);
2005         return data;
2006 }
2007 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
2008
2009 /**
2010  * __of_genpd_xlate_onecell() - Xlate function using a single index.
2011  * @genpdspec: OF phandle args to map into a PM domain
2012  * @data: xlate function private data - pointer to struct genpd_onecell_data
2013  *
2014  * This is a generic xlate function that can be used to model simple PM domain
2015  * controllers that have one device tree node and provide multiple PM domains.
2016  * A single cell is used as an index into an array of PM domains specified in
2017  * the genpd_onecell_data struct when registering the provider.
2018  */
2019 struct generic_pm_domain *__of_genpd_xlate_onecell(
2020                                         struct of_phandle_args *genpdspec,
2021                                         void *data)
2022 {
2023         struct genpd_onecell_data *genpd_data = data;
2024         unsigned int idx = genpdspec->args[0];
2025
2026         if (genpdspec->args_count != 1)
2027                 return ERR_PTR(-EINVAL);
2028
2029         if (idx >= genpd_data->num_domains) {
2030                 pr_err("%s: invalid domain index %u\n", __func__, idx);
2031                 return ERR_PTR(-EINVAL);
2032         }
2033
2034         if (!genpd_data->domains[idx])
2035                 return ERR_PTR(-ENOENT);
2036
2037         return genpd_data->domains[idx];
2038 }
2039 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
2040
2041 /**
2042  * __of_genpd_add_provider() - Register a PM domain provider for a node
2043  * @np: Device node pointer associated with the PM domain provider.
2044  * @xlate: Callback for decoding PM domain from phandle arguments.
2045  * @data: Context pointer for @xlate callback.
2046  */
2047 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2048                         void *data)
2049 {
2050         struct of_genpd_provider *cp;
2051
2052         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2053         if (!cp)
2054                 return -ENOMEM;
2055
2056         cp->node = of_node_get(np);
2057         cp->data = data;
2058         cp->xlate = xlate;
2059
2060         mutex_lock(&of_genpd_mutex);
2061         list_add(&cp->link, &of_genpd_providers);
2062         mutex_unlock(&of_genpd_mutex);
2063         pr_debug("Added domain provider from %s\n", np->full_name);
2064
2065         return 0;
2066 }
2067 EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
2068
2069 /**
2070  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2071  * @np: Device node pointer associated with the PM domain provider
2072  */
2073 void of_genpd_del_provider(struct device_node *np)
2074 {
2075         struct of_genpd_provider *cp;
2076
2077         mutex_lock(&of_genpd_mutex);
2078         list_for_each_entry(cp, &of_genpd_providers, link) {
2079                 if (cp->node == np) {
2080                         list_del(&cp->link);
2081                         of_node_put(cp->node);
2082                         kfree(cp);
2083                         break;
2084                 }
2085         }
2086         mutex_unlock(&of_genpd_mutex);
2087 }
2088 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2089
2090 /**
2091  * of_genpd_get_from_provider() - Look-up PM domain
2092  * @genpdspec: OF phandle args to use for look-up
2093  *
2094  * Looks for a PM domain provider under the node specified by @genpdspec and if
2095  * found, uses xlate function of the provider to map phandle args to a PM
2096  * domain.
2097  *
2098  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2099  * on failure.
2100  */
2101 struct generic_pm_domain *of_genpd_get_from_provider(
2102                                         struct of_phandle_args *genpdspec)
2103 {
2104         struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2105         struct of_genpd_provider *provider;
2106
2107         mutex_lock(&of_genpd_mutex);
2108
2109         /* Check if we have such a provider in our array */
2110         list_for_each_entry(provider, &of_genpd_providers, link) {
2111                 if (provider->node == genpdspec->np)
2112                         genpd = provider->xlate(genpdspec, provider->data);
2113                 if (!IS_ERR(genpd))
2114                         break;
2115         }
2116
2117         mutex_unlock(&of_genpd_mutex);
2118
2119         return genpd;
2120 }
2121 EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
2122
2123 /**
2124  * genpd_dev_pm_detach - Detach a device from its PM domain.
2125  * @dev: Device to attach.
2126  * @power_off: Currently not used
2127  *
2128  * Try to locate a corresponding generic PM domain, which the device was
2129  * attached to previously. If such is found, the device is detached from it.
2130  */
2131 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2132 {
2133         struct generic_pm_domain *pd;
2134         int ret = 0;
2135
2136         pd = pm_genpd_lookup_dev(dev);
2137         if (!pd)
2138                 return;
2139
2140         dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2141
2142         while (1) {
2143                 ret = pm_genpd_remove_device(pd, dev);
2144                 if (ret != -EAGAIN)
2145                         break;
2146                 cond_resched();
2147         }
2148
2149         if (ret < 0) {
2150                 dev_err(dev, "failed to remove from PM domain %s: %d",
2151                         pd->name, ret);
2152                 return;
2153         }
2154
2155         /* Check if PM domain can be powered off after removing this device. */
2156         genpd_queue_power_off_work(pd);
2157 }
2158
/*
 * genpd_dev_pm_sync - Schedule a power-off check for @dev's PM domain,
 * if the device actually belongs to a generic PM domain.
 */
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd = dev_to_genpd(dev);

	if (!IS_ERR(pd))
		genpd_queue_power_off_work(pd);
}
2169
2170 /**
2171  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2172  * @dev: Device to attach.
2173  *
2174  * Parse device's OF node to find a PM domain specifier. If such is found,
2175  * attaches the device to retrieved pm_domain ops.
2176  *
2177  * Both generic and legacy Samsung-specific DT bindings are supported to keep
2178  * backwards compatibility with existing DTBs.
2179  *
2180  * Returns 0 on successfully attached PM domain or negative error code.
2181  */
2182 int genpd_dev_pm_attach(struct device *dev)
2183 {
2184         struct of_phandle_args pd_args;
2185         struct generic_pm_domain *pd;
2186         int ret;
2187
2188         if (!dev->of_node)
2189                 return -ENODEV;
2190
2191         if (dev->pm_domain)
2192                 return -EEXIST;
2193
2194         ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2195                                         "#power-domain-cells", 0, &pd_args);
2196         if (ret < 0) {
2197                 if (ret != -ENOENT)
2198                         return ret;
2199
2200                 /*
2201                  * Try legacy Samsung-specific bindings
2202                  * (for backwards compatibility of DT ABI)
2203                  */
2204                 pd_args.args_count = 0;
2205                 pd_args.np = of_parse_phandle(dev->of_node,
2206                                                 "samsung,power-domain", 0);
2207                 if (!pd_args.np)
2208                         return -ENOENT;
2209         }
2210
2211         pd = of_genpd_get_from_provider(&pd_args);
2212         if (IS_ERR(pd)) {
2213                 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2214                         __func__, PTR_ERR(pd));
2215                 of_node_put(dev->of_node);
2216                 return PTR_ERR(pd);
2217         }
2218
2219         dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2220
2221         while (1) {
2222                 ret = pm_genpd_add_device(pd, dev);
2223                 if (ret != -EAGAIN)
2224                         break;
2225                 cond_resched();
2226         }
2227
2228         if (ret < 0) {
2229                 dev_err(dev, "failed to add to PM domain %s: %d",
2230                         pd->name, ret);
2231                 of_node_put(dev->of_node);
2232                 return ret;
2233         }
2234
2235         dev->pm_domain->detach = genpd_dev_pm_detach;
2236         dev->pm_domain->sync = genpd_dev_pm_sync;
2237         pm_genpd_poweron(pd);
2238
2239         return 0;
2240 }
2241 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2242 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2243
2244
2245 /***        debugfs support        ***/
2246
2247 #ifdef CONFIG_PM_ADVANCED_DEBUG
2248 #include <linux/pm.h>
2249 #include <linux/device.h>
2250 #include <linux/debugfs.h>
2251 #include <linux/seq_file.h>
2252 #include <linux/init.h>
2253 #include <linux/kobject.h>
2254 static struct dentry *pm_genpd_debugfs_dir;
2255
2256 /*
2257  * TODO: This function is a slightly modified version of rtpm_status_show
2258  * from sysfs.c, so generalize it.
2259  */
2260 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2261 {
2262         static const char * const status_lookup[] = {
2263                 [RPM_ACTIVE] = "active",
2264                 [RPM_RESUMING] = "resuming",
2265                 [RPM_SUSPENDED] = "suspended",
2266                 [RPM_SUSPENDING] = "suspending"
2267         };
2268         const char *p = "";
2269
2270         if (dev->power.runtime_error)
2271                 p = "error";
2272         else if (dev->power.disable_depth)
2273                 p = "unsupported";
2274         else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2275                 p = status_lookup[dev->power.runtime_status];
2276         else
2277                 WARN_ON(1);
2278
2279         seq_puts(s, p);
2280 }
2281
2282 static int pm_genpd_summary_one(struct seq_file *s,
2283                                 struct generic_pm_domain *genpd)
2284 {
2285         static const char * const status_lookup[] = {
2286                 [GPD_STATE_ACTIVE] = "on",
2287                 [GPD_STATE_WAIT_MASTER] = "wait-master",
2288                 [GPD_STATE_BUSY] = "busy",
2289                 [GPD_STATE_REPEAT] = "off-in-progress",
2290                 [GPD_STATE_POWER_OFF] = "off"
2291         };
2292         struct pm_domain_data *pm_data;
2293         const char *kobj_path;
2294         struct gpd_link *link;
2295         int ret;
2296
2297         ret = mutex_lock_interruptible(&genpd->lock);
2298         if (ret)
2299                 return -ERESTARTSYS;
2300
2301         if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2302                 goto exit;
2303         seq_printf(s, "%-30s  %-15s  ", genpd->name, status_lookup[genpd->status]);
2304
2305         /*
2306          * Modifications on the list require holding locks on both
2307          * master and slave, so we are safe.
2308          * Also genpd->name is immutable.
2309          */
2310         list_for_each_entry(link, &genpd->master_links, master_node) {
2311                 seq_printf(s, "%s", link->slave->name);
2312                 if (!list_is_last(&link->master_node, &genpd->master_links))
2313                         seq_puts(s, ", ");
2314         }
2315
2316         list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2317                 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2318                 if (kobj_path == NULL)
2319                         continue;
2320
2321                 seq_printf(s, "\n    %-50s  ", kobj_path);
2322                 rtpm_status_str(s, pm_data->dev);
2323                 kfree(kobj_path);
2324         }
2325
2326         seq_puts(s, "\n");
2327 exit:
2328         mutex_unlock(&genpd->lock);
2329
2330         return 0;
2331 }
2332
2333 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2334 {
2335         struct generic_pm_domain *genpd;
2336         int ret = 0;
2337
2338         seq_puts(s, "    domain                      status         slaves\n");
2339         seq_puts(s, "           /device                                      runtime status\n");
2340         seq_puts(s, "----------------------------------------------------------------------\n");
2341
2342         ret = mutex_lock_interruptible(&gpd_list_lock);
2343         if (ret)
2344                 return -ERESTARTSYS;
2345
2346         list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2347                 ret = pm_genpd_summary_one(s, genpd);
2348                 if (ret)
2349                         break;
2350         }
2351         mutex_unlock(&gpd_list_lock);
2352
2353         return ret;
2354 }
2355
/* debugfs open callback: wire the file up to the seq_file single-show API. */
static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}
2360
/* File operations for the "pm_genpd_summary" debugfs file (seq_file based). */
static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2367
2368 static int __init pm_genpd_debug_init(void)
2369 {
2370         struct dentry *d;
2371
2372         pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2373
2374         if (!pm_genpd_debugfs_dir)
2375                 return -ENOMEM;
2376
2377         d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2378                         pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2379         if (!d)
2380                 return -ENOMEM;
2381
2382         return 0;
2383 }
2384 late_initcall(pm_genpd_debug_init);
2385
/* Tear down the whole "pm_genpd" debugfs tree on module exit. */
static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
2391 #endif /* CONFIG_PM_ADVANCED_DEBUG */